diff --git a/.common-ci.yml b/.common-ci.yml index 35351153f..6a37d7989 100644 --- a/.common-ci.yml +++ b/.common-ci.yml @@ -38,13 +38,13 @@ workflow: - if: $CI_COMMIT_BRANCH - if: $CI_COMMIT_TAG - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_COMMIT_BRANCH == "master" + - if: $CI_COMMIT_BRANCH == "main" - if: $CI_COMMIT_BRANCH =~ /^release-.*$/ - if: $CI_COMMIT_TAG && $CI_COMMIT_TAG != "" .main-or-manual: rules: - - if: $CI_COMMIT_BRANCH == "master" + - if: $CI_COMMIT_BRANCH == "main" - if: $CI_COMMIT_BRANCH =~ /^release-.*$/ - if: $CI_COMMIT_TAG && $CI_COMMIT_TAG != "" - if: $CI_PIPELINE_SOURCE == "schedule" @@ -71,7 +71,7 @@ trigger-pipeline: .buildx-setup: before_script: - - export BUILDX_VERSION=v0.6.3 + - export BUILDX_VERSION=v0.15.1 - apk add --no-cache curl - mkdir -p ~/.docker/cli-plugins - curl -sSLo ~/.docker/cli-plugins/docker-buildx "https://github.com/docker/buildx/releases/download/${BUILDX_VERSION}/buildx-${BUILDX_VERSION}.linux-amd64" @@ -82,9 +82,9 @@ trigger-pipeline: - '[[ -n "${SKIP_QEMU_SETUP}" ]] || docker run --rm --privileged multiarch/qemu-user-static --reset -p yes' # Define targets for the gpu-operator and gpu-operator-validator images -.dist-ubi8: +.dist-ubi9: variables: - DIST: ubi8 + DIST: ubi9 CVE_UPDATES: "cyrus-sasl-lib" .target-gpu-operator: @@ -99,6 +99,7 @@ trigger-pipeline: IMAGE_NAME: "${CI_REGISTRY_IMAGE}/gpu-operator-validator" IN_IMAGE_NAME: "gpu-operator-validator" IMAGE_ARCHIVE: "gpu-operator-validator.tar" + IN_REGISTRY: "${STAGING_REGISTRY}/gpu-operator" # .release forms the base of the deployment jobs which push images to the CI registry. # This is extended with the version to be deployed (e.g. the SHA or TAG) and the @@ -149,7 +150,7 @@ trigger-pipeline: # Download the regctl binary for use in the release steps .regctl-setup: before_script: - - export REGCTL_VERSION=v0.3.10 + - export REGCTL_VERSION=v0.7.2 - apk add --no-cache curl - mkdir -p bin - curl -sSLo bin/regctl https://github.com/regclient/regclient/releases/download/${REGCTL_VERSION}/regctl-linux-amd64 @@ -181,7 +182,7 @@ trigger-pipeline: release:staging-gpu-operator: extends: - .release:staging - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator variables: OUT_IMAGE_NAME: "${CI_REGISTRY_IMAGE}/staging/gpu-operator" @@ -189,7 +190,7 @@ release:staging-gpu-operator: release:staging-gpu-operator-validator: extends: - .release:staging - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator-validator variables: OUT_IMAGE_NAME: "${CI_REGISTRY_IMAGE}/staging/gpu-operator-validator" @@ -197,7 +198,7 @@ release:staging-gpu-operator-validator: release:staging-latest-gpu-operator: extends: - .release:staging - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator variables: OUT_IMAGE_NAME: "${CI_REGISTRY_IMAGE}/staging/gpu-operator" @@ -208,7 +209,7 @@ release:staging-latest-gpu-operator: release:staging-latest-gpu-operator-validator: extends: - .release:staging - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator-validator variables: OUT_IMAGE_NAME: "${CI_REGISTRY_IMAGE}/staging/gpu-operator-validator" diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 41dc2bb8a..268e75212 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -39,10 +39,10 @@ _Detailed steps to reproduce the issue._ Collecting full debug bundle (optional): ``` -curl -o must-gather.sh -L https://raw.githubusercontent.com/NVIDIA/gpu-operator/master/hack/must-gather.sh +curl -o must-gather.sh -L https://raw.githubusercontent.com/NVIDIA/gpu-operator/main/hack/must-gather.sh chmod +x 
must-gather.sh ./must-gather.sh ``` -**NOTE**: please refer to the [must-gather](https://raw.githubusercontent.com/NVIDIA/gpu-operator/master/hack/must-gather.sh) script for debug data collected. +**NOTE**: please refer to the [must-gather](https://raw.githubusercontent.com/NVIDIA/gpu-operator/main/hack/must-gather.sh) script for debug data collected. This bundle can be submitted to us via email: **operator_feedback@nvidia.com** diff --git a/.github/copy-pr-bot.yaml b/.github/copy-pr-bot.yaml new file mode 100644 index 000000000..c61a81b13 --- /dev/null +++ b/.github/copy-pr-bot.yaml @@ -0,0 +1,3 @@ +# https://docs.gha-runners.nvidia.com/apps/copy-pr-bot/#configuration + +enabled: true diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..7ba9bbfe8 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,43 @@ +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "gomod" + target-branch: main + directory: "/" + schedule: + interval: "daily" + labels: + - dependencies + groups: + k8sio: + patterns: + - k8s.io/* + exclude-patterns: + - k8s.io/klog/* + + - package-ecosystem: "gomod" + target-branch: main + directory: "/tools" + schedule: + interval: "daily" + labels: + - dependencies + + # Update GPU Operator base images. + - package-ecosystem: "docker" + directory: "/docker" + schedule: + interval: "daily" + + # Update GPU Operator Validator base images. + - package-ecosystem: "docker" + directory: "/validator" + schedule: + interval: "daily" + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index e8e3d86dc..000000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,5 +0,0 @@ -Hello! - -Thanks for making this contribution! When contributing to this repository please keep in mind the following: -- [You should sign your work](https://github.com/NVIDIA/gpu-operator/blob/master/CONTRIBUTING.md). -- You should be making your contribution against the [gitlab.com repository](https://gitlab.com/nvidia/kubernetes/gpu-operator) as github.com is just a mirror. diff --git a/.github/workflows/blossom-ci.yml b/.github/workflows/blossom-ci.yml deleted file mode 100644 index a5b37eb4f..000000000 --- a/.github/workflows/blossom-ci.yml +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) 2020-2023, NVIDIA CORPORATION. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# A workflow to trigger ci on hybrid infra (github + self hosted runner) -name: Blossom-CI -on: - issue_comment: - types: [created] - workflow_dispatch: - inputs: - platform: - description: 'runs-on argument' - required: false - args: - description: 'argument' - required: false -jobs: - Authorization: - name: Authorization - runs-on: blossom - outputs: - args: ${{ env.args }} - - # This job only runs for pull request comments - if: | - contains( '\ - anstockatnv,\ - rorajani,\ - cdesiniotis,\ - shivamerla,\ - ArangoGutierrez,\ - elezar,\ - klueska,\ - zvonkok,\ - ', format('{0},', github.actor)) && - github.event.comment.body == '/blossom-ci' - steps: - - name: Check if comment is issued by authorized person - run: blossom-ci - env: - OPERATION: 'AUTH' - REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }} - REPO_KEY_DATA: ${{ secrets.BLOSSOM_KEY }} - - Vulnerability-scan: - name: Vulnerability scan - needs: [Authorization] - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v2 - with: - repository: ${{ fromJson(needs.Authorization.outputs.args).repo }} - ref: ${{ fromJson(needs.Authorization.outputs.args).ref }} - lfs: 'true' - - # repo specific steps - #- name: Setup java - # uses: actions/setup-java@v1 - # with: - # java-version: 1.8 - - # add blackduck properties https://synopsys.atlassian.net/wiki/spaces/INTDOCS/pages/631308372/Methods+for+Configuring+Analysis#Using-a-configuration-file - #- name: Setup blackduck properties - # run: | - # PROJECTS=$(mvn -am dependency:tree | grep maven-dependency-plugin | awk '{ out="com.nvidia:"$(NF-1);print out }' | grep rapids | xargs | sed -e 's/ /,/g') - # echo detect.maven.build.command="-pl=$PROJECTS -am" >> application.properties - # echo detect.maven.included.scopes=compile >> application.properties - - - name: Run blossom action - uses: NVIDIA/blossom-action@main - env: - REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }} - REPO_KEY_DATA: ${{ secrets.BLOSSOM_KEY }} - with: - args1: ${{ fromJson(needs.Authorization.outputs.args).args1 }} - args2: ${{ fromJson(needs.Authorization.outputs.args).args2 }} - args3: ${{ fromJson(needs.Authorization.outputs.args).args3 }} - - Job-trigger: - name: Start ci job - needs: [Vulnerability-scan] - runs-on: blossom - steps: - - name: Start ci job - run: blossom-ci - env: - OPERATION: 'START-CI-JOB' - CI_SERVER: ${{ secrets.CI_SERVER }} - REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - Upload-Log: - name: Upload log - runs-on: blossom - if : github.event_name == 'workflow_dispatch' - steps: - - name: Jenkins log for pull request ${{ fromJson(github.event.inputs.args).pr }} (click here) - run: blossom-ci - env: - OPERATION: 'POST-PROCESSING' - CI_SERVER: ${{ secrets.CI_SERVER }} - REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 000000000..79f3e17ac --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,312 @@ +# Copyright 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name: CI + +on: + push: + branches: + - "pull-request/[0-9]+" + - main + - release-* + +jobs: + ### Configuration checks ### + helm-lint: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install Helm + uses: azure/setup-helm@v4.2.0 + id: install + - run: helm lint deployments/gpu-operator/ + validate-csv: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Get Golang version + id: vars + run: | + GOLANG_VERSION=$( grep "GOLANG_VERSION ?=" versions.mk ) + echo "GOLANG_VERSION=${GOLANG_VERSION##GOLANG_VERSION ?= }" >> $GITHUB_ENV + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GOLANG_VERSION }} + - run: make validate-csv + validate-helm-values: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Get Golang version + id: vars + run: | + GOLANG_VERSION=$( grep "GOLANG_VERSION ?=" versions.mk ) + echo "GOLANG_VERSION=${GOLANG_VERSION##GOLANG_VERSION ?= }" >> $GITHUB_ENV + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GOLANG_VERSION }} + - run: make validate-helm-values + + ### Golang checks and build ### + go-check: + needs: [helm-lint, validate-csv, validate-helm-values] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + name: Checkout code + - name: Get Golang version + id: vars + run: | + GOLANG_VERSION=$( grep "GOLANG_VERSION ?=" versions.mk ) + GOLANGCI_LINT_VERSION=$( grep "GOLANGCI_LINT_VERSION ?=" versions.mk ) + echo "GOLANG_VERSION=${GOLANG_VERSION##GOLANG_VERSION ?= }" >> $GITHUB_ENV + echo "GOLANGCI_LINT_VERSION=${GOLANGCI_LINT_VERSION##GOLANGCI_LINT_VERSION ?= }" >> $GITHUB_ENV + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GOLANG_VERSION }} + - name: Lint + uses: golangci/golangci-lint-action@v6 + with: + version: ${{ env.GOLANGCI_LINT_VERSION }} + args: -v --timeout 5m + skip-cache: true + - run: make check + go-test: + needs: [helm-lint, validate-csv, validate-helm-values] + name: unit tests + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Get Golang version + id: vars + run: | + GOLANG_VERSION=$( grep "GOLANG_VERSION ?=" versions.mk ) + echo "GOLANG_VERSION=${GOLANG_VERSION##GOLANG_VERSION ?= }" >> $GITHUB_ENV + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GOLANG_VERSION }} + - run: make coverage + go-build: + needs: [helm-lint, validate-csv, validate-helm-values] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + name: Checkout code + - run: make docker-build + + ### Image builds ### + build-gpu-operator: + needs: [go-check, go-test, go-build] + runs-on: ubuntu-latest + strategy: + matrix: + dist: [ubi9] + steps: + - uses: actions/checkout@v4 + name: Check out code + - name: Calculate build vars + id: vars + run: | + echo "COMMIT_SHORT_SHA=${GITHUB_SHA:0:8}" >> $GITHUB_ENV + echo "LOWERCASE_REPO_OWNER=$(echo "${GITHUB_REPOSITORY_OWNER}" | awk '{print tolower($0)}')" >> $GITHUB_ENV + REPO_FULL_NAME="${{ github.event.pull_request.head.repo.full_name }}" + echo "${REPO_FULL_NAME}" + echo "LABEL_IMAGE_SOURCE=https://github.com/${REPO_FULL_NAME}" >> $GITHUB_ENV + + GENERATE_ARTIFACTS="false" + if [[ "${{ github.actor }}" == "dependabot[bot]" ]]; then + GENERATE_ARTIFACTS="false" + elif [[ "${{ github.event_name }}" == "pull_request" && "${{ github.event.pull_request.head.repo.full_name }}" == "${{ github.repository }}" ]]; then + 
GENERATE_ARTIFACTS="true" + elif [[ "${{ github.event_name }}" == "push" ]]; then + GENERATE_ARTIFACTS="true" + fi + echo "PUSH_ON_BUILD=${GENERATE_ARTIFACTS}" >> $GITHUB_ENV + echo "BUILD_MULTI_ARCH_IMAGES=${GENERATE_ARTIFACTS}" >> $GITHUB_ENV + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build image + env: + IMAGE_NAME: ghcr.io/${LOWERCASE_REPO_OWNER}/gpu-operator + VERSION: ${COMMIT_SHORT_SHA} + run: | + echo "${VERSION}" + make build-${{ matrix.dist }} + build-gpu-operator-validator: + needs: [go-check, go-test, go-build] + runs-on: ubuntu-latest + strategy: + matrix: + dist: [ubi9] + steps: + - uses: actions/checkout@v4 + name: Check out code + - name: Calculate build vars + id: vars + run: | + echo "COMMIT_SHORT_SHA=${GITHUB_SHA:0:8}" >> $GITHUB_ENV + echo "LOWERCASE_REPO_OWNER=$(echo "${GITHUB_REPOSITORY_OWNER}" | awk '{print tolower($0)}')" >> $GITHUB_ENV + REPO_FULL_NAME="${{ github.event.pull_request.head.repo.full_name }}" + echo "${REPO_FULL_NAME}" + echo "LABEL_IMAGE_SOURCE=https://github.com/${REPO_FULL_NAME}" >> $GITHUB_ENV + + GENERATE_ARTIFACTS="false" + if [[ "${{ github.actor }}" == "dependabot[bot]" ]]; then + GENERATE_ARTIFACTS="false" + elif [[ "${{ github.event_name }}" == "pull_request" && "${{ github.event.pull_request.head.repo.full_name }}" == "${{ github.repository }}" ]]; then + GENERATE_ARTIFACTS="true" + elif [[ "${{ github.event_name }}" == "push" ]]; then + GENERATE_ARTIFACTS="true" + fi + echo "PUSH_ON_BUILD=${GENERATE_ARTIFACTS}" >> $GITHUB_ENV + echo "BUILD_MULTI_ARCH_IMAGES=${GENERATE_ARTIFACTS}" >> $GITHUB_ENV + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build image + env: + IMAGE_NAME: ghcr.io/${LOWERCASE_REPO_OWNER}/gpu-operator/gpu-operator-validator + VERSION: ${COMMIT_SHORT_SHA} + SUBCOMPONENT: validator + run: | + echo "${VERSION}" + make build-${{ matrix.dist }} + + ### e2e tests ### + e2e-tests-containerd: + needs: [build-gpu-operator, build-gpu-operator-validator] + runs-on: linux-amd64-cpu4 + steps: + - uses: actions/checkout@v4 + name: Check out code + - name: Set up Holodeck + uses: NVIDIA/holodeck@v0.2.1 + with: + aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws_ssh_key: ${{ secrets.AWS_SSH_KEY }} + holodeck_config: "tests/holodeck.yaml" + - name: Get public dns name + id: get_public_dns_name + uses: mikefarah/yq@master + with: + cmd: yq '.status.properties[] | select(.name == "public-dns-name") | .value' /github/workspace/.cache/holodeck.yaml + - name: Calculate test vars + id: vars + run: | + COMMIT_SHORT_SHA=${GITHUB_SHA:0:8} + echo "COMMIT_SHORT_SHA=${GITHUB_SHA:0:8}" >> $GITHUB_ENV + LOWERCASE_REPO_OWNER=$(echo "${GITHUB_REPOSITORY_OWNER}" | awk '{print tolower($0)}') + echo "LOWERCASE_REPO_OWNER=$(echo "${GITHUB_REPOSITORY_OWNER}" | awk '{print tolower($0)}')" >> $GITHUB_ENV + + echo "OPERATOR_VERSION=${COMMIT_SHORT_SHA}" >> $GITHUB_ENV + echo "OPERATOR_IMAGE=ghcr.io/${LOWERCASE_REPO_OWNER}/gpu-operator" >> $GITHUB_ENV + echo 
"VALIDATOR_VERSION=${COMMIT_SHORT_SHA}" >> $GITHUB_ENV + echo "VALIDATOR_IMAGE=ghcr.io/${LOWERCASE_REPO_OWNER}/gpu-operator/gpu-operator-validator" >> $GITHUB_ENV + + echo "instance_hostname=ubuntu@${{ steps.get_public_dns_name.outputs.result }}" >> $GITHUB_ENV + echo "private_key=${{ github.workspace }}/key.pem" >> $GITHUB_ENV + - name: Run e2e tests + env: + GPU_PRODUCT_NAME: "Tesla-T4" + SKIP_LAUNCH: "true" + CONTAINER_RUNTIME: "containerd" + TEST_CASE: "./tests/cases/defaults.sh" + run: | + echo "${{ secrets.AWS_SSH_KEY }}" > ${private_key} && chmod 400 ${private_key} + ./tests/ci-run-e2e.sh ${OPERATOR_IMAGE} ${OPERATOR_VERSION} ${VALIDATOR_IMAGE} ${VALIDATOR_VERSION} ${GPU_PRODUCT_NAME} ${TEST_CASE} || rc=$? + ./tests/scripts/pull.sh /tmp/logs logs + exit $rc + - name: Archive test logs + if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: containerd-e2e-test-logs + path: ./logs/ + retention-days: 15 + + e2e-tests-nvidiadriver: + needs: [build-gpu-operator, build-gpu-operator-validator] + runs-on: linux-amd64-cpu4 + steps: + - uses: actions/checkout@v4 + name: Check out code + - name: Set up Holodeck + uses: NVIDIA/holodeck@v0.2.1 + with: + aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws_ssh_key: ${{ secrets.AWS_SSH_KEY }} + holodeck_config: "tests/holodeck.yaml" + - name: Get public dns name + id: get_public_dns_name + uses: mikefarah/yq@master + with: + cmd: yq '.status.properties[] | select(.name == "public-dns-name") | .value' /github/workspace/.cache/holodeck.yaml + - name: Calculate test vars + id: vars + run: | + COMMIT_SHORT_SHA=${GITHUB_SHA:0:8} + echo "COMMIT_SHORT_SHA=${GITHUB_SHA:0:8}" >> $GITHUB_ENV + LOWERCASE_REPO_OWNER=$(echo "${GITHUB_REPOSITORY_OWNER}" | awk '{print tolower($0)}') + echo "LOWERCASE_REPO_OWNER=$(echo "${GITHUB_REPOSITORY_OWNER}" | awk '{print tolower($0)}')" >> $GITHUB_ENV + + echo "OPERATOR_VERSION=${COMMIT_SHORT_SHA}" >> $GITHUB_ENV + echo "OPERATOR_IMAGE=ghcr.io/${LOWERCASE_REPO_OWNER}/gpu-operator" >> $GITHUB_ENV + echo "VALIDATOR_VERSION=${COMMIT_SHORT_SHA}" >> $GITHUB_ENV + echo "VALIDATOR_IMAGE=ghcr.io/${LOWERCASE_REPO_OWNER}/gpu-operator/gpu-operator-validator" >> $GITHUB_ENV + + echo "instance_hostname=ubuntu@${{ steps.get_public_dns_name.outputs.result }}" >> $GITHUB_ENV + echo "private_key=${{ github.workspace }}/key.pem" >> $GITHUB_ENV + - name: Run e2e tests + env: + GPU_PRODUCT_NAME: "Tesla-T4" + SKIP_LAUNCH: "true" + CONTAINER_RUNTIME: "containerd" + TEST_CASE: "./tests/cases/nvidia-driver.sh" + run: | + echo "${{ secrets.AWS_SSH_KEY }}" > ${private_key} && chmod 400 ${private_key} + ./tests/ci-run-e2e.sh ${OPERATOR_IMAGE} ${OPERATOR_VERSION} ${VALIDATOR_IMAGE} ${VALIDATOR_VERSION} ${GPU_PRODUCT_NAME} ${TEST_CASE} || rc=$? 
+ ./tests/scripts/pull.sh /tmp/logs logs + exit $rc + - name: Archive test logs + if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: nvidiadriver-e2e-test-logs + path: ./logs/ + retention-days: 15 diff --git a/.github/workflows/pre-sanity.yml b/.github/workflows/pre-sanity.yml deleted file mode 100644 index 7e2ef5822..000000000 --- a/.github/workflows/pre-sanity.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Run pre sanity - -# run this workflow for each commit -on: [pull_request] - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Build dev image - run: make .build-image - - - name: Build - run: make docker-build - - - name: Tests - run: make docker-coverage - - - name: Checks - run: make docker-check diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 898348232..c4b2969c9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -95,13 +95,13 @@ unit-tests: build:gpu-operator: extends: - .image-build - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator build:gpu-operator-validator: extends: - .image-build - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator-validator .e2e_defaults: @@ -111,13 +111,13 @@ build:gpu-operator-validator: TF_VAR_additional_ingress_ip_ranges: '["216.228.112.0/26", "217.111.27.192/26"]' # These should match the images generated by the deploy step. # TODO: Should these use the staging release instead? - OPERATOR_VERSION: "${CI_COMMIT_SHORT_SHA}-${DIST}" + OPERATOR_VERSION: "${CI_COMMIT_SHORT_SHA}" OPERATOR_IMAGE: "${CI_REGISTRY_IMAGE}" - VALIDATOR_VERSION: "${CI_COMMIT_SHORT_SHA}-${DIST}" + VALIDATOR_VERSION: "${CI_COMMIT_SHORT_SHA}" VALIDATOR_IMAGE: "${CI_REGISTRY_IMAGE}/gpu-operator-validator" GPU_PRODUCT_NAME: "Tesla-T4" extends: - - .dist-ubi8 + - .dist-ubi9 except: variables: - $CI_COMMIT_MESSAGE =~ /skip-end-to-end-tests/ diff --git a/.golangci.yml b/.golangci.yml index f8bc00608..3ba1bf0b3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -22,3 +22,4 @@ linters-settings: excludes: - G101 - G404 + - G115 diff --git a/.nvidia-ci.yml b/.nvidia-ci.yml index 5fd0a5924..bbf7392cc 100644 --- a/.nvidia-ci.yml +++ b/.nvidia-ci.yml @@ -14,13 +14,13 @@ include: ref: "2023.10.09" variables: - # Release "devel"-tagged images off the master branch - RELEASE_DEVEL_BRANCH: "master" + # Release "devel"-tagged images off the main branch + RELEASE_DEVEL_BRANCH: "main" DEVEL_RELEASE_IMAGE_VERSION: "devel" # On the multi-arch builder we don't need the qemu setup. 
SKIP_QEMU_SETUP: "1" # Define the public staging registry - STAGING_REGISTRY: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging + STAGING_REGISTRY: ghcr.io/nvidia STAGING_VERSION: ${CI_COMMIT_SHORT_SHA} GIT_SUBMODULE_PATHS: cnt-ci @@ -47,21 +47,21 @@ variables: - !reference [.regctl-setup, before_script] - apk add --no-cache make bash - > - regctl manifest get ${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST} --list > /dev/null && echo "${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST}" || ( echo "${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST} does not exist" && sleep infinity ) + regctl manifest get ${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION} --list > /dev/null && echo "${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}" || ( echo "${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION} does not exist" && sleep infinity ) script: - regctl registry login "${OUT_REGISTRY}" -u "${OUT_REGISTRY_USER}" -p "${OUT_REGISTRY_TOKEN}" - - make IMAGE=${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST} OUT_IMAGE=${OUT_IMAGE_NAME}:${CI_COMMIT_SHORT_SHA}-${DIST} push-${DIST} + - make IMAGE=${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION} OUT_IMAGE=${OUT_IMAGE_NAME}:${CI_COMMIT_SHORT_SHA} push-${DIST} image:gpu-operator: extends: - .image-pull - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator image:gpu-operator-validator: extends: - .image-pull - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator-validator variables: OUT_IMAGE_NAME: "${CI_REGISTRY_IMAGE}/gpu-operator-validator" @@ -80,7 +80,7 @@ image:gpu-operator-validator: stage: scan image: "${PULSE_IMAGE}" variables: - IMAGE: "${IMAGE_NAME}:${CI_COMMIT_SHORT_SHA}-${DIST}" + IMAGE: "${IMAGE_NAME}:${CI_COMMIT_SHORT_SHA}" IMAGE_ARCHIVE: "gpu-operator.tar" except: variables: @@ -109,7 +109,7 @@ image:gpu-operator-validator: .scan:gpu-operator: extends: - .scan - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator needs: - image:gpu-operator @@ -129,7 +129,7 @@ scan:gpu-operator-arm64: .scan:gpu-operator-validator: extends: - .scan - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator-validator needs: - image:gpu-operator-validator @@ -158,13 +158,13 @@ scan:gpu-operator-validator-arm64: release:ngc-gpu-operator: extends: - .release:ngc - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator release:ngc-gpu-operator-validator: extends: - .release:ngc - - .dist-ubi8 + - .dist-ubi9 - .target-gpu-operator-validator variables: IN_IMAGE_NAME: "gpu-operator-validator" @@ -174,53 +174,56 @@ release:ngc-gpu-operator-validator: # Download the ngc cli binary for use in the sign steps .ngccli-setup: before_script: - - export NGCLI_VERSION=3.31.0 - - apk add --no-cache curl - - curl -sSLo ngccli_linux.zip https://api.ngc.nvidia.com/v2/resources/nvidia/ngc-apps/ngc_cli/versions/${NGCLI_VERSION}/files/ngccli_linux.zip + - apt-get update && apt-get install -y curl unzip jq + - | + if [ -z "${NGCCLI_VERSION}" ]; then + NGC_VERSION_URL="https://api.ngc.nvidia.com/v2/resources/nvidia/ngc-apps/ngc_cli/versions" + # Extract the latest version from the JSON data using jq + export NGCCLI_VERSION=$(curl -s $NGC_VERSION_URL | jq -r '.recipe.latestVersionIdStr') + fi + echo "NGCCLI_VERSION ${NGCCLI_VERSION}" + - curl -sSLo ngccli_linux.zip https://api.ngc.nvidia.com/v2/resources/nvidia/ngc-apps/ngc_cli/versions/${NGCCLI_VERSION}/files/ngccli_linux.zip - unzip ngccli_linux.zip - chmod u+x ngc-cli/ngc - - export PATH=$(pwd)/ngc-cli:${PATH} - - ngc config set --api_key=${NGC_REGISTRY_TOKEN} --org=nvidia # .sign forms the base of the deployment jobs which signs images in the 
CI registry. # This is extended with the image name and version to be deployed. .sign:ngc: + image: ubuntu:latest stage: sign + rules: + - if: $CI_COMMIT_TAG variables: - IMAGE_NAME: "${OUT_IMAGE_NAME}" - VERSION: "${OUT_IMAGE_VERSION}" + NGC_CLI_API_KEY: "${NGC_REGISTRY_TOKEN}" + IMAGE_NAME: "${NGC_REGISTRY_IMAGE}" + IMAGE_TAG: "${CI_COMMIT_TAG}" retry: max: 2 before_script: - !reference [.ngccli-setup, before_script] - # We ensure that the OUT_IMAGE_VERSION is set - - 'echo Version: ${IMAGE_NAME} ; [[ -n "${VERSION}" ]] || exit 1' - - apk add --no-cache bash + # We ensure that the IMAGE_NAME and IMAGE_TAG is set + - 'echo Image Name: ${IMAGE_NAME} && [[ -n "${IMAGE_NAME}" ]] || exit 1' + - 'echo Image Tag: ${IMAGE_TAG} && [[ -n "${IMAGE_TAG}" ]] || exit 1' script: - - 'echo "Signing the image ${IMAGE_NAME}:${VERSION}"' - - 'echo "ngc registry image publish --source ${IMAGE_NAME}:${VERSION} ${IMAGE_NAME}:${VERSION} --public --discoverable --allow-guest --sign"' + - 'echo "Signing the image ${IMAGE_NAME}:${IMAGE_TAG}"' + - ngc-cli/ngc registry image publish --source ${IMAGE_NAME}:${IMAGE_TAG} ${IMAGE_NAME}:${IMAGE_TAG} --public --discoverable --allow-guest --sign --org nvidia sign:ngc-gpu-operator: extends: - .sign:ngc needs: - release:ngc-gpu-operator - rules: - - if: $CI_COMMIT_TAG variables: - OUT_IMAGE_VERSION: "${CI_COMMIT_TAG}" - OUT_IMAGE_NAME: "${NGC_REGISTRY_IMAGE}" # This needs to change for the gpu-operator and gpu-operator-validator + IMAGE_TAG: "${CI_COMMIT_TAG}" sign:ngc-gpu-operator-validator: extends: - .sign:ngc needs: - release:ngc-gpu-operator-validator - rules: - - if: $CI_COMMIT_TAG variables: - OUT_IMAGE_VERSION: "${CI_COMMIT_TAG}" - OUT_IMAGE_NAME: "${NGC_PROD_VALIDATOR_IMAGE}" + IMAGE_NAME: "${NGC_PROD_VALIDATOR_IMAGE}" + IMAGE_TAG: "${CI_COMMIT_TAG}" .schedule_defaults: rules: @@ -234,7 +237,7 @@ sign:ngc-gpu-operator-validator: OPERATOR_IMAGE: "${STAGING_REGISTRY}/gpu-operator" VALIDATOR_VERSION: "${CI_COMMIT_SHORT_SHA}" VALIDATOR_IMAGE: "${STAGING_REGISTRY}/gpu-operator-validator" - TARGET_DRIVER_VERSION: "525.147.05" + TARGET_DRIVER_VERSION: "565.57.01" .e2e_tests: extends: @@ -284,7 +287,7 @@ e2e_tests_containerd_k8s1_27: - .e2e_defaults - .infra_setup_defaults - .schedule_defaults - + .clean_infra: extends: - .cnt_kube_clean @@ -308,7 +311,7 @@ cnt_kube_setup_containerd_k8s1_25: variables: TF_VAR_kubernetes_version: "1.25.11" TF_VAR_gpu_device_name: "NVIDIA-A100-PCIE-40GB" - + cnt_kube_clean_containerd_k8s1_25: extends: - .clean_infra diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c1b5ac802..5c05a316b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,34 +15,41 @@ The NVIDIA GPU Operator is an open-source product built and maintained by NVIDIA ## Architecture The GPU Operator is made up of the following software components - each of the components runs as a container, including NVIDIA drivers. 
The associated code is linked to each of the components below: -* [gpu-operator](https://gitlab.com/nvidia/kubernetes/gpu-operator) +* [gpu-operator](https://github.com/NVIDIA/gpu-operator) * [k8s-device-plugin](https://github.com/NVIDIA/k8s-device-plugin) -* [driver](https://gitlab.com/nvidia/container-images/driver) -* [container-toolkit](https://gitlab.com/nvidia/container-toolkit/container-config) +* [driver](https://github.com/NVIDIA/gpu-driver-container) +* [container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit) * [dcgm-exporter](https://github.com/NVIDIA/dcgm-exporter) -* [gpu-feature-discovery](https://gitlab.com/nvidia/kubernetes/gpu-feature-discovery) -* [mig-manager](https://gitlab.com/nvidia/cloud-native/mig-parted) -* [samples](https://gitlab.com/nvidia/container-images/samples/-/tree/main/cuda/archive/rhel-ubi8/vector-add) +* [gpu-feature-discovery](https://github.com/NVIDIA/k8s-device-plugin) +* [mig-manager](https://github.com/NVIDIA/mig-parted) +* [sandbox-device-plugin](https://github.com/NVIDIA/kubevirt-gpu-device-plugin) +* [vgpu-device-manager](https://github.com/NVIDIA/vgpu-device-manager) +* [kata-manager](https://github.com/NVIDIA/k8s-kata-manager) +* [samples](https://github.com/NVIDIA/k8s-samples) ``` -gitlab.com/ -├── nvidia/ -│ ├── gpu-operator (CRD and controller logic that implements the reconciliation) -│ ├── k8s-device-plugin (NVIDIA Device Plugin for Kubernetes) -│ ├── driver (NVIDIA Driver qualified for data center GPUs) -│ ├── container-toolkit (NVIDIA Container Toolkit, runtime for Docker) -│ ├── dcgm-exporter (NVIDIA DCGM for monitoring and telemetry) -│ ├── gpu-feature-discovery (NVIDIA GPU Feature Discovery for Kubernetes) -│ ├── mig-manager (NVIDIA Multi-Instance GPU Manager for Kubernetes) -│ ├── samples (CUDA VectorAdd sample used for validation steps) +github.com/ +├── NVIDIA/ +│ ├── gpu-operator (CRD and controller logic that implements the reconciliation) +│ ├── k8s-device-plugin (NVIDIA Device Plugin for Kubernetes) +│ ├── gpu-driver-container (NVIDIA Driver qualified for data center GPUs) +│ ├── nvidia-container-toolkit (NVIDIA Container Toolkit, runtime for Docker) +│ ├── dcgm-exporter (NVIDIA DCGM for monitoring and telemetry) +│ ├── gpu-feature-discovery (NVIDIA GPU Feature Discovery for Kubernetes) +│ ├── mig-manager (NVIDIA Multi-Instance GPU Manager for Kubernetes) +│ ├── sandbox-device-plugin (NVIDIA Device Plugin for sandboxed environments) +│ ├── vgpu-device-manager (NVIDIA vGPU Device Manager for Kubernetes) +│ ├── kata-manager (NVIDIA Kata Manager for Kubernetes) +│ ├── samples (CUDA VectorAdd sample used for validation steps) ``` ## License The NVIDIA GPU Operator is open-source and its components are licensed under the permissive Apache 2.0 license. ## Artifacts -The NVIDIA GPU Operator has three artifacts as part of the product release: +The NVIDIA GPU Operator has the following artifacts as part of the product release: 1. [Source Code](#source-code) +1. [Documentation](#documentation) 1. [Container Images](#container-images) 1. [Helm Charts](#helm-charts) @@ -50,53 +57,27 @@ The GPU Operator releases follow [calendar versioning](https://calver.org/). 
### Source Code -The NVIDIA GPU Operator is available on two external source code repositories: -* GitHub: https://github.com/NVIDIA/gpu-operator -* GitLab: https://gitlab.com/nvidia/kubernetes/gpu-operator +The NVIDIA GPU Operator source code is available on GitHub at https://github.com/NVIDIA/gpu-operator -The product page of the GPU Operator is available on NVIDIA’s official repository on GitHub. GitHub is where we interact primarily with users for issues related to the operator. GitHub is a mirror of the source code repository on GitLab - no development happens on GitHub. +### Documentation -GitLab is where the GPU Operator is actively developed - we leverage GitLab’s CI/CD infrastructure for build, test, package and release of the Operator. GitLab is where we expect users and partners to contribute patches (“Merge Requests” or “MRs”) against the source code repository. MRs do not require explicit contributor license agreements (CLA), but we expect contributors to sign their work. +The official NVIDIA GPU Operator documentation is available at https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/index.html ### Container Images -Releases of the GPU Operator include container images that are currently available on NVIDIA’s Docker Hub [repository](https://hub.docker.com/u/nvidia). In the future, the operator will be available on [NVIDIA NGC Catalog](https://ngc.nvidia.com/). - -The following are the container images (and tag format) that are released: -``` -├── nvidia/ -│ ├── gpu-operator () -│ ├── k8s-device-plugin () -│ ├── driver () -│ ├── container-toolkit () -│ ├── dcgm-exporter () -│ ├── gpu-feature-discovery () -│ ├── mig-manager () -│ ├── samples () -``` +Releases of the GPU Operator include container images that are currently available on [NVIDIA NGC Catalog](https://ngc.nvidia.com/). ### Helm Charts To simplify the deployment, the Operator can be installed using a Helm chart (note only Helm v3 is supported). The documentation for helm installation can be viewed [here](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/getting-started.html#install-helm). -Continuous (‘nightly’) releases of the operator are available. Release milestones are available under ‘stable’. -``` -├── nightly/index.yaml -├── stable/index.yaml (default when installing the operator) -``` + ## Contributions NVIDIA is willing to work with partners for adding platform support for the GPU Operator. The GPU Operator is open-source and permissively licensed under the Apache 2.0 license with only minimal requirements for source code [contributions](#signing). -To get started with building the GPU Operator, follow these steps: - -```shell -$ git clone https://gitlab.com/nvidia/kubernetes/gpu-operator.git -$ cd gpu-operator -$ make .build-image -``` -We also use a CI infrastructure on AWS for nightly and per-change testing on the GPU Operator. This infrastructure is available here: https://gitlab.com/nvidia/container-infrastructure/aws-kube-ci +To file feature requests, bugs, or questions, submit an issue at https://github.com/NVIDIA/gpu-operator/issues -To ensure that the GPU Operator releases can be effectively validated on new platforms, it would be ideal for contributions to make available CI infrastructure (e.g. runners) and associated changes to the CI scripts. +To contribute to the project, file a Pull Request at https://github.com/NVIDIA/gpu-operator/pulls. Contributions do not require explicit contributor license agreements (CLA), but we expect contributors to sign their work. 
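For reference, the Helm v3 install mentioned in the Helm Charts section above can be sketched as follows (the repository URL and namespace follow the official documentation; adjust chart values for your cluster):

```shell
# Add the NVIDIA Helm repository and refresh the local index
helm repo add nvidia https://helm.ngc.nvidia.com/nvidia && helm repo update

# Install the GPU Operator (Helm v3 only); --generate-name picks a release name
helm install --wait --generate-name \
    -n gpu-operator --create-namespace \
    nvidia/gpu-operator
```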
## Signing your work diff --git a/Makefile b/Makefile index 7743795bb..ee7eaabcf 100644 --- a/Makefile +++ b/Makefile @@ -25,23 +25,20 @@ endif include $(CURDIR)/versions.mk MODULE := github.com/NVIDIA/gpu-operator -CUDA_IMAGE ?= nvcr.io/nvidia/cuda BUILDER_IMAGE ?= golang:$(GOLANG_VERSION) -DIST ?= ubi8 ifeq ($(IMAGE_NAME),) REGISTRY ?= nvcr.io/nvidia/cloud-native IMAGE_NAME := $(REGISTRY)/gpu-operator endif -IMAGE_VERSION := $(VERSION) -IMAGE_TAG ?= $(IMAGE_VERSION)-$(DIST) +IMAGE_TAG ?= $(VERSION) IMAGE = $(IMAGE_NAME):$(IMAGE_TAG) BUILDIMAGE ?= $(IMAGE_NAME):$(IMAGE_TAG)-build OUT_IMAGE_NAME ?= $(IMAGE_NAME) OUT_IMAGE_VERSION ?= $(VERSION) -OUT_IMAGE_TAG = $(OUT_IMAGE_VERSION)-$(DIST) +OUT_IMAGE_TAG = $(OUT_IMAGE_VERSION) OUT_IMAGE = $(OUT_IMAGE_NAME):$(OUT_IMAGE_TAG) # CHANNELS define the bundle channels used in the bundle. @@ -76,16 +73,14 @@ endif all: gpu-operator -# Run tests -ENVTEST_ASSETS_DIR=$(shell pwd)/testbin -test: generate check manifests - mkdir -p ${ENVTEST_ASSETS_DIR} - test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.7.0/hack/setup-envtest.sh - source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out - GOOS ?= linux VERSION_PKG = github.com/NVIDIA/gpu-operator/internal/info +PWD = $(shell pwd) +CLIENT_GEN = $(PWD)/bin/client-gen +CONTROLLER_GEN = $(PWD)/bin/controller-gen +KUSTOMIZE = $(PWD)/bin/kustomize + # Build gpu-operator binary gpu-operator: CGO_ENABLED=0 GOOS=$(GOOS) \ @@ -96,15 +91,15 @@ run: generate check manifests go run ./cmd/gpu-operator/... # Install CRDs into a cluster -install: manifests kustomize +install: manifests install-tools $(KUSTOMIZE) build config/crd | kubectl apply -f - # Uninstall CRDs from a cluster -uninstall: manifests kustomize +uninstall: manifests install-tools $(KUSTOMIZE) build config/crd | kubectl delete -f - # Deploy gpu-operator in the configured Kubernetes cluster in ~/.kube/config -deploy: manifests generate-env kustomize +deploy: manifests generate-env install-tools cd config/manager && $(KUSTOMIZE) edit set image gpu-operator=${IMAGE} $(KUSTOMIZE) build config/default | kubectl apply -f - @@ -116,26 +111,25 @@ undeploy: $(KUSTOMIZE) build config/default | kubectl delete -f - # Generate manifests e.g. CRD, RBAC etc. -manifests: controller-gen +manifests: install-tools + @echo "- Generating CRDs from the codebase" $(CONTROLLER_GEN) rbac:roleName=gpu-operator-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases # Generate code -generate: controller-gen +generate: install-tools $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." 
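The `install-tools` target referenced above (its definition appears at the end of this Makefile diff) replaces the per-tool download rules it deletes below: it greps the blank imports out of `tools/tools.go` and runs `go install` for each one into `$(PROJECT_DIR)/bin`, using the versions pinned in `tools/go.mod`. A minimal sketch of such a `tools/tools.go` file — the exact import list is an assumption inferred from the `$(CLIENT_GEN)`, `$(CONTROLLER_GEN)`, and `$(KUSTOMIZE)` binaries the Makefile expects, not taken from this diff:

```go
//go:build tools

// Package tools pins build-time tool dependencies so that
// `go install -modfile=tools/go.mod` resolves exact versions.
package tools

import (
	// Illustrative import list; the repository's actual tools.go
	// may pin a different set of tools.
	_ "k8s.io/code-generator/cmd/client-gen"
	_ "sigs.k8s.io/controller-tools/cmd/controller-gen"
	_ "sigs.k8s.io/kustomize/kustomize/v5"
)
```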
-# Download controller-gen locally if necessary -CONTROLLER_GEN = $(shell pwd)/bin/controller-gen -controller-gen: - @GOBIN=$(PROJECT_DIR)/bin GO111MODULE=on $(GO_CMD) install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0 - -# Download kustomize locally if necessary -KUSTOMIZE = $(shell pwd)/bin/kustomize -kustomize: - @GOBIN=$(PROJECT_DIR)/bin GO111MODULE=on $(GO_CMD) install sigs.k8s.io/kustomize/kustomize/v4@v5.1.1 +generate-clientset: install-tools + $(CLIENT_GEN) --go-header-file=$(CURDIR)/hack/boilerplate.go.txt \ + --clientset-name "versioned" \ + --output-dir $(CURDIR)/api \ + --output-pkg $(MODULE)/api \ + --input-base $(CURDIR)/api \ + --input nvidia/v1,nvidia/v1alpha1 # Generate bundle manifests and metadata, then validate generated files. .PHONY: bundle -bundle: manifests kustomize +bundle: manifests install-tools operator-sdk generate kustomize manifests -q cd config/manager && $(KUSTOMIZE) edit set image gpu-operator=$(IMAGE) $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) @@ -158,7 +152,7 @@ push-bundle-image: build-bundle-image CMDS := $(patsubst ./cmd/%/,%,$(sort $(dir $(wildcard ./cmd/*/)))) CMD_TARGETS := $(patsubst %,cmd-%, $(CMDS)) -CHECK_TARGETS := lint license-check validate-modules +CHECK_TARGETS := lint license-check validate-modules validate-generated-assets MAKE_TARGETS := build check coverage cmds $(CMD_TARGETS) $(CHECK_TARGETS) DOCKER_TARGETS := $(patsubst %,docker-%, $(MAKE_TARGETS)) .PHONY: $(MAKE_TARGETS) $(DOCKER_TARGETS) @@ -226,6 +220,11 @@ $(CMD_TARGETS): cmd-%: build: go build ./... +sync-crds: + @echo "- Syncing CRDs into Helm and OLM packages..." + cp $(PROJECT_DIR)/config/crd/bases/* $(PROJECT_DIR)/deployments/gpu-operator/crds + cp $(PROJECT_DIR)/config/crd/bases/* $(PROJECT_DIR)/bundle/manifests + validate-modules: @echo "- Verifying that the dependencies have expected content..." go mod verify @@ -244,6 +243,10 @@ validate-helm-values: cmds sed '/^--/d' | \ ./gpuop-cfg validate clusterpolicy --input="-" +validate-generated-assets: manifests generate generate-clientset sync-crds + @echo "- Verifying that the generated code and manifests are in-sync..." + @git diff --exit-code -- api config + COVERAGE_FILE := coverage.out unit-test: build go list -f {{.Dir}} $(MODULE)/... | grep -v /tests/e2e \ @@ -254,8 +257,8 @@ coverage: unit-test go tool cover -func=$(COVERAGE_FILE).no-mocks ##### Public rules ##### -DISTRIBUTIONS := ubi8 -DEFAULT_PUSH_TARGET := ubi8 +DISTRIBUTIONS := ubi9 +DEFAULT_PUSH_TARGET := ubi9 PUSH_TARGETS := $(patsubst %,push-%, $(DISTRIBUTIONS)) BUILD_TARGETS := $(patsubst %,build-%, $(DISTRIBUTIONS)) @@ -276,17 +279,6 @@ $(ALL_TARGETS): %: make -C $(SUBCOMPONENT) $(*) else -# For the default push target we also push a short tag equal to the version. 
-# We skip this for the development release -DEVEL_RELEASE_IMAGE_VERSION ?= devel -ifneq ($(strip $(VERSION)),$(DEVEL_RELEASE_IMAGE_VERSION)) -push-$(DEFAULT_PUSH_TARGET): push-short -endif - -push-%: DIST = $(*) -push-short: DIST = $(DEFAULT_PUSH_TARGET) - -build-%: DIST = $(*) build-%: DOCKERFILE = $(CURDIR)/docker/Dockerfile $(DISTRIBUTIONS): %: build-% @@ -296,9 +288,6 @@ $(BUILD_TARGETS): build-%: $(DOCKER_BUILD_OPTIONS) \ $(DOCKER_BUILD_PLATFORM_OPTIONS) \ --tag $(IMAGE) \ - --build-arg BASE_DIST="$(DIST)" \ - --build-arg CUDA_IMAGE="$(CUDA_IMAGE)" \ - --build-arg CUDA_VERSION="$(CUDA_VERSION)" \ --build-arg VERSION="$(VERSION)" \ --build-arg BUILDER_IMAGE="$(BUILDER_IMAGE)" \ --build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \ @@ -311,3 +300,8 @@ $(BUILD_TARGETS): build-%: docker-image: OUT_IMAGE ?= $(IMAGE_NAME):$(IMAGE_TAG) docker-image: ${DEFAULT_PUSH_TARGET} endif + +install-tools: + @echo Installing tools from tools.go + export GOBIN=$(PROJECT_DIR)/bin && \ + grep '^\s*_' tools/tools.go | awk '{print $$2}' | xargs -tI % $(GO_CMD) install -mod=readonly -modfile=tools/go.mod % diff --git a/RELEASE.md b/RELEASE.md deleted file mode 100644 index 84a0fbf19..000000000 --- a/RELEASE.md +++ /dev/null @@ -1,54 +0,0 @@ -# Artifacts - -This repository outputs two artifacts: -- The GPU Operator container. -- The GPU Operator helm chart. - -# Versioning - -This repository follows Semantic Versioning 2.0.0 -The artifacts will be versioned as follows: -- **nightly**: 1.0.0-nightly-shortSHA - - The version names contain "nightly". - - Leading number of pre-release version tracked in master. - - build meta data of SHA hash is appended to version string. - - May be buggy - - Features may be removed at any time. - - The API may change in incompatible ways in a later software release without notice. - - Recommended for use in short-lived clusters - - when Docker supports it, we'll use +shortSHA in SemVer 2.0 fashion -- **alpha**: 1.0.0-alpha.N - - The version names contain "alpha". - - May be buggy, enabling features may expose bugs. - - Features may be removed at any time. - - The API may change in incompatible ways in a later software release without notice. - - Recommended for use in short-lived clusters and tech previews -- **beta**: 1.0.0-rc.N - - The version names contain "rc". - - Code is well tested. Using the feature is considered safe. - - Features will not be dropped. - - The API may change in incompatible ways but when this happens we will provided instructions for migrating to the next version. - - Recommended for only non-business-critical uses. -- **stable**: 1.X.Y - - The version follows [SemVer 2.0.0](http://semver.org/) - - Stable versions of features will appear in released software for many subsequent versions. 
- -*Note: Some of the items were copied from Kubernetes' own API versioning policy: [https://kubernetes.io/docs/concepts/overview/kubernetes-api/](https://kubernetes.io/docs/concepts/overview/kubernetes-api/)* - -**The GPU Operator helm chart MUST be the same as the GPU Operator container.** - -# Nightly Release Process - -After every commit that successfully passes all tests, the following actions are performed: -- The GPU Operator container is persisted on the dockerhub registry (e.g: 1.X.Y-nightly-shortSHA) -- The GPU Operator helm chart is pushed on the repository's github pages (e.g: 1.X.Y-nightly-shortSHA) - -# Release Process - -After a commit that successfully passes all tests, a maintainer tags that commit with the release version (e.g: `1.0.0-alpha.1`): -- The GPU Operator container is persisted on the dockerhub and NGC registry - - The tag for that container is the commit tag -- The GPU Operator helm chart is pushed on the repository's github pages and NGC registry - - The tag for that container is the commit tag -- The Readme should be updated with the changelog -- The helm chart values.yaml and Chart.yaml should be updated with the newer version diff --git a/api/v1/clusterpolicy_types.go b/api/nvidia/v1/clusterpolicy_types.go similarity index 98% rename from api/v1/clusterpolicy_types.go rename to api/nvidia/v1/clusterpolicy_types.go index 44d6c95f3..07e424761 100644 --- a/api/v1/clusterpolicy_types.go +++ b/api/nvidia/v1/clusterpolicy_types.go @@ -34,6 +34,10 @@ import ( // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +const ( + ClusterPolicyCRDName = "ClusterPolicy" +) + // ClusterPolicySpec defines the desired state of ClusterPolicy type ClusterPolicySpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster @@ -88,6 +92,8 @@ type ClusterPolicySpec struct { KataManager KataManagerSpec `json:"kataManager,omitempty"` // CCManager component spec CCManager CCManagerSpec `json:"ccManager,omitempty"` + // HostPaths defines various paths on the host needed by GPU Operator components + HostPaths HostPathsSpec `json:"hostPaths,omitempty"` } // Runtime defines container runtime type @@ -144,6 +150,20 @@ type OperatorSpec struct { UseOpenShiftDriverToolkit *bool `json:"use_ocp_driver_toolkit,omitempty"` } +// HostPathsSpec defines various paths on the host needed by GPU Operator components +type HostPathsSpec struct { + // RootFS represents the path to the root filesystem of the host. + // This is used by components that need to interact with the host filesystem + // and as such this must be a chroot-able filesystem. + // Examples include the MIG Manager and Toolkit Container which may need to + // stop, start, or restart systemd services. + RootFS string `json:"rootFS,omitempty"` + + // DriverInstallDir represents the root at which driver files including libraries, + // config files, and executables can be found. + DriverInstallDir string `json:"driverInstallDir,omitempty"` +} + // EnvVar represents an environment variable present in a Container. type EnvVar struct { // Name of the environment variable. 
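A sketch of how the new `hostPaths` fields surface in a ClusterPolicy manifest, using the `nvidia.com/v1` group/version registered later in this diff; the two path values are illustrative assumptions, not defaults defined in this change:

```yaml
apiVersion: nvidia.com/v1
kind: ClusterPolicy
metadata:
  name: cluster-policy
spec:
  hostPaths:
    # Assumed value: must be a chroot-able view of the host filesystem,
    # e.g. for the MIG Manager or Toolkit Container to restart systemd services.
    rootFS: "/"
    # Assumed value: root under which driver libraries, config files,
    # and executables can be found.
    driverInstallDir: "/run/nvidia/driver"
```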
@@ -734,6 +754,11 @@ type DevicePluginSpec struct { // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Configuration for the NVIDIA Device Plugin via the ConfigMap" Config *DevicePluginConfig `json:"config,omitempty"` + + // Optional: MPS related configuration for the NVIDIA Device Plugin + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="MPS related configuration for the NVIDIA Device Plugin" + MPS *MPSConfig `json:"mps,omitempty"` } // DevicePluginConfig defines ConfigMap name for NVIDIA Device Plugin config @@ -752,6 +777,17 @@ type DevicePluginConfig struct { Default string `json:"default,omitempty"` } +// MPSConfig defines MPS related configuration for the NVIDIA Device Plugin +type MPSConfig struct { + // Root defines the MPS root path on the host + // +kubebuilder:validation:Optional + // +kubebuilder:default=/run/nvidia/mps + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="MPS root path on the host" + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:text" + Root string `json:"root,omitempty"` +} + // SandboxDevicePluginSpec defines the properties for the NVIDIA Sandbox Device Plugin deployment type SandboxDevicePluginSpec struct { // Enabled indicates if deployment of NVIDIA Sandbox Device Plugin through operator is enabled @@ -964,7 +1000,7 @@ type DCGMSpec struct { // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:advanced,urn:alm:descriptor:com.tectonic.ui:text" Env []EnvVar `json:"env,omitempty"` - // HostPort represents host port that needs to be bound for DCGM engine (Default: 5555) + // Deprecated: HostPort represents host port that needs to be bound for DCGM engine (Default: 5555) // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Host port to bind for DCGM engine" // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:number" HostPort int32 `json:"hostPort,omitempty"` @@ -1643,6 +1679,8 @@ type ClusterPolicyStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty"` } +// +genclient +// +genclient:nonNamespaced // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster diff --git a/api/nvidia/v1/groupversion_info.go b/api/nvidia/v1/groupversion_info.go new file mode 100644 index 000000000..84e81998c --- /dev/null +++ b/api/nvidia/v1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1 contains API Schema definitions for the clusterpolicy v1 API group +// +kubebuilder:object:generate=true +// +groupName=nvidia.com +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "nvidia.com", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/nvidia/v1/zz_generated.deepcopy.go b/api/nvidia/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..6d876f675 --- /dev/null +++ b/api/nvidia/v1/zz_generated.deepcopy.go @@ -0,0 +1,1627 @@ +//go:build !ignore_autogenerated + +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + "github.com/NVIDIA/k8s-kata-manager/api/v1alpha1/config" + "github.com/NVIDIA/k8s-operator-libs/api/upgrade/v1alpha1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CCManagerSpec) DeepCopyInto(out *CCManagerSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CCManagerSpec. +func (in *CCManagerSpec) DeepCopy() *CCManagerSpec { + if in == nil { + return nil + } + out := new(CCManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDIConfigSpec) DeepCopyInto(out *CDIConfigSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIConfigSpec. 
+func (in *CDIConfigSpec) DeepCopy() *CDIConfigSpec { + if in == nil { + return nil + } + out := new(CDIConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CUDAValidatorSpec) DeepCopyInto(out *CUDAValidatorSpec) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CUDAValidatorSpec. +func (in *CUDAValidatorSpec) DeepCopy() *CUDAValidatorSpec { + if in == nil { + return nil + } + out := new(CUDAValidatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPolicy) DeepCopyInto(out *ClusterPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicy. +func (in *ClusterPolicy) DeepCopy() *ClusterPolicy { + if in == nil { + return nil + } + out := new(ClusterPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPolicyList) DeepCopyInto(out *ClusterPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicyList. +func (in *ClusterPolicyList) DeepCopy() *ClusterPolicyList { + if in == nil { + return nil + } + out := new(ClusterPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterPolicySpec) DeepCopyInto(out *ClusterPolicySpec) { + *out = *in + in.Operator.DeepCopyInto(&out.Operator) + in.Daemonsets.DeepCopyInto(&out.Daemonsets) + in.Driver.DeepCopyInto(&out.Driver) + in.Toolkit.DeepCopyInto(&out.Toolkit) + in.DevicePlugin.DeepCopyInto(&out.DevicePlugin) + in.DCGMExporter.DeepCopyInto(&out.DCGMExporter) + in.DCGM.DeepCopyInto(&out.DCGM) + in.NodeStatusExporter.DeepCopyInto(&out.NodeStatusExporter) + in.GPUFeatureDiscovery.DeepCopyInto(&out.GPUFeatureDiscovery) + out.MIG = in.MIG + in.MIGManager.DeepCopyInto(&out.MIGManager) + in.PSP.DeepCopyInto(&out.PSP) + in.PSA.DeepCopyInto(&out.PSA) + in.Validator.DeepCopyInto(&out.Validator) + if in.GPUDirectStorage != nil { + in, out := &in.GPUDirectStorage, &out.GPUDirectStorage + *out = new(GPUDirectStorageSpec) + (*in).DeepCopyInto(*out) + } + if in.GDRCopy != nil { + in, out := &in.GDRCopy, &out.GDRCopy + *out = new(GDRCopySpec) + (*in).DeepCopyInto(*out) + } + in.SandboxWorkloads.DeepCopyInto(&out.SandboxWorkloads) + in.VFIOManager.DeepCopyInto(&out.VFIOManager) + in.SandboxDevicePlugin.DeepCopyInto(&out.SandboxDevicePlugin) + in.VGPUManager.DeepCopyInto(&out.VGPUManager) + in.VGPUDeviceManager.DeepCopyInto(&out.VGPUDeviceManager) + in.CDI.DeepCopyInto(&out.CDI) + in.KataManager.DeepCopyInto(&out.KataManager) + in.CCManager.DeepCopyInto(&out.CCManager) + out.HostPaths = in.HostPaths +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicySpec. +func (in *ClusterPolicySpec) DeepCopy() *ClusterPolicySpec { + if in == nil { + return nil + } + out := new(ClusterPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPolicyStatus) DeepCopyInto(out *ClusterPolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicyStatus. +func (in *ClusterPolicyStatus) DeepCopy() *ClusterPolicyStatus { + if in == nil { + return nil + } + out := new(ClusterPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerProbeSpec) DeepCopyInto(out *ContainerProbeSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerProbeSpec. +func (in *ContainerProbeSpec) DeepCopy() *ContainerProbeSpec { + if in == nil { + return nil + } + out := new(ContainerProbeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DCGMExporterMetricsConfig) DeepCopyInto(out *DCGMExporterMetricsConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DCGMExporterMetricsConfig. +func (in *DCGMExporterMetricsConfig) DeepCopy() *DCGMExporterMetricsConfig { + if in == nil { + return nil + } + out := new(DCGMExporterMetricsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DCGMExporterServiceMonitorConfig) DeepCopyInto(out *DCGMExporterServiceMonitorConfig) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HonorLabels != nil { + in, out := &in.HonorLabels, &out.HonorLabels + *out = new(bool) + **out = **in + } + if in.AdditionalLabels != nil { + in, out := &in.AdditionalLabels, &out.AdditionalLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Relabelings != nil { + in, out := &in.Relabelings, &out.Relabelings + *out = make([]*monitoringv1.RelabelConfig, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(monitoringv1.RelabelConfig) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DCGMExporterServiceMonitorConfig. +func (in *DCGMExporterServiceMonitorConfig) DeepCopy() *DCGMExporterServiceMonitorConfig { + if in == nil { + return nil + } + out := new(DCGMExporterServiceMonitorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DCGMExporterSpec) DeepCopyInto(out *DCGMExporterSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } + if in.MetricsConfig != nil { + in, out := &in.MetricsConfig, &out.MetricsConfig + *out = new(DCGMExporterMetricsConfig) + **out = **in + } + if in.ServiceMonitor != nil { + in, out := &in.ServiceMonitor, &out.ServiceMonitor + *out = new(DCGMExporterServiceMonitorConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DCGMExporterSpec. +func (in *DCGMExporterSpec) DeepCopy() *DCGMExporterSpec { + if in == nil { + return nil + } + out := new(DCGMExporterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DCGMSpec) DeepCopyInto(out *DCGMSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DCGMSpec. 
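// The Relabelings copy above handles a slice of pointers: the slice header,
// each non-nil element pointer, and the pointed-to struct are all duplicated,
// and nil entries stay nil. A self-contained sketch of the same pattern using
// a hypothetical RelabelConfig stand-in for monitoringv1.RelabelConfig:

package main

import "fmt"

type RelabelConfig struct{ Action string }

func (in *RelabelConfig) DeepCopyInto(out *RelabelConfig) { *out = *in }

// copyRelabelings duplicates a []*RelabelConfig the way the generated code
// does: allocate the slice, then allocate and fill each non-nil element.
func copyRelabelings(in []*RelabelConfig) []*RelabelConfig {
	if in == nil {
		return nil
	}
	out := make([]*RelabelConfig, len(in))
	for i := range in {
		if in[i] != nil {
			out[i] = new(RelabelConfig)
			in[i].DeepCopyInto(out[i])
		}
	}
	return out
}

func main() {
	in := []*RelabelConfig{{Action: "keep"}, nil}
	out := copyRelabelings(in)
	in[0].Action = "drop"                     // mutating the source...
	fmt.Println(out[0].Action, out[1] == nil) // ...leaves the copy intact: keep true
}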
+func (in *DCGMSpec) DeepCopy() *DCGMSpec { + if in == nil { + return nil + } + out := new(DCGMSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonsetsSpec) DeepCopyInto(out *DaemonsetsSpec) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonsetsSpec. +func (in *DaemonsetsSpec) DeepCopy() *DaemonsetsSpec { + if in == nil { + return nil + } + out := new(DaemonsetsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevicePluginConfig) DeepCopyInto(out *DevicePluginConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevicePluginConfig. +func (in *DevicePluginConfig) DeepCopy() *DevicePluginConfig { + if in == nil { + return nil + } + out := new(DevicePluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevicePluginSpec) DeepCopyInto(out *DevicePluginSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(DevicePluginConfig) + **out = **in + } + if in.MPS != nil { + in, out := &in.MPS, &out.MPS + *out = new(MPSConfig) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevicePluginSpec. +func (in *DevicePluginSpec) DeepCopy() *DevicePluginSpec { + if in == nil { + return nil + } + out := new(DevicePluginSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverCertConfigSpec) DeepCopyInto(out *DriverCertConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverCertConfigSpec. 
+func (in *DriverCertConfigSpec) DeepCopy() *DriverCertConfigSpec { + if in == nil { + return nil + } + out := new(DriverCertConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverLicensingConfigSpec) DeepCopyInto(out *DriverLicensingConfigSpec) { + *out = *in + if in.NLSEnabled != nil { + in, out := &in.NLSEnabled, &out.NLSEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverLicensingConfigSpec. +func (in *DriverLicensingConfigSpec) DeepCopy() *DriverLicensingConfigSpec { + if in == nil { + return nil + } + out := new(DriverLicensingConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverManagerSpec) DeepCopyInto(out *DriverManagerSpec) { + *out = *in + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverManagerSpec. +func (in *DriverManagerSpec) DeepCopy() *DriverManagerSpec { + if in == nil { + return nil + } + out := new(DriverManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverRepoConfigSpec) DeepCopyInto(out *DriverRepoConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverRepoConfigSpec. +func (in *DriverRepoConfigSpec) DeepCopy() *DriverRepoConfigSpec { + if in == nil { + return nil + } + out := new(DriverRepoConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { + *out = *in + if in.UseNvidiaDriverCRD != nil { + in, out := &in.UseNvidiaDriverCRD, &out.UseNvidiaDriverCRD + *out = new(bool) + **out = **in + } + if in.UsePrecompiled != nil { + in, out := &in.UsePrecompiled, &out.UsePrecompiled + *out = new(bool) + **out = **in + } + if in.UseOpenKernelModules != nil { + in, out := &in.UseOpenKernelModules, &out.UseOpenKernelModules + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(ContainerProbeSpec) + **out = **in + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(ContainerProbeSpec) + **out = **in + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(ContainerProbeSpec) + **out = **in + } + if in.GPUDirectRDMA != nil { + in, out := &in.GPUDirectRDMA, &out.GPUDirectRDMA + *out = new(GPUDirectRDMASpec) + (*in).DeepCopyInto(*out) + } + if in.UpgradePolicy != nil { + in, out := &in.UpgradePolicy, &out.UpgradePolicy + *out = new(v1alpha1.DriverUpgradePolicySpec) + (*in).DeepCopyInto(*out) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Manager.DeepCopyInto(&out.Manager) + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } + if in.RepoConfig != nil { + in, out := &in.RepoConfig, &out.RepoConfig + *out = new(DriverRepoConfigSpec) + **out = **in + } + if in.CertConfig != nil { + in, out := &in.CertConfig, &out.CertConfig + *out = new(DriverCertConfigSpec) + **out = **in + } + if in.LicensingConfig != nil { + in, out := &in.LicensingConfig, &out.LicensingConfig + *out = new(DriverLicensingConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.VirtualTopology != nil { + in, out := &in.VirtualTopology, &out.VirtualTopology + *out = new(VirtualTopologyConfigSpec) + **out = **in + } + if in.KernelModuleConfig != nil { + in, out := &in.KernelModuleConfig, &out.KernelModuleConfig + *out = new(KernelModuleConfigSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec. +func (in *DriverSpec) DeepCopy() *DriverSpec { + if in == nil { + return nil + } + out := new(DriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverValidatorSpec) DeepCopyInto(out *DriverValidatorSpec) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverValidatorSpec. +func (in *DriverValidatorSpec) DeepCopy() *DriverValidatorSpec { + if in == nil { + return nil + } + out := new(DriverValidatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvVar) DeepCopyInto(out *EnvVar) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. +func (in *EnvVar) DeepCopy() *EnvVar { + if in == nil { + return nil + } + out := new(EnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GDRCopySpec) DeepCopyInto(out *GDRCopySpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GDRCopySpec. +func (in *GDRCopySpec) DeepCopy() *GDRCopySpec { + if in == nil { + return nil + } + out := new(GDRCopySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GPUDirectRDMASpec) DeepCopyInto(out *GPUDirectRDMASpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.UseHostMOFED != nil { + in, out := &in.UseHostMOFED, &out.UseHostMOFED + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUDirectRDMASpec. +func (in *GPUDirectRDMASpec) DeepCopy() *GPUDirectRDMASpec { + if in == nil { + return nil + } + out := new(GPUDirectRDMASpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GPUDirectStorageSpec) DeepCopyInto(out *GPUDirectStorageSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUDirectStorageSpec. +func (in *GPUDirectStorageSpec) DeepCopy() *GPUDirectStorageSpec { + if in == nil { + return nil + } + out := new(GPUDirectStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GPUFeatureDiscoverySpec) DeepCopyInto(out *GPUFeatureDiscoverySpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUFeatureDiscoverySpec. +func (in *GPUFeatureDiscoverySpec) DeepCopy() *GPUFeatureDiscoverySpec { + if in == nil { + return nil + } + out := new(GPUFeatureDiscoverySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathsSpec) DeepCopyInto(out *HostPathsSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathsSpec. +func (in *HostPathsSpec) DeepCopy() *HostPathsSpec { + if in == nil { + return nil + } + out := new(HostPathsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitContainerSpec) DeepCopyInto(out *InitContainerSpec) { + *out = *in + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitContainerSpec. +func (in *InitContainerSpec) DeepCopy() *InitContainerSpec { + if in == nil { + return nil + } + out := new(InitContainerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KataManagerSpec) DeepCopyInto(out *KataManagerSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(config.Config) + (*in).DeepCopyInto(*out) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KataManagerSpec. +func (in *KataManagerSpec) DeepCopy() *KataManagerSpec { + if in == nil { + return nil + } + out := new(KataManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KernelModuleConfigSpec) DeepCopyInto(out *KernelModuleConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelModuleConfigSpec. +func (in *KernelModuleConfigSpec) DeepCopy() *KernelModuleConfigSpec { + if in == nil { + return nil + } + out := new(KernelModuleConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MIGGPUClientsConfigSpec) DeepCopyInto(out *MIGGPUClientsConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MIGGPUClientsConfigSpec. +func (in *MIGGPUClientsConfigSpec) DeepCopy() *MIGGPUClientsConfigSpec { + if in == nil { + return nil + } + out := new(MIGGPUClientsConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MIGManagerSpec) DeepCopyInto(out *MIGManagerSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(MIGPartedConfigSpec) + **out = **in + } + if in.GPUClientsConfig != nil { + in, out := &in.GPUClientsConfig, &out.GPUClientsConfig + *out = new(MIGGPUClientsConfigSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MIGManagerSpec. +func (in *MIGManagerSpec) DeepCopy() *MIGManagerSpec { + if in == nil { + return nil + } + out := new(MIGManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MIGPartedConfigSpec) DeepCopyInto(out *MIGPartedConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MIGPartedConfigSpec. +func (in *MIGPartedConfigSpec) DeepCopy() *MIGPartedConfigSpec { + if in == nil { + return nil + } + out := new(MIGPartedConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MIGSpec) DeepCopyInto(out *MIGSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MIGSpec. +func (in *MIGSpec) DeepCopy() *MIGSpec { + if in == nil { + return nil + } + out := new(MIGSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MPSConfig) DeepCopyInto(out *MPSConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MPSConfig. 
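// Note the two pointer-copy styles in the generated code above: fields whose
// element type contains only value fields (for example *MIGPartedConfigSpec)
// are copied with a plain **out = **in, while element types holding maps or
// slices (for example *ResourceRequirements) must go through DeepCopyInto.
// A sketch with hypothetical Flat and Nested types shows why:

package main

import "fmt"

// Flat has only value fields, so *copy = *orig fully detaches the copy.
type Flat struct{ Name string }

// Nested holds a map, so a plain assignment would alias the map storage.
type Nested struct{ Labels map[string]string }

func (in *Nested) DeepCopyInto(out *Nested) {
	*out = *in
	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	}
}

func main() {
	f := &Flat{Name: "a"}
	fc := new(Flat)
	*fc = *f // safe: nothing is shared afterwards

	n := &Nested{Labels: map[string]string{"k": "v"}}
	nc := new(Nested)
	n.DeepCopyInto(nc)
	n.Labels["k"] = "changed"
	fmt.Println(fc.Name, nc.Labels["k"]) // a v
}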
+func (in *MPSConfig) DeepCopy() *MPSConfig { + if in == nil { + return nil + } + out := new(MPSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeStatusExporterSpec) DeepCopyInto(out *NodeStatusExporterSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatusExporterSpec. +func (in *NodeStatusExporterSpec) DeepCopy() *NodeStatusExporterSpec { + if in == nil { + return nil + } + out := new(NodeStatusExporterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { + *out = *in + in.InitContainer.DeepCopyInto(&out.InitContainer) + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.UseOpenShiftDriverToolkit != nil { + in, out := &in.UseOpenShiftDriverToolkit, &out.UseOpenShiftDriverToolkit + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec. +func (in *OperatorSpec) DeepCopy() *OperatorSpec { + if in == nil { + return nil + } + out := new(OperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PSASpec) DeepCopyInto(out *PSASpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PSASpec. +func (in *PSASpec) DeepCopy() *PSASpec { + if in == nil { + return nil + } + out := new(PSASpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PSPSpec) DeepCopyInto(out *PSPSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PSPSpec. +func (in *PSPSpec) DeepCopy() *PSPSpec { + if in == nil { + return nil + } + out := new(PSPSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PluginValidatorSpec) DeepCopyInto(out *PluginValidatorSpec) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginValidatorSpec. +func (in *PluginValidatorSpec) DeepCopy() *PluginValidatorSpec { + if in == nil { + return nil + } + out := new(PluginValidatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. +func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { + if in == nil { + return nil + } + out := new(ResourceRequirements) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateSpec) DeepCopyInto(out *RollingUpdateSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateSpec. +func (in *RollingUpdateSpec) DeepCopy() *RollingUpdateSpec { + if in == nil { + return nil + } + out := new(RollingUpdateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SandboxDevicePluginSpec) DeepCopyInto(out *SandboxDevicePluginSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SandboxDevicePluginSpec. +func (in *SandboxDevicePluginSpec) DeepCopy() *SandboxDevicePluginSpec { + if in == nil { + return nil + } + out := new(SandboxDevicePluginSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SandboxWorkloadsSpec) DeepCopyInto(out *SandboxWorkloadsSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SandboxWorkloadsSpec. 
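// ResourceRequirements above deep-copies corev1.ResourceList values with
// val.DeepCopy() rather than plain assignment, because resource.Quantity
// carries internal cached state that must not be shared between objects.
// A runnable sketch, assuming k8s.io/api and k8s.io/apimachinery are on the
// module path:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// copyResourceList mirrors the generated Limits/Requests handling: allocate a
// fresh map and deep-copy every Quantity value.
func copyResourceList(in corev1.ResourceList) corev1.ResourceList {
	if in == nil {
		return nil
	}
	out := make(corev1.ResourceList, len(in))
	for key, val := range in {
		out[key] = val.DeepCopy()
	}
	return out
}

func main() {
	limits := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("500m"),
		corev1.ResourceMemory: resource.MustParse("256Mi"),
	}
	copied := copyResourceList(limits)
	fmt.Println(copied.Cpu().String(), copied.Memory().String()) // 500m 256Mi
}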
+func (in *SandboxWorkloadsSpec) DeepCopy() *SandboxWorkloadsSpec { + if in == nil { + return nil + } + out := new(SandboxWorkloadsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ToolkitSpec) DeepCopyInto(out *ToolkitSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ToolkitSpec. +func (in *ToolkitSpec) DeepCopy() *ToolkitSpec { + if in == nil { + return nil + } + out := new(ToolkitSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ToolkitValidatorSpec) DeepCopyInto(out *ToolkitValidatorSpec) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ToolkitValidatorSpec. +func (in *ToolkitValidatorSpec) DeepCopy() *ToolkitValidatorSpec { + if in == nil { + return nil + } + out := new(ToolkitValidatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VFIOManagerSpec) DeepCopyInto(out *VFIOManagerSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } + in.DriverManager.DeepCopyInto(&out.DriverManager) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VFIOManagerSpec. +func (in *VFIOManagerSpec) DeepCopy() *VFIOManagerSpec { + if in == nil { + return nil + } + out := new(VFIOManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VFIOPCIValidatorSpec) DeepCopyInto(out *VFIOPCIValidatorSpec) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VFIOPCIValidatorSpec. 
+func (in *VFIOPCIValidatorSpec) DeepCopy() *VFIOPCIValidatorSpec { + if in == nil { + return nil + } + out := new(VFIOPCIValidatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VGPUDeviceManagerSpec) DeepCopyInto(out *VGPUDeviceManagerSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(VGPUDevicesConfigSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUDeviceManagerSpec. +func (in *VGPUDeviceManagerSpec) DeepCopy() *VGPUDeviceManagerSpec { + if in == nil { + return nil + } + out := new(VGPUDeviceManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VGPUDevicesConfigSpec) DeepCopyInto(out *VGPUDevicesConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUDevicesConfigSpec. +func (in *VGPUDevicesConfigSpec) DeepCopy() *VGPUDevicesConfigSpec { + if in == nil { + return nil + } + out := new(VGPUDevicesConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VGPUDevicesValidatorSpec) DeepCopyInto(out *VGPUDevicesValidatorSpec) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUDevicesValidatorSpec. +func (in *VGPUDevicesValidatorSpec) DeepCopy() *VGPUDevicesValidatorSpec { + if in == nil { + return nil + } + out := new(VGPUDevicesValidatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VGPUManagerSpec) DeepCopyInto(out *VGPUManagerSpec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } + in.DriverManager.DeepCopyInto(&out.DriverManager) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUManagerSpec. 
+func (in *VGPUManagerSpec) DeepCopy() *VGPUManagerSpec { + if in == nil { + return nil + } + out := new(VGPUManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VGPUManagerValidatorSpec) DeepCopyInto(out *VGPUManagerValidatorSpec) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUManagerValidatorSpec. +func (in *VGPUManagerValidatorSpec) DeepCopy() *VGPUManagerValidatorSpec { + if in == nil { + return nil + } + out := new(VGPUManagerValidatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatorSpec) DeepCopyInto(out *ValidatorSpec) { + *out = *in + in.Plugin.DeepCopyInto(&out.Plugin) + in.Toolkit.DeepCopyInto(&out.Toolkit) + in.Driver.DeepCopyInto(&out.Driver) + in.CUDA.DeepCopyInto(&out.CUDA) + in.VFIOPCI.DeepCopyInto(&out.VFIOPCI) + in.VGPUManager.DeepCopyInto(&out.VGPUManager) + in.VGPUDevices.DeepCopyInto(&out.VGPUDevices) + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatorSpec. +func (in *ValidatorSpec) DeepCopy() *ValidatorSpec { + if in == nil { + return nil + } + out := new(ValidatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualTopologyConfigSpec) DeepCopyInto(out *VirtualTopologyConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualTopologyConfigSpec. +func (in *VirtualTopologyConfigSpec) DeepCopy() *VirtualTopologyConfigSpec { + if in == nil { + return nil + } + out := new(VirtualTopologyConfigSpec) + in.DeepCopyInto(out) + return out +} diff --git a/api/nvidia/v1alpha1/groupversion_info.go b/api/nvidia/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..f9e561289 --- /dev/null +++ b/api/nvidia/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+**/ + +// Package v1alpha1 contains API Schema definitions for the nvidia v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=nvidia.com +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "nvidia.com", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1alpha1/nvidiadriver_types.go b/api/nvidia/v1alpha1/nvidiadriver_types.go similarity index 99% rename from api/v1alpha1/nvidiadriver_types.go rename to api/nvidia/v1alpha1/nvidiadriver_types.go index 489c3394f..86bae0b48 100644 --- a/api/v1alpha1/nvidiadriver_types.go +++ b/api/nvidia/v1alpha1/nvidiadriver_types.go @@ -462,6 +462,8 @@ type NVIDIADriverStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty"` } +// +genclient +// +genclient:nonNamespaced //+kubebuilder:object:root=true //+kubebuilder:subresource:status //+kubebuilder:resource:scope=Cluster,shortName={"nvd","nvdriver","nvdrivers"} diff --git a/api/v1alpha1/nvidiadriver_types_test.go b/api/nvidia/v1alpha1/nvidiadriver_types_test.go similarity index 100% rename from api/v1alpha1/nvidiadriver_types_test.go rename to api/nvidia/v1alpha1/nvidiadriver_types_test.go diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/nvidia/v1alpha1/zz_generated.deepcopy.go similarity index 100% rename from api/v1alpha1/zz_generated.deepcopy.go rename to api/nvidia/v1alpha1/zz_generated.deepcopy.go diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go deleted file mode 100644 index 781d71c53..000000000 --- a/api/v1/groupversion_info.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1 contains API Schema definitions for the clusterpolicy v1 API group -// +kubebuilder:object:generate=true -// +groupName=nvidia.com -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "nvidia.com", Version: "v1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go deleted file mode 100644 index d80b36109..000000000 --- a/api/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,1591 +0,0 @@ -//go:build !ignore_autogenerated - -/** -# Copyright (c) NVIDIA CORPORATION. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -**/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1 - -import ( - "github.com/NVIDIA/k8s-kata-manager/api/v1alpha1/config" - "github.com/NVIDIA/k8s-operator-libs/api/upgrade/v1alpha1" - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CCManagerSpec) DeepCopyInto(out *CCManagerSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CCManagerSpec. -func (in *CCManagerSpec) DeepCopy() *CCManagerSpec { - if in == nil { - return nil - } - out := new(CCManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CDIConfigSpec) DeepCopyInto(out *CDIConfigSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIConfigSpec. -func (in *CDIConfigSpec) DeepCopy() *CDIConfigSpec { - if in == nil { - return nil - } - out := new(CDIConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CUDAValidatorSpec) DeepCopyInto(out *CUDAValidatorSpec) { - *out = *in - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CUDAValidatorSpec. -func (in *CUDAValidatorSpec) DeepCopy() *CUDAValidatorSpec { - if in == nil { - return nil - } - out := new(CUDAValidatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterPolicy) DeepCopyInto(out *ClusterPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicy. -func (in *ClusterPolicy) DeepCopy() *ClusterPolicy { - if in == nil { - return nil - } - out := new(ClusterPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterPolicyList) DeepCopyInto(out *ClusterPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicyList. -func (in *ClusterPolicyList) DeepCopy() *ClusterPolicyList { - if in == nil { - return nil - } - out := new(ClusterPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterPolicySpec) DeepCopyInto(out *ClusterPolicySpec) { - *out = *in - in.Operator.DeepCopyInto(&out.Operator) - in.Daemonsets.DeepCopyInto(&out.Daemonsets) - in.Driver.DeepCopyInto(&out.Driver) - in.Toolkit.DeepCopyInto(&out.Toolkit) - in.DevicePlugin.DeepCopyInto(&out.DevicePlugin) - in.DCGMExporter.DeepCopyInto(&out.DCGMExporter) - in.DCGM.DeepCopyInto(&out.DCGM) - in.NodeStatusExporter.DeepCopyInto(&out.NodeStatusExporter) - in.GPUFeatureDiscovery.DeepCopyInto(&out.GPUFeatureDiscovery) - out.MIG = in.MIG - in.MIGManager.DeepCopyInto(&out.MIGManager) - in.PSP.DeepCopyInto(&out.PSP) - in.PSA.DeepCopyInto(&out.PSA) - in.Validator.DeepCopyInto(&out.Validator) - if in.GPUDirectStorage != nil { - in, out := &in.GPUDirectStorage, &out.GPUDirectStorage - *out = new(GPUDirectStorageSpec) - (*in).DeepCopyInto(*out) - } - if in.GDRCopy != nil { - in, out := &in.GDRCopy, &out.GDRCopy - *out = new(GDRCopySpec) - (*in).DeepCopyInto(*out) - } - in.SandboxWorkloads.DeepCopyInto(&out.SandboxWorkloads) - in.VFIOManager.DeepCopyInto(&out.VFIOManager) - in.SandboxDevicePlugin.DeepCopyInto(&out.SandboxDevicePlugin) - in.VGPUManager.DeepCopyInto(&out.VGPUManager) - in.VGPUDeviceManager.DeepCopyInto(&out.VGPUDeviceManager) - in.CDI.DeepCopyInto(&out.CDI) - in.KataManager.DeepCopyInto(&out.KataManager) - in.CCManager.DeepCopyInto(&out.CCManager) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicySpec. -func (in *ClusterPolicySpec) DeepCopy() *ClusterPolicySpec { - if in == nil { - return nil - } - out := new(ClusterPolicySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *ClusterPolicyStatus) DeepCopyInto(out *ClusterPolicyStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]metav1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicyStatus. -func (in *ClusterPolicyStatus) DeepCopy() *ClusterPolicyStatus { - if in == nil { - return nil - } - out := new(ClusterPolicyStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerProbeSpec) DeepCopyInto(out *ContainerProbeSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerProbeSpec. -func (in *ContainerProbeSpec) DeepCopy() *ContainerProbeSpec { - if in == nil { - return nil - } - out := new(ContainerProbeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DCGMExporterMetricsConfig) DeepCopyInto(out *DCGMExporterMetricsConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DCGMExporterMetricsConfig. -func (in *DCGMExporterMetricsConfig) DeepCopy() *DCGMExporterMetricsConfig { - if in == nil { - return nil - } - out := new(DCGMExporterMetricsConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DCGMExporterServiceMonitorConfig) DeepCopyInto(out *DCGMExporterServiceMonitorConfig) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.HonorLabels != nil { - in, out := &in.HonorLabels, &out.HonorLabels - *out = new(bool) - **out = **in - } - if in.AdditionalLabels != nil { - in, out := &in.AdditionalLabels, &out.AdditionalLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Relabelings != nil { - in, out := &in.Relabelings, &out.Relabelings - *out = make([]*monitoringv1.RelabelConfig, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(monitoringv1.RelabelConfig) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DCGMExporterServiceMonitorConfig. -func (in *DCGMExporterServiceMonitorConfig) DeepCopy() *DCGMExporterServiceMonitorConfig { - if in == nil { - return nil - } - out := new(DCGMExporterServiceMonitorConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DCGMExporterSpec) DeepCopyInto(out *DCGMExporterSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } - if in.MetricsConfig != nil { - in, out := &in.MetricsConfig, &out.MetricsConfig - *out = new(DCGMExporterMetricsConfig) - **out = **in - } - if in.ServiceMonitor != nil { - in, out := &in.ServiceMonitor, &out.ServiceMonitor - *out = new(DCGMExporterServiceMonitorConfig) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DCGMExporterSpec. -func (in *DCGMExporterSpec) DeepCopy() *DCGMExporterSpec { - if in == nil { - return nil - } - out := new(DCGMExporterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DCGMSpec) DeepCopyInto(out *DCGMSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DCGMSpec. -func (in *DCGMSpec) DeepCopy() *DCGMSpec { - if in == nil { - return nil - } - out := new(DCGMSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonsetsSpec) DeepCopyInto(out *DaemonsetsSpec) { - *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateSpec) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonsetsSpec. -func (in *DaemonsetsSpec) DeepCopy() *DaemonsetsSpec { - if in == nil { - return nil - } - out := new(DaemonsetsSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DevicePluginConfig) DeepCopyInto(out *DevicePluginConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevicePluginConfig. -func (in *DevicePluginConfig) DeepCopy() *DevicePluginConfig { - if in == nil { - return nil - } - out := new(DevicePluginConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DevicePluginSpec) DeepCopyInto(out *DevicePluginSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = new(DevicePluginConfig) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevicePluginSpec. -func (in *DevicePluginSpec) DeepCopy() *DevicePluginSpec { - if in == nil { - return nil - } - out := new(DevicePluginSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DriverCertConfigSpec) DeepCopyInto(out *DriverCertConfigSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverCertConfigSpec. -func (in *DriverCertConfigSpec) DeepCopy() *DriverCertConfigSpec { - if in == nil { - return nil - } - out := new(DriverCertConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DriverLicensingConfigSpec) DeepCopyInto(out *DriverLicensingConfigSpec) { - *out = *in - if in.NLSEnabled != nil { - in, out := &in.NLSEnabled, &out.NLSEnabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverLicensingConfigSpec. -func (in *DriverLicensingConfigSpec) DeepCopy() *DriverLicensingConfigSpec { - if in == nil { - return nil - } - out := new(DriverLicensingConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DriverManagerSpec) DeepCopyInto(out *DriverManagerSpec) { - *out = *in - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverManagerSpec. -func (in *DriverManagerSpec) DeepCopy() *DriverManagerSpec { - if in == nil { - return nil - } - out := new(DriverManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DriverRepoConfigSpec) DeepCopyInto(out *DriverRepoConfigSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverRepoConfigSpec. -func (in *DriverRepoConfigSpec) DeepCopy() *DriverRepoConfigSpec { - if in == nil { - return nil - } - out := new(DriverRepoConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { - *out = *in - if in.UseNvidiaDriverCRD != nil { - in, out := &in.UseNvidiaDriverCRD, &out.UseNvidiaDriverCRD - *out = new(bool) - **out = **in - } - if in.UsePrecompiled != nil { - in, out := &in.UsePrecompiled, &out.UsePrecompiled - *out = new(bool) - **out = **in - } - if in.UseOpenKernelModules != nil { - in, out := &in.UseOpenKernelModules, &out.UseOpenKernelModules - *out = new(bool) - **out = **in - } - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.StartupProbe != nil { - in, out := &in.StartupProbe, &out.StartupProbe - *out = new(ContainerProbeSpec) - **out = **in - } - if in.LivenessProbe != nil { - in, out := &in.LivenessProbe, &out.LivenessProbe - *out = new(ContainerProbeSpec) - **out = **in - } - if in.ReadinessProbe != nil { - in, out := &in.ReadinessProbe, &out.ReadinessProbe - *out = new(ContainerProbeSpec) - **out = **in - } - if in.GPUDirectRDMA != nil { - in, out := &in.GPUDirectRDMA, &out.GPUDirectRDMA - *out = new(GPUDirectRDMASpec) - (*in).DeepCopyInto(*out) - } - if in.UpgradePolicy != nil { - in, out := &in.UpgradePolicy, &out.UpgradePolicy - *out = new(v1alpha1.DriverUpgradePolicySpec) - (*in).DeepCopyInto(*out) - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.Manager.DeepCopyInto(&out.Manager) - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } - if in.RepoConfig != nil { - in, out := &in.RepoConfig, &out.RepoConfig - *out = new(DriverRepoConfigSpec) - **out = **in - } - if in.CertConfig != nil { - in, out := &in.CertConfig, &out.CertConfig - *out = new(DriverCertConfigSpec) - **out = **in - } - if in.LicensingConfig != nil { - in, out := &in.LicensingConfig, &out.LicensingConfig - *out = new(DriverLicensingConfigSpec) - (*in).DeepCopyInto(*out) - } - if in.VirtualTopology != nil { - in, out := &in.VirtualTopology, &out.VirtualTopology - *out = new(VirtualTopologyConfigSpec) - **out = **in - } - if in.KernelModuleConfig != nil { - in, out := &in.KernelModuleConfig, &out.KernelModuleConfig - *out = new(KernelModuleConfigSpec) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec. -func (in *DriverSpec) DeepCopy() *DriverSpec { - if in == nil { - return nil - } - out := new(DriverSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
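// Editor's illustration (not part of the diff): why the per-field allocations
// in DriverSpec's DeepCopyInto above matter in practice — mutating a DeepCopy
// must never leak into the object it was copied from (for example an informer
// cache entry). A short sketch, assuming the relocated api/nvidia/v1 package
// introduced elsewhere in this change; DriverSpec.Enabled is *bool per the
// generated code above.
package main

import (
	"fmt"

	nvidiav1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1"
)

func main() {
	enabled := true
	spec := &nvidiav1.DriverSpec{Enabled: &enabled}

	cp := spec.DeepCopy()
	*cp.Enabled = false // flips only the copy's own allocation

	fmt.Println(*spec.Enabled, *cp.Enabled) // true false
}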
-func (in *DriverValidatorSpec) DeepCopyInto(out *DriverValidatorSpec) { - *out = *in - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverValidatorSpec. -func (in *DriverValidatorSpec) DeepCopy() *DriverValidatorSpec { - if in == nil { - return nil - } - out := new(DriverValidatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvVar) DeepCopyInto(out *EnvVar) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. -func (in *EnvVar) DeepCopy() *EnvVar { - if in == nil { - return nil - } - out := new(EnvVar) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GDRCopySpec) DeepCopyInto(out *GDRCopySpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GDRCopySpec. -func (in *GDRCopySpec) DeepCopy() *GDRCopySpec { - if in == nil { - return nil - } - out := new(GDRCopySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GPUDirectRDMASpec) DeepCopyInto(out *GPUDirectRDMASpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.UseHostMOFED != nil { - in, out := &in.UseHostMOFED, &out.UseHostMOFED - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUDirectRDMASpec. -func (in *GPUDirectRDMASpec) DeepCopy() *GPUDirectRDMASpec { - if in == nil { - return nil - } - out := new(GPUDirectRDMASpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GPUDirectStorageSpec) DeepCopyInto(out *GPUDirectStorageSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUDirectStorageSpec. 
-func (in *GPUDirectStorageSpec) DeepCopy() *GPUDirectStorageSpec { - if in == nil { - return nil - } - out := new(GPUDirectStorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GPUFeatureDiscoverySpec) DeepCopyInto(out *GPUFeatureDiscoverySpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUFeatureDiscoverySpec. -func (in *GPUFeatureDiscoverySpec) DeepCopy() *GPUFeatureDiscoverySpec { - if in == nil { - return nil - } - out := new(GPUFeatureDiscoverySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InitContainerSpec) DeepCopyInto(out *InitContainerSpec) { - *out = *in - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitContainerSpec. -func (in *InitContainerSpec) DeepCopy() *InitContainerSpec { - if in == nil { - return nil - } - out := new(InitContainerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KataManagerSpec) DeepCopyInto(out *KataManagerSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = new(config.Config) - (*in).DeepCopyInto(*out) - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KataManagerSpec. -func (in *KataManagerSpec) DeepCopy() *KataManagerSpec { - if in == nil { - return nil - } - out := new(KataManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KernelModuleConfigSpec) DeepCopyInto(out *KernelModuleConfigSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelModuleConfigSpec. 
-func (in *KernelModuleConfigSpec) DeepCopy() *KernelModuleConfigSpec { - if in == nil { - return nil - } - out := new(KernelModuleConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MIGGPUClientsConfigSpec) DeepCopyInto(out *MIGGPUClientsConfigSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MIGGPUClientsConfigSpec. -func (in *MIGGPUClientsConfigSpec) DeepCopy() *MIGGPUClientsConfigSpec { - if in == nil { - return nil - } - out := new(MIGGPUClientsConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MIGManagerSpec) DeepCopyInto(out *MIGManagerSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = new(MIGPartedConfigSpec) - **out = **in - } - if in.GPUClientsConfig != nil { - in, out := &in.GPUClientsConfig, &out.GPUClientsConfig - *out = new(MIGGPUClientsConfigSpec) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MIGManagerSpec. -func (in *MIGManagerSpec) DeepCopy() *MIGManagerSpec { - if in == nil { - return nil - } - out := new(MIGManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MIGPartedConfigSpec) DeepCopyInto(out *MIGPartedConfigSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MIGPartedConfigSpec. -func (in *MIGPartedConfigSpec) DeepCopy() *MIGPartedConfigSpec { - if in == nil { - return nil - } - out := new(MIGPartedConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MIGSpec) DeepCopyInto(out *MIGSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MIGSpec. -func (in *MIGSpec) DeepCopy() *MIGSpec { - if in == nil { - return nil - } - out := new(MIGSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeStatusExporterSpec) DeepCopyInto(out *NodeStatusExporterSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatusExporterSpec. -func (in *NodeStatusExporterSpec) DeepCopy() *NodeStatusExporterSpec { - if in == nil { - return nil - } - out := new(NodeStatusExporterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { - *out = *in - in.InitContainer.DeepCopyInto(&out.InitContainer) - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.UseOpenShiftDriverToolkit != nil { - in, out := &in.UseOpenShiftDriverToolkit, &out.UseOpenShiftDriverToolkit - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec. -func (in *OperatorSpec) DeepCopy() *OperatorSpec { - if in == nil { - return nil - } - out := new(OperatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PSASpec) DeepCopyInto(out *PSASpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PSASpec. -func (in *PSASpec) DeepCopy() *PSASpec { - if in == nil { - return nil - } - out := new(PSASpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PSPSpec) DeepCopyInto(out *PSPSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PSPSpec. -func (in *PSPSpec) DeepCopy() *PSPSpec { - if in == nil { - return nil - } - out := new(PSPSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginValidatorSpec) DeepCopyInto(out *PluginValidatorSpec) { - *out = *in - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginValidatorSpec. 
-func (in *PluginValidatorSpec) DeepCopy() *PluginValidatorSpec { - if in == nil { - return nil - } - out := new(PluginValidatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make(corev1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = make(corev1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. -func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { - if in == nil { - return nil - } - out := new(ResourceRequirements) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateSpec) DeepCopyInto(out *RollingUpdateSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateSpec. -func (in *RollingUpdateSpec) DeepCopy() *RollingUpdateSpec { - if in == nil { - return nil - } - out := new(RollingUpdateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SandboxDevicePluginSpec) DeepCopyInto(out *SandboxDevicePluginSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SandboxDevicePluginSpec. -func (in *SandboxDevicePluginSpec) DeepCopy() *SandboxDevicePluginSpec { - if in == nil { - return nil - } - out := new(SandboxDevicePluginSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SandboxWorkloadsSpec) DeepCopyInto(out *SandboxWorkloadsSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SandboxWorkloadsSpec. -func (in *SandboxWorkloadsSpec) DeepCopy() *SandboxWorkloadsSpec { - if in == nil { - return nil - } - out := new(SandboxWorkloadsSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ToolkitSpec) DeepCopyInto(out *ToolkitSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ToolkitSpec. -func (in *ToolkitSpec) DeepCopy() *ToolkitSpec { - if in == nil { - return nil - } - out := new(ToolkitSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ToolkitValidatorSpec) DeepCopyInto(out *ToolkitValidatorSpec) { - *out = *in - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ToolkitValidatorSpec. -func (in *ToolkitValidatorSpec) DeepCopy() *ToolkitValidatorSpec { - if in == nil { - return nil - } - out := new(ToolkitValidatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VFIOManagerSpec) DeepCopyInto(out *VFIOManagerSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } - in.DriverManager.DeepCopyInto(&out.DriverManager) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VFIOManagerSpec. -func (in *VFIOManagerSpec) DeepCopy() *VFIOManagerSpec { - if in == nil { - return nil - } - out := new(VFIOManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VFIOPCIValidatorSpec) DeepCopyInto(out *VFIOPCIValidatorSpec) { - *out = *in - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VFIOPCIValidatorSpec. -func (in *VFIOPCIValidatorSpec) DeepCopy() *VFIOPCIValidatorSpec { - if in == nil { - return nil - } - out := new(VFIOPCIValidatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VGPUDeviceManagerSpec) DeepCopyInto(out *VGPUDeviceManagerSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = new(VGPUDevicesConfigSpec) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUDeviceManagerSpec. -func (in *VGPUDeviceManagerSpec) DeepCopy() *VGPUDeviceManagerSpec { - if in == nil { - return nil - } - out := new(VGPUDeviceManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VGPUDevicesConfigSpec) DeepCopyInto(out *VGPUDevicesConfigSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUDevicesConfigSpec. -func (in *VGPUDevicesConfigSpec) DeepCopy() *VGPUDevicesConfigSpec { - if in == nil { - return nil - } - out := new(VGPUDevicesConfigSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VGPUDevicesValidatorSpec) DeepCopyInto(out *VGPUDevicesValidatorSpec) { - *out = *in - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUDevicesValidatorSpec. -func (in *VGPUDevicesValidatorSpec) DeepCopy() *VGPUDevicesValidatorSpec { - if in == nil { - return nil - } - out := new(VGPUDevicesValidatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VGPUManagerSpec) DeepCopyInto(out *VGPUManagerSpec) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } - in.DriverManager.DeepCopyInto(&out.DriverManager) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUManagerSpec. -func (in *VGPUManagerSpec) DeepCopy() *VGPUManagerSpec { - if in == nil { - return nil - } - out := new(VGPUManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VGPUManagerValidatorSpec) DeepCopyInto(out *VGPUManagerValidatorSpec) { - *out = *in - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUManagerValidatorSpec. -func (in *VGPUManagerValidatorSpec) DeepCopy() *VGPUManagerValidatorSpec { - if in == nil { - return nil - } - out := new(VGPUManagerValidatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ValidatorSpec) DeepCopyInto(out *ValidatorSpec) { - *out = *in - in.Plugin.DeepCopyInto(&out.Plugin) - in.Toolkit.DeepCopyInto(&out.Toolkit) - in.Driver.DeepCopyInto(&out.Driver) - in.CUDA.DeepCopyInto(&out.CUDA) - in.VFIOPCI.DeepCopyInto(&out.VFIOPCI) - in.VGPUManager.DeepCopyInto(&out.VGPUManager) - in.VGPUDevices.DeepCopyInto(&out.VGPUDevices) - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatorSpec. -func (in *ValidatorSpec) DeepCopy() *ValidatorSpec { - if in == nil { - return nil - } - out := new(ValidatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VirtualTopologyConfigSpec) DeepCopyInto(out *VirtualTopologyConfigSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualTopologyConfigSpec. -func (in *VirtualTopologyConfigSpec) DeepCopy() *VirtualTopologyConfigSpec { - if in == nil { - return nil - } - out := new(VirtualTopologyConfigSpec) - in.DeepCopyInto(out) - return out -} diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go deleted file mode 100644 index 9c70d751f..000000000 --- a/api/v1alpha1/groupversion_info.go +++ /dev/null @@ -1,36 +0,0 @@ -/** -# Copyright (c) NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-**/ - -// Package v1alpha1 contains API Schema definitions for the nvidia v1alpha1 API group -// +kubebuilder:object:generate=true -// +groupName=nvidia.com -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "nvidia.com", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/api/versioned/clientset.go b/api/versioned/clientset.go new file mode 100644 index 000000000..539960f0f --- /dev/null +++ b/api/versioned/clientset.go @@ -0,0 +1,133 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + nvidiav1 "github.com/NVIDIA/gpu-operator/api/versioned/typed/nvidia/v1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/versioned/typed/nvidia/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + NvidiaV1() nvidiav1.NvidiaV1Interface + NvidiaV1alpha1() nvidiav1alpha1.NvidiaV1alpha1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + nvidiaV1 *nvidiav1.NvidiaV1Client + nvidiaV1alpha1 *nvidiav1alpha1.NvidiaV1alpha1Client +} + +// NvidiaV1 retrieves the NvidiaV1Client +func (c *Clientset) NvidiaV1() nvidiav1.NvidiaV1Interface { + return c.nvidiaV1 +} + +// NvidiaV1alpha1 retrieves the NvidiaV1alpha1Client +func (c *Clientset) NvidiaV1alpha1() nvidiav1alpha1.NvidiaV1alpha1Interface { + return c.nvidiaV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.nvidiaV1, err = nvidiav1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.nvidiaV1alpha1, err = nvidiav1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.nvidiaV1 = nvidiav1.New(c) + cs.nvidiaV1alpha1 = nvidiav1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/api/versioned/fake/clientset_generated.go b/api/versioned/fake/clientset_generated.go new file mode 100644 index 000000000..ed3bde5a5 --- /dev/null +++ b/api/versioned/fake/clientset_generated.go @@ -0,0 +1,96 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. 
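// Editor's illustration (not part of the diff): typical wiring for the
// generated clientset above — build a rest.Config, hand it to NewForConfig
// (which installs a token-bucket rate limiter when QPS is set but no
// RateLimiter is), then use the typed NvidiaV1 group client. A minimal
// sketch; the kubeconfig path is an assumption for illustration, and
// in-cluster code would use rest.InClusterConfig() instead.
package main

import (
	"context"
	"fmt"

	versioned "github.com/NVIDIA/gpu-operator/api/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}

	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// ClusterPolicy is cluster-scoped, so no namespace argument is needed.
	cps, err := cs.NvidiaV1().ClusterPolicies().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, cp := range cps.Items {
		fmt.Println(cp.Name)
	}
}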
+ +package fake + +import ( + clientset "github.com/NVIDIA/gpu-operator/api/versioned" + nvidiav1 "github.com/NVIDIA/gpu-operator/api/versioned/typed/nvidia/v1" + fakenvidiav1 "github.com/NVIDIA/gpu-operator/api/versioned/typed/nvidia/v1/fake" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/versioned/typed/nvidia/v1alpha1" + fakenvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/versioned/typed/nvidia/v1alpha1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// NvidiaV1 retrieves the NvidiaV1Client +func (c *Clientset) NvidiaV1() nvidiav1.NvidiaV1Interface { + return &fakenvidiav1.FakeNvidiaV1{Fake: &c.Fake} +} + +// NvidiaV1alpha1 retrieves the NvidiaV1alpha1Client +func (c *Clientset) NvidiaV1alpha1() nvidiav1alpha1.NvidiaV1alpha1Interface { + return &fakenvidiav1alpha1.FakeNvidiaV1alpha1{Fake: &c.Fake} +} diff --git a/api/versioned/fake/doc.go b/api/versioned/fake/doc.go new file mode 100644 index 000000000..75ffe4d8f --- /dev/null +++ b/api/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
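// Editor's illustration (not part of the diff): how the fake above is meant
// to be used — seed NewSimpleClientset with objects and exercise code against
// the same Interface the real clientset implements. A sketch of a unit test,
// assuming this module's generated packages; the policy name is arbitrary.
package fake_test

import (
	"context"
	"testing"

	nvidiav1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1"
	"github.com/NVIDIA/gpu-operator/api/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestGetClusterPolicy(t *testing.T) {
	seed := &nvidiav1.ClusterPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster-policy"},
	}

	// The object tracker behind the fake serves creates/updates/deletes as-is.
	cs := fake.NewSimpleClientset(seed)

	got, err := cs.NvidiaV1().ClusterPolicies().Get(context.TODO(), "cluster-policy", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "cluster-policy" {
		t.Fatalf("got %q, want %q", got.Name, "cluster-policy")
	}
}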
+# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/api/versioned/fake/register.go b/api/versioned/fake/register.go new file mode 100644 index 000000000..d1afb9205 --- /dev/null +++ b/api/versioned/fake/register.go @@ -0,0 +1,58 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + nvidiav1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + nvidiav1.AddToScheme, + nvidiav1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/api/versioned/scheme/doc.go b/api/versioned/scheme/doc.go new file mode 100644 index 000000000..161d7caf5 --- /dev/null +++ b/api/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. 
+package scheme diff --git a/api/versioned/scheme/register.go b/api/versioned/scheme/register.go new file mode 100644 index 000000000..52289fe8c --- /dev/null +++ b/api/versioned/scheme/register.go @@ -0,0 +1,58 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + nvidiav1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + nvidiav1.AddToScheme, + nvidiav1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/api/versioned/typed/nvidia/v1/clusterpolicy.go b/api/versioned/typed/nvidia/v1/clusterpolicy.go new file mode 100644 index 000000000..29d5aa390 --- /dev/null +++ b/api/versioned/typed/nvidia/v1/clusterpolicy.go @@ -0,0 +1,69 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. 
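// Editor's illustration (not part of the diff): the composition that the
// AddToScheme comment above describes, made concrete — register both the
// built-in client-go types and this clientset's nvidia types into a single
// runtime.Scheme. A sketch, assuming this module's generated scheme package.
package main

import (
	nvidiascheme "github.com/NVIDIA/gpu-operator/api/versioned/scheme"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
)

func buildScheme() *runtime.Scheme {
	s := runtime.NewScheme()
	utilruntime.Must(clientgoscheme.AddToScheme(s)) // core Kubernetes types
	utilruntime.Must(nvidiascheme.AddToScheme(s))   // nvidia.com v1 and v1alpha1 types
	return s
}

func main() { _ = buildScheme() }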
+ +package v1 + +import ( + "context" + + v1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" + scheme "github.com/NVIDIA/gpu-operator/api/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterPoliciesGetter has a method to return a ClusterPolicyInterface. +// A group's client should implement this interface. +type ClusterPoliciesGetter interface { + ClusterPolicies() ClusterPolicyInterface +} + +// ClusterPolicyInterface has methods to work with ClusterPolicy resources. +type ClusterPolicyInterface interface { + Create(ctx context.Context, clusterPolicy *v1.ClusterPolicy, opts metav1.CreateOptions) (*v1.ClusterPolicy, error) + Update(ctx context.Context, clusterPolicy *v1.ClusterPolicy, opts metav1.UpdateOptions) (*v1.ClusterPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, clusterPolicy *v1.ClusterPolicy, opts metav1.UpdateOptions) (*v1.ClusterPolicy, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterPolicyList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterPolicy, err error) + ClusterPolicyExpansion +} + +// clusterPolicies implements ClusterPolicyInterface +type clusterPolicies struct { + *gentype.ClientWithList[*v1.ClusterPolicy, *v1.ClusterPolicyList] +} + +// newClusterPolicies returns a ClusterPolicies +func newClusterPolicies(c *NvidiaV1Client) *clusterPolicies { + return &clusterPolicies{ + gentype.NewClientWithList[*v1.ClusterPolicy, *v1.ClusterPolicyList]( + "clusterpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ClusterPolicy { return &v1.ClusterPolicy{} }, + func() *v1.ClusterPolicyList { return &v1.ClusterPolicyList{} }), + } +} diff --git a/api/versioned/typed/nvidia/v1/doc.go b/api/versioned/typed/nvidia/v1/doc.go new file mode 100644 index 000000000..fb431d1b5 --- /dev/null +++ b/api/versioned/typed/nvidia/v1/doc.go @@ -0,0 +1,20 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/api/versioned/typed/nvidia/v1/fake/doc.go b/api/versioned/typed/nvidia/v1/fake/doc.go new file mode 100644 index 000000000..a8f211f18 --- /dev/null +++ b/api/versioned/typed/nvidia/v1/fake/doc.go @@ -0,0 +1,20 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/api/versioned/typed/nvidia/v1/fake/fake_clusterpolicy.go b/api/versioned/typed/nvidia/v1/fake/fake_clusterpolicy.go new file mode 100644 index 000000000..e7bb37f19 --- /dev/null +++ b/api/versioned/typed/nvidia/v1/fake/fake_clusterpolicy.go @@ -0,0 +1,138 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterPolicies implements ClusterPolicyInterface +type FakeClusterPolicies struct { + Fake *FakeNvidiaV1 +} + +var clusterpoliciesResource = v1.SchemeGroupVersion.WithResource("clusterpolicies") + +var clusterpoliciesKind = v1.SchemeGroupVersion.WithKind("ClusterPolicy") + +// Get takes name of the clusterPolicy, and returns the corresponding clusterPolicy object, and an error if there is any. +func (c *FakeClusterPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterPolicy, err error) { + emptyResult := &v1.ClusterPolicy{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(clusterpoliciesResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterPolicy), err +} + +// List takes label and field selectors, and returns the list of ClusterPolicies that match those selectors. +func (c *FakeClusterPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterPolicyList, err error) { + emptyResult := &v1.ClusterPolicyList{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootListActionWithOptions(clusterpoliciesResource, clusterpoliciesKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ClusterPolicyList{ListMeta: obj.(*v1.ClusterPolicyList).ListMeta} + for _, item := range obj.(*v1.ClusterPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterPolicies. +func (c *FakeClusterPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterpoliciesResource, opts)) +} + +// Create takes the representation of a clusterPolicy and creates it. Returns the server's representation of the clusterPolicy, and an error, if there is any. +func (c *FakeClusterPolicies) Create(ctx context.Context, clusterPolicy *v1.ClusterPolicy, opts metav1.CreateOptions) (result *v1.ClusterPolicy, err error) { + emptyResult := &v1.ClusterPolicy{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(clusterpoliciesResource, clusterPolicy, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterPolicy), err +} + +// Update takes the representation of a clusterPolicy and updates it. Returns the server's representation of the clusterPolicy, and an error, if there is any. +func (c *FakeClusterPolicies) Update(ctx context.Context, clusterPolicy *v1.ClusterPolicy, opts metav1.UpdateOptions) (result *v1.ClusterPolicy, err error) { + emptyResult := &v1.ClusterPolicy{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(clusterpoliciesResource, clusterPolicy, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterPolicy), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterPolicies) UpdateStatus(ctx context.Context, clusterPolicy *v1.ClusterPolicy, opts metav1.UpdateOptions) (result *v1.ClusterPolicy, err error) { + emptyResult := &v1.ClusterPolicy{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(clusterpoliciesResource, "status", clusterPolicy, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterPolicy), err +} + +// Delete takes name of the clusterPolicy and deletes it. Returns an error if one occurs. +func (c *FakeClusterPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(clusterpoliciesResource, name, opts), &v1.ClusterPolicy{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(clusterpoliciesResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.ClusterPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched clusterPolicy. 
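// Editor's illustration (not part of the diff): per the fake List
// implementation above, only the label selector extracted from ListOptions is
// honored (field selectors are ignored by the generated fake). A sketch,
// assuming this module's generated packages; names and labels are arbitrary.
package main

import (
	"context"
	"fmt"

	nvidiav1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1"
	"github.com/NVIDIA/gpu-operator/api/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cs := fake.NewSimpleClientset(
		&nvidiav1.ClusterPolicy{ObjectMeta: metav1.ObjectMeta{
			Name: "a", Labels: map[string]string{"team": "gpu"},
		}},
		&nvidiav1.ClusterPolicy{ObjectMeta: metav1.ObjectMeta{Name: "b"}},
	)

	list, err := cs.NvidiaV1().ClusterPolicies().List(context.TODO(),
		metav1.ListOptions{LabelSelector: "team=gpu"})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(list.Items)) // 1 — only "a" matches the selector
}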
+func (c *FakeClusterPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterPolicy, err error) { + emptyResult := &v1.ClusterPolicy{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterpoliciesResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterPolicy), err +} diff --git a/api/versioned/typed/nvidia/v1/fake/fake_nvidia_client.go b/api/versioned/typed/nvidia/v1/fake/fake_nvidia_client.go new file mode 100644 index 000000000..aca78400a --- /dev/null +++ b/api/versioned/typed/nvidia/v1/fake/fake_nvidia_client.go @@ -0,0 +1,40 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/NVIDIA/gpu-operator/api/versioned/typed/nvidia/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeNvidiaV1 struct { + *testing.Fake +} + +func (c *FakeNvidiaV1) ClusterPolicies() v1.ClusterPolicyInterface { + return &FakeClusterPolicies{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeNvidiaV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/api/versioned/typed/nvidia/v1/generated_expansion.go b/api/versioned/typed/nvidia/v1/generated_expansion.go new file mode 100644 index 000000000..9727986c3 --- /dev/null +++ b/api/versioned/typed/nvidia/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type ClusterPolicyExpansion interface{} diff --git a/api/versioned/typed/nvidia/v1/nvidia_client.go b/api/versioned/typed/nvidia/v1/nvidia_client.go new file mode 100644 index 000000000..15d235390 --- /dev/null +++ b/api/versioned/typed/nvidia/v1/nvidia_client.go @@ -0,0 +1,107 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" + "github.com/NVIDIA/gpu-operator/api/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type NvidiaV1Interface interface { + RESTClient() rest.Interface + ClusterPoliciesGetter +} + +// NvidiaV1Client is used to interact with features provided by the nvidia group. +type NvidiaV1Client struct { + restClient rest.Interface +} + +func (c *NvidiaV1Client) ClusterPolicies() ClusterPolicyInterface { + return newClusterPolicies(c) +} + +// NewForConfig creates a new NvidiaV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*NvidiaV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NvidiaV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NvidiaV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &NvidiaV1Client{client}, nil +} + +// NewForConfigOrDie creates a new NvidiaV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NvidiaV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NvidiaV1Client for the given RESTClient. +func New(c rest.Interface) *NvidiaV1Client { + return &NvidiaV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NvidiaV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/api/versioned/typed/nvidia/v1alpha1/doc.go b/api/versioned/typed/nvidia/v1alpha1/doc.go new file mode 100644 index 000000000..917274fbc --- /dev/null +++ b/api/versioned/typed/nvidia/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
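As an aside on consuming the generated NvidiaV1Client above: the following is a minimal sketch, not part of this change. It assumes a reachable cluster, a kubeconfig at the default location, and a ClusterPolicy instance named "cluster-policy" (the name is illustrative).

```
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	nvidiav1 "github.com/NVIDIA/gpu-operator/api/versioned/typed/nvidia/v1"
)

func main() {
	// Load a rest.Config from the default kubeconfig path (assumption: running
	// outside the cluster; in-cluster code would use rest.InClusterConfig).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	// NewForConfig applies the group/version defaults from setConfigDefaults.
	client, err := nvidiav1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// ClusterPolicy is cluster-scoped, so Get takes only a name.
	cp, err := client.ClusterPolicies().Get(context.TODO(), "cluster-policy", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("found ClusterPolicy:", cp.Name)
}
```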
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/api/versioned/typed/nvidia/v1alpha1/fake/doc.go b/api/versioned/typed/nvidia/v1alpha1/fake/doc.go new file mode 100644 index 000000000..a8f211f18 --- /dev/null +++ b/api/versioned/typed/nvidia/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/api/versioned/typed/nvidia/v1alpha1/fake/fake_nvidia_client.go b/api/versioned/typed/nvidia/v1alpha1/fake/fake_nvidia_client.go new file mode 100644 index 000000000..05c071875 --- /dev/null +++ b/api/versioned/typed/nvidia/v1alpha1/fake/fake_nvidia_client.go @@ -0,0 +1,40 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/NVIDIA/gpu-operator/api/versioned/typed/nvidia/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeNvidiaV1alpha1 struct { + *testing.Fake +} + +func (c *FakeNvidiaV1alpha1) NVIDIADrivers() v1alpha1.NVIDIADriverInterface { + return &FakeNVIDIADrivers{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeNvidiaV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/api/versioned/typed/nvidia/v1alpha1/fake/fake_nvidiadriver.go b/api/versioned/typed/nvidia/v1alpha1/fake/fake_nvidiadriver.go new file mode 100644 index 000000000..ef5fd04e6 --- /dev/null +++ b/api/versioned/typed/nvidia/v1alpha1/fake/fake_nvidiadriver.go @@ -0,0 +1,138 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeNVIDIADrivers implements NVIDIADriverInterface +type FakeNVIDIADrivers struct { + Fake *FakeNvidiaV1alpha1 +} + +var nvidiadriversResource = v1alpha1.SchemeGroupVersion.WithResource("nvidiadrivers") + +var nvidiadriversKind = v1alpha1.SchemeGroupVersion.WithKind("NVIDIADriver") + +// Get takes name of the nVIDIADriver, and returns the corresponding nVIDIADriver object, and an error if there is any. +func (c *FakeNVIDIADrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NVIDIADriver, err error) { + emptyResult := &v1alpha1.NVIDIADriver{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(nvidiadriversResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.NVIDIADriver), err +} + +// List takes label and field selectors, and returns the list of NVIDIADrivers that match those selectors. +func (c *FakeNVIDIADrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NVIDIADriverList, err error) { + emptyResult := &v1alpha1.NVIDIADriverList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(nvidiadriversResource, nvidiadriversKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.NVIDIADriverList{ListMeta: obj.(*v1alpha1.NVIDIADriverList).ListMeta} + for _, item := range obj.(*v1alpha1.NVIDIADriverList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested nVIDIADrivers. +func (c *FakeNVIDIADrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(nvidiadriversResource, opts)) +} + +// Create takes the representation of a nVIDIADriver and creates it. Returns the server's representation of the nVIDIADriver, and an error, if there is any. +func (c *FakeNVIDIADrivers) Create(ctx context.Context, nVIDIADriver *v1alpha1.NVIDIADriver, opts v1.CreateOptions) (result *v1alpha1.NVIDIADriver, err error) { + emptyResult := &v1alpha1.NVIDIADriver{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(nvidiadriversResource, nVIDIADriver, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.NVIDIADriver), err +} + +// Update takes the representation of a nVIDIADriver and updates it. 
Returns the server's representation of the nVIDIADriver, and an error, if there is any. +func (c *FakeNVIDIADrivers) Update(ctx context.Context, nVIDIADriver *v1alpha1.NVIDIADriver, opts v1.UpdateOptions) (result *v1alpha1.NVIDIADriver, err error) { + emptyResult := &v1alpha1.NVIDIADriver{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(nvidiadriversResource, nVIDIADriver, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.NVIDIADriver), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeNVIDIADrivers) UpdateStatus(ctx context.Context, nVIDIADriver *v1alpha1.NVIDIADriver, opts v1.UpdateOptions) (result *v1alpha1.NVIDIADriver, err error) { + emptyResult := &v1alpha1.NVIDIADriver{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(nvidiadriversResource, "status", nVIDIADriver, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.NVIDIADriver), err +} + +// Delete takes name of the nVIDIADriver and deletes it. Returns an error if one occurs. +func (c *FakeNVIDIADrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(nvidiadriversResource, name, opts), &v1alpha1.NVIDIADriver{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeNVIDIADrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(nvidiadriversResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.NVIDIADriverList{}) + return err +} + +// Patch applies the patch and returns the patched nVIDIADriver. +func (c *FakeNVIDIADrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NVIDIADriver, err error) { + emptyResult := &v1alpha1.NVIDIADriver{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(nvidiadriversResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.NVIDIADriver), err +} diff --git a/api/versioned/typed/nvidia/v1alpha1/generated_expansion.go b/api/versioned/typed/nvidia/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..f5e236215 --- /dev/null +++ b/api/versioned/typed/nvidia/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. 
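The fake clients in this change are intended for unit tests. Below is a minimal sketch of exercising them; it assumes the generated versioned fake clientset package (api/versioned/fake, not shown in this diff) exposes the conventional client-gen NewSimpleClientset constructor.

```
package fake_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1"
	versionedfake "github.com/NVIDIA/gpu-operator/api/versioned/fake" // assumed path
)

func TestNVIDIADriverFakeGet(t *testing.T) {
	seed := &nvidiav1alpha1.NVIDIADriver{
		ObjectMeta: metav1.ObjectMeta{Name: "gpu-driver"},
	}

	// The fake clientset serves objects from an in-memory tracker and records
	// every action; no API server is involved.
	client := versionedfake.NewSimpleClientset(seed)

	// NVIDIADriver is cluster-scoped, so Get takes only a name.
	got, err := client.NvidiaV1alpha1().NVIDIADrivers().Get(context.TODO(), "gpu-driver", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != seed.Name {
		t.Fatalf("got %q, want %q", got.Name, seed.Name)
	}
}
```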
+ +package v1alpha1 + +type NVIDIADriverExpansion interface{} diff --git a/api/versioned/typed/nvidia/v1alpha1/nvidia_client.go b/api/versioned/typed/nvidia/v1alpha1/nvidia_client.go new file mode 100644 index 000000000..1fa172e04 --- /dev/null +++ b/api/versioned/typed/nvidia/v1alpha1/nvidia_client.go @@ -0,0 +1,107 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" + "github.com/NVIDIA/gpu-operator/api/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type NvidiaV1alpha1Interface interface { + RESTClient() rest.Interface + NVIDIADriversGetter +} + +// NvidiaV1alpha1Client is used to interact with features provided by the nvidia group. +type NvidiaV1alpha1Client struct { + restClient rest.Interface +} + +func (c *NvidiaV1alpha1Client) NVIDIADrivers() NVIDIADriverInterface { + return newNVIDIADrivers(c) +} + +// NewForConfig creates a new NvidiaV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*NvidiaV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NvidiaV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NvidiaV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &NvidiaV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new NvidiaV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NvidiaV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NvidiaV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *NvidiaV1alpha1Client { + return &NvidiaV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *NvidiaV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/api/versioned/typed/nvidia/v1alpha1/nvidiadriver.go b/api/versioned/typed/nvidia/v1alpha1/nvidiadriver.go new file mode 100644 index 000000000..21ae61bf5 --- /dev/null +++ b/api/versioned/typed/nvidia/v1alpha1/nvidiadriver.go @@ -0,0 +1,69 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" + scheme "github.com/NVIDIA/gpu-operator/api/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// NVIDIADriversGetter has a method to return a NVIDIADriverInterface. +// A group's client should implement this interface. +type NVIDIADriversGetter interface { + NVIDIADrivers() NVIDIADriverInterface +} + +// NVIDIADriverInterface has methods to work with NVIDIADriver resources. +type NVIDIADriverInterface interface { + Create(ctx context.Context, nVIDIADriver *v1alpha1.NVIDIADriver, opts v1.CreateOptions) (*v1alpha1.NVIDIADriver, error) + Update(ctx context.Context, nVIDIADriver *v1alpha1.NVIDIADriver, opts v1.UpdateOptions) (*v1alpha1.NVIDIADriver, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, nVIDIADriver *v1alpha1.NVIDIADriver, opts v1.UpdateOptions) (*v1alpha1.NVIDIADriver, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.NVIDIADriver, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.NVIDIADriverList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NVIDIADriver, err error) + NVIDIADriverExpansion +} + +// nVIDIADrivers implements NVIDIADriverInterface +type nVIDIADrivers struct { + *gentype.ClientWithList[*v1alpha1.NVIDIADriver, *v1alpha1.NVIDIADriverList] +} + +// newNVIDIADrivers returns a NVIDIADrivers +func newNVIDIADrivers(c *NvidiaV1alpha1Client) *nVIDIADrivers { + return &nVIDIADrivers{ + gentype.NewClientWithList[*v1alpha1.NVIDIADriver, *v1alpha1.NVIDIADriverList]( + "nvidiadrivers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.NVIDIADriver { return &v1alpha1.NVIDIADriver{} }, + func() *v1alpha1.NVIDIADriverList { return &v1alpha1.NVIDIADriverList{} }), + } +} diff --git a/assets/gpu-feature-discovery/0200_role.yaml b/assets/gpu-feature-discovery/0200_role.yaml index 52d1b606f..201042082 100644 --- a/assets/gpu-feature-discovery/0200_role.yaml +++ b/assets/gpu-feature-discovery/0200_role.yaml @@ -12,11 +12,3 @@ rules: - use resourceNames: - privileged -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch diff --git a/assets/gpu-feature-discovery/0500_daemonset.yaml b/assets/gpu-feature-discovery/0500_daemonset.yaml index 3bae575a7..40df1c193 100644 --- a/assets/gpu-feature-discovery/0500_daemonset.yaml +++ b/assets/gpu-feature-discovery/0500_daemonset.yaml @@ -33,9 +33,51 @@ spec: securityContext: privileged: true volumeMounts: - - name: run-nvidia - mountPath: /run/nvidia - mountPropagation: Bidirectional + - name: run-nvidia-validations + mountPath: /run/nvidia/validations + mountPropagation: HostToContainer + - name: gpu-feature-discovery-imex-init + image: "FILLED BY THE OPERATOR" + command: ["/bin/bash", "-c"] + args: + - | + until [[ -f /run/nvidia/validations/driver-ready ]] + do + echo "waiting for the driver validations to be ready..." + sleep 5 + done + set -o allexport + cat /run/nvidia/validations/driver-ready + . /run/nvidia/validations/driver-ready + + IMEX_NODES_CONFIG_FILE=/etc/nvidia-imex/nodes_config.cfg + if [[ -f /config/${IMEX_NODES_CONFIG_FILE} ]]; then + echo "Removing cached IMEX nodes config" + rm -f /config/${IMEX_NODES_CONFIG_FILE} + fi + if [[ ! 
-f ${DRIVER_ROOT_CTR_PATH}/${IMEX_NODES_CONFIG_FILE} ]]; then + echo "No IMEX nodes config path detected; Skipping" + exit 0 + fi + echo "Copying IMEX nodes config" + mkdir -p $(dirname /config/${IMEX_NODES_CONFIG_FILE}) + cp ${DRIVER_ROOT_CTR_PATH}/${IMEX_NODES_CONFIG_FILE} /config/${IMEX_NODES_CONFIG_FILE} + securityContext: + privileged: true + volumeMounts: + - name: config + mountPath: /config + - name: run-nvidia-validations + mountPath: /run/nvidia/validations + mountPropagation: HostToContainer + - name: host-root + mountPath: /host/etc + subPath: etc + readOnly: true + - name: driver-install-dir + mountPath: /driver-root/etc + subPath: etc + readOnly: true - name: config-manager-init image: "FILLED BY THE OPERATOR" command: ["config-manager"] @@ -62,6 +104,9 @@ spec: value: "" - name: PROCESS_TO_SIGNAL value: "" + volumeMounts: + - name: config + mountPath: /config containers: - image: "FILLED BY THE OPERATOR" name: gpu-feature-discovery @@ -71,12 +116,22 @@ spec: value: 60s - name: GFD_FAIL_ON_INIT_ERROR value: "true" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName volumeMounts: - name: output-dir mountPath: "/etc/kubernetes/node-feature-discovery/features.d" - name: dmi-info-dir mountPath: "/sys/class/dmi/id" readOnly: true + - name: config + mountPath: /config securityContext: privileged: true - image: "FILLED BY THE OPERATOR" @@ -106,7 +161,10 @@ spec: - name: SIGNAL value: "1" # SIGHUP - name: PROCESS_TO_SIGNAL - value: "/usr/bin/gpu-feature-discovery" + value: "gpu-feature-discovery" + volumeMounts: + - name: config + mountPath: /config volumes: - name: output-dir hostPath: @@ -114,7 +172,16 @@ spec: - name: dmi-info-dir hostPath: path: "/sys/class/dmi/id" - - name: run-nvidia + - name: run-nvidia-validations + hostPath: + path: "/run/nvidia/validations" + type: DirectoryOrCreate + - name: host-root + hostPath: + path: / + - name: driver-install-dir hostPath: - path: "/run/nvidia" - type: Directory + path: /run/nvidia/driver + type: DirectoryOrCreate + - name: config + emptyDir: {} diff --git a/assets/state-cc-manager/0200_role.yaml b/assets/state-cc-manager/0200_role.yaml index 795b9c234..0afa4f919 100644 --- a/assets/state-cc-manager/0200_role.yaml +++ b/assets/state-cc-manager/0200_role.yaml @@ -12,3 +12,11 @@ rules: - use resourceNames: - privileged +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch diff --git a/assets/state-cc-manager/0210_clusterrole.yaml b/assets/state-cc-manager/0210_clusterrole.yaml index 6ee96a97c..f6c2b3e88 100644 --- a/assets/state-cc-manager/0210_clusterrole.yaml +++ b/assets/state-cc-manager/0210_clusterrole.yaml @@ -7,7 +7,6 @@ rules: - "" resources: - nodes - - pods verbs: - get - list diff --git a/assets/state-container-toolkit/0200_role.yaml b/assets/state-container-toolkit/0200_role.yaml index ad8f93a7a..22b86bea9 100644 --- a/assets/state-container-toolkit/0200_role.yaml +++ b/assets/state-container-toolkit/0200_role.yaml @@ -12,3 +12,9 @@ rules: - use resourceNames: - privileged +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - list diff --git a/assets/state-container-toolkit/0400_configmap.yaml b/assets/state-container-toolkit/0400_configmap.yaml index ada0872fe..cca4ee4e9 100644 --- a/assets/state-container-toolkit/0400_configmap.yaml +++ b/assets/state-container-toolkit/0400_configmap.yaml @@ -9,17 +9,15 @@ data: entrypoint.sh: |- #!/bin/bash - set -e + until [[ -f 
/run/nvidia/validations/driver-ready ]] + do + echo "waiting for the driver validations to be ready..." + sleep 5 + done - driver_root=/run/nvidia/driver - driver_root_ctr_path=$driver_root - if [[ -f /run/nvidia/validations/host-driver-ready ]]; then - driver_root=/ - driver_root_ctr_path=/host - fi - - export NVIDIA_DRIVER_ROOT=$driver_root - export DRIVER_ROOT_CTR_PATH=$driver_root_ctr_path + set -o allexport + cat /run/nvidia/validations/driver-ready + . /run/nvidia/validations/driver-ready # # The below delay is a workaround for an issue affecting some versions diff --git a/assets/state-container-toolkit/0500_daemonset.yaml b/assets/state-container-toolkit/0500_daemonset.yaml index 85f68869d..b10949460 100644 --- a/assets/state-container-toolkit/0500_daemonset.yaml +++ b/assets/state-container-toolkit/0500_daemonset.yaml @@ -36,12 +36,16 @@ spec: value: "true" - name: COMPONENT value: driver + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace securityContext: privileged: true seLinuxOptions: level: "s0" volumeMounts: - - name: driver-install-path + - name: driver-install-dir mountPath: /run/nvidia/driver mountPropagation: HostToContainer - name: run-nvidia-validations @@ -67,6 +71,8 @@ spec: value: "management.nvidia.com/gpu" - name: NVIDIA_VISIBLE_DEVICES value: "void" + - name: TOOLKIT_PID_FILE + value: "/run/nvidia/toolkit/toolkit.pid" imagePullPolicy: IfNotPresent name: nvidia-container-toolkit-ctr securityContext: @@ -78,13 +84,17 @@ spec: readOnly: true mountPath: /bin/entrypoint.sh subPath: entrypoint.sh - - name: nvidia-run-path - mountPath: /run/nvidia - mountPropagation: Bidirectional + - name: toolkit-root + mountPath: /run/nvidia/toolkit + - name: run-nvidia-validations + mountPath: /run/nvidia/validations - name: toolkit-install-dir mountPath: /usr/local/nvidia - name: crio-hooks mountPath: /usr/share/containers/oci/hooks.d + - name: driver-install-dir + mountPath: /driver-root + mountPropagation: HostToContainer - name: host-root mountPath: /host readOnly: true @@ -96,17 +106,18 @@ spec: configMap: name: nvidia-container-toolkit-entrypoint defaultMode: 448 - - name: nvidia-run-path + - name: toolkit-root hostPath: - path: /run/nvidia + path: /run/nvidia/toolkit type: DirectoryOrCreate - name: run-nvidia-validations hostPath: path: /run/nvidia/validations type: DirectoryOrCreate - - name: driver-install-path + - name: driver-install-dir hostPath: path: /run/nvidia/driver + type: DirectoryOrCreate - name: host-root hostPath: path: / diff --git a/assets/state-dcgm-exporter/0200_role.yaml b/assets/state-dcgm-exporter/0200_role.yaml index 0e6d68768..f055a3b34 100644 --- a/assets/state-dcgm-exporter/0200_role.yaml +++ b/assets/state-dcgm-exporter/0200_role.yaml @@ -15,6 +15,7 @@ rules: - apiGroups: - "" resources: + - configmaps - pods verbs: - get diff --git a/assets/state-dcgm/0400_dcgm.yml b/assets/state-dcgm/0400_dcgm.yml index 3414e824f..14fea317a 100644 --- a/assets/state-dcgm/0400_dcgm.yml +++ b/assets/state-dcgm/0400_dcgm.yml @@ -35,7 +35,6 @@ spec: - name: run-nvidia mountPath: /run/nvidia mountPropagation: HostToContainer - hostNetwork: true containers: - image: "FILLED BY THE OPERATOR" name: nvidia-dcgm-ctr @@ -44,7 +43,6 @@ spec: ports: - name: "dcgm" containerPort: 5555 - hostPort: 5555 volumes: - name: run-nvidia hostPath: diff --git a/assets/state-dcgm/0500_service.yaml b/assets/state-dcgm/0500_service.yaml new file mode 100644 index 000000000..5ea9ac10b --- /dev/null +++ b/assets/state-dcgm/0500_service.yaml @@ -0,0 +1,16 @@ 
+apiVersion: v1 +kind: Service +metadata: + labels: + app: nvidia-dcgm + name: nvidia-dcgm + namespace: "FILLED BY THE OPERATOR" +spec: + internalTrafficPolicy: Local + ports: + - name: dcgm + port: 5555 + protocol: TCP + selector: + app: nvidia-dcgm + type: ClusterIP diff --git a/assets/state-device-plugin/0200_role.yaml b/assets/state-device-plugin/0200_role.yaml index 8d9b6691a..e188d60b5 100644 --- a/assets/state-device-plugin/0200_role.yaml +++ b/assets/state-device-plugin/0200_role.yaml @@ -12,11 +12,3 @@ rules: - use resourceNames: - privileged -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch diff --git a/assets/state-device-plugin/0400_configmap.yaml b/assets/state-device-plugin/0400_configmap.yaml index 6bfba993b..651fd5421 100644 --- a/assets/state-device-plugin/0400_configmap.yaml +++ b/assets/state-device-plugin/0400_configmap.yaml @@ -9,28 +9,15 @@ data: entrypoint.sh: |- #!/bin/bash - driver_root="" - container_driver_root="" - while true; do - if [[ -f /run/nvidia/validations/host-driver-ready ]]; then - driver_root=/ - container_driver_root=/host - break - elif [[ -f /run/nvidia/validations/driver-ready ]]; then - driver_root=/run/nvidia/driver - container_driver_root=$driver_root - break - else - echo "waiting for the driver validations to be ready..." - sleep 5 - fi + until [[ -f /run/nvidia/validations/driver-ready ]] + do + echo "waiting for the driver validations to be ready..." + sleep 5 done - - export NVIDIA_DRIVER_ROOT=$driver_root - echo "NVIDIA_DRIVER_ROOT=$NVIDIA_DRIVER_ROOT" - - export CONTAINER_DRIVER_ROOT=$container_driver_root - echo "CONTAINER_DRIVER_ROOT=$CONTAINER_DRIVER_ROOT" + + set -o allexport + cat /run/nvidia/validations/driver-ready + . /run/nvidia/validations/driver-ready echo "Starting nvidia-device-plugin" exec nvidia-device-plugin diff --git a/assets/state-device-plugin/0500_daemonset.yaml b/assets/state-device-plugin/0500_daemonset.yaml index d82e590d2..e6a68bd16 100644 --- a/assets/state-device-plugin/0500_daemonset.yaml +++ b/assets/state-device-plugin/0500_daemonset.yaml @@ -32,8 +32,8 @@ spec: securityContext: privileged: true volumeMounts: - - name: run-nvidia - mountPath: /run/nvidia + - name: run-nvidia-validations + mountPath: /run/nvidia/validations mountPropagation: HostToContainer - image: "FILLED BY THE OPERATOR" name: config-manager-init @@ -61,6 +61,9 @@ spec: value: "" - name: PROCESS_TO_SIGNAL value: "" + volumeMounts: + - name: config + mountPath: /config containers: - image: "FILLED BY THE OPERATOR" name: nvidia-device-plugin @@ -91,8 +94,10 @@ spec: subPath: entrypoint.sh - name: device-plugin mountPath: /var/lib/kubelet/device-plugins - - name: run-nvidia - mountPath: /run/nvidia + - name: run-nvidia-validations + mountPath: /run/nvidia/validations + - name: driver-install-dir + mountPath: /driver-root mountPropagation: HostToContainer - name: host-root mountPath: /host @@ -105,6 +110,8 @@ spec: mountPath: /dev/shm - name: mps-root mountPath: /mps + - name: config + mountPath: /config - image: "FILLED BY THE OPERATOR" name: config-manager command: ["config-manager"] @@ -133,6 +140,9 @@ spec: value: "1" # SIGHUP - name: PROCESS_TO_SIGNAL value: "nvidia-device-plugin" + volumeMounts: + - name: config + mountPath: /config volumes: - name: nvidia-device-plugin-entrypoint configMap: @@ -141,10 +151,14 @@ spec: - name: device-plugin hostPath: path: /var/lib/kubelet/device-plugins - - name: run-nvidia + - name: run-nvidia-validations hostPath: - path: "/run/nvidia" - type: Directory + path: 
"/run/nvidia/validations" + type: DirectoryOrCreate + - name: driver-install-dir + hostPath: + path: "/run/nvidia/driver" + type: DirectoryOrCreate - name: host-root hostPath: path: / @@ -159,3 +173,5 @@ spec: - name: mps-shm hostPath: path: /run/nvidia/mps/shm + - name: config + emptyDir: {} diff --git a/assets/state-driver/0200_role.yaml b/assets/state-driver/0200_role.yaml index 604c2a3df..a2cb330e6 100644 --- a/assets/state-driver/0200_role.yaml +++ b/assets/state-driver/0200_role.yaml @@ -12,20 +12,3 @@ rules: - use resourceNames: - privileged -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' diff --git a/assets/state-driver/0210_clusterrole.yaml b/assets/state-driver/0210_clusterrole.yaml index 6bdb2c527..e62fbf316 100644 --- a/assets/state-driver/0210_clusterrole.yaml +++ b/assets/state-driver/0210_clusterrole.yaml @@ -35,9 +35,6 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get diff --git a/assets/state-driver/0500_daemonset.yaml b/assets/state-driver/0500_daemonset.yaml index 040043d43..13f83884a 100644 --- a/assets/state-driver/0500_daemonset.yaml +++ b/assets/state-driver/0500_daemonset.yaml @@ -94,6 +94,15 @@ spec: name: nvidia-driver-ctr command: ["nvidia-driver"] args: ["init"] + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP securityContext: privileged: true seLinuxOptions: diff --git a/assets/state-mig-manager/0200_role.yaml b/assets/state-mig-manager/0200_role.yaml index 0e9c291e8..5396cbeaa 100644 --- a/assets/state-mig-manager/0200_role.yaml +++ b/assets/state-mig-manager/0200_role.yaml @@ -16,6 +16,8 @@ rules: - "" resources: - pods - - nodes verbs: - - '*' + - get + - list + - watch + - delete diff --git a/assets/state-mig-manager/0210_clusterrole.yaml b/assets/state-mig-manager/0210_clusterrole.yaml index 37e115af9..2e9e9e8b9 100644 --- a/assets/state-mig-manager/0210_clusterrole.yaml +++ b/assets/state-mig-manager/0210_clusterrole.yaml @@ -8,4 +8,8 @@ rules: resources: - nodes verbs: - - '*' + - get + - list + - watch + - update + - patch diff --git a/assets/state-mig-manager/0400_configmap.yaml b/assets/state-mig-manager/0400_configmap.yaml index f81b66969..753aeb499 100644 --- a/assets/state-mig-manager/0400_configmap.yaml +++ b/assets/state-mig-manager/0400_configmap.yaml @@ -56,7 +56,7 @@ data: # H100-80GB, H800-80GB, A100-80GB, A800-80GB, A100-40GB, A800-40GB all-1g.10gb: # H100-80GB, H800-80GB, A100-80GB, A800-80GB - - device-filter: ["0x233010DE", "0x233110DE", "0x232210DE", "0x20B210DE", "0x20B510DE", "0x20F310DE", "0x20F510DE"] + - device-filter: ["0x233010DE", "0x233110DE", "0x232210DE", "0x20B210DE", "0x20B510DE", "0x20F310DE", "0x20F510DE", "0x232410DE"] devices: all mig-enabled: true mig-devices: @@ -201,20 +201,37 @@ data: mig-devices: "7g.96gb": 1 - - # H100-96GB, GH200, H100 NVL, H800 NVL, H100-80GB, H800-80GB, A800-40GB, A800-80GB, A100-40GB, A100-80GB, A30-24GB, PG506-96GB + # GH200 144G HBM3e, H200-141GB, H200 NVL, H100-96GB, GH200, H100 NVL, H800 NVL, H100-80GB, H800-80GB, A800-40GB, A800-80GB, A100-40GB, A100-80GB, A30-24GB, PG506-96GB all-balanced: + # GH200 144G HBM3e + - device-filter: ["0x234810DE"] + devices: all + mig-enabled: true + mig-devices: + "1g.18gb": 2 + "2g.36gb": 1 + "3g.72gb": 1 + + # H200 141GB, H200 
NVL + - device-filter: ["0x233510DE", "0x233B10DE"] + devices: all + mig-enabled: true + mig-devices: + "1g.18gb": 2 + "2g.35gb": 1 + "3g.71gb": 1 + # H100 NVL, H800 NVL - device-filter: ["0x232110DE", "0x233A10DE"] devices: all mig-enabled: true mig-devices: - "1g.12gb": 1 + "1g.12gb": 2 "2g.24gb": 1 "3g.47gb": 1 # H100-80GB, H800-80GB, A100-80GB, A800-80GB - - device-filter: ["0x233010DE", "0x233110DE", "0x232210DE", "0x20B210DE", "0x20B510DE", "0x20F310DE", "0x20F510DE"] + - device-filter: ["0x233010DE", "0x233110DE", "0x232210DE", "0x20B210DE", "0x20B510DE", "0x20F310DE", "0x20F510DE", "0x232410DE"] devices: all mig-enabled: true mig-devices: @@ -239,11 +256,86 @@ data: "1g.6gb": 2 "2g.12gb": 1 - # H100-96GB, PG506-96GB, GH200 - - device-filter: ["0x234210DE", "0x233D10DE", "0x20B610DE"] + # H100-96GB, PG506-96GB, GH200, H20 + - device-filter: ["0x234210DE", "0x233D10DE", "0x20B610DE", "0x232910DE"] devices: all mig-enabled: true mig-devices: "1g.12gb": 2 "2g.24gb": 1 "3g.48gb": 1 + + # H200-141GB, GH200 144G HBM3e + all-1g.18gb: + - devices: all + mig-enabled: true + mig-devices: + "1g.18gb": 7 + + all-1g.18gb.me: + - devices: all + mig-enabled: true + mig-devices: + "1g.18gb+me": 1 + + # H200-141GB + all-1g.35gb: + - devices: all + mig-enabled: true + mig-devices: + "1g.35gb": 4 + + all-2g.35gb: + - devices: all + mig-enabled: true + mig-devices: + "2g.35gb": 3 + + all-3g.71gb: + - devices: all + mig-enabled: true + mig-devices: + "3g.71gb": 2 + + all-4g.71gb: + - devices: all + mig-enabled: true + mig-devices: + "4g.71gb": 1 + + all-7g.141gb: + - devices: all + mig-enabled: true + mig-devices: + "7g.141gb": 1 + + # GH200 144G HBM3e + all-1g.36gb: + - devices: all + mig-enabled: true + mig-devices: + "1g.36gb": 4 + + all-2g.36gb: + - devices: all + mig-enabled: true + mig-devices: + "2g.36gb": 3 + + all-3g.72gb: + - devices: all + mig-enabled: true + mig-devices: + "3g.72gb": 2 + + all-4g.72gb: + - devices: all + mig-enabled: true + mig-devices: + "4g.72gb": 1 + + all-7g.144gb: + - devices: all + mig-enabled: true + mig-devices: + "7g.144gb": 1 diff --git a/assets/state-mig-manager/0420_configmap.yaml b/assets/state-mig-manager/0420_configmap.yaml index 5c9e9f1ab..7fbfc0d78 100644 --- a/assets/state-mig-manager/0420_configmap.yaml +++ b/assets/state-mig-manager/0420_configmap.yaml @@ -9,34 +9,19 @@ data: entrypoint.sh: |- #!/bin/bash - host_driver="" - driver_root="" - driver_root_ctr_path="" - while true; do - if [[ -f /run/nvidia/validations/host-driver-ready ]]; then - host_driver=true - driver_root="/" - driver_root_ctr_path="/host" - break - elif [[ -f /run/nvidia/validations/driver-ready ]]; then - host_driver=false - driver_root="/run/nvidia/driver" - driver_root_ctr_path="/run/nvidia/driver" - break - else - echo "waiting for the driver validations to be ready..." - sleep 5 - fi + until [[ -f /run/nvidia/validations/driver-ready ]] + do + echo "waiting for the driver validations to be ready..." + sleep 5 done - - export WITH_SHUTDOWN_HOST_GPU_CLIENTS=$host_driver + + set -o allexport + cat /run/nvidia/validations/driver-ready + . 
/run/nvidia/validations/driver-ready + + # manually export additional envs required by mig-manager + export WITH_SHUTDOWN_HOST_GPU_CLIENTS=$IS_HOST_DRIVER echo "WITH_SHUTDOWN_HOST_GPU_CLIENTS=$WITH_SHUTDOWN_HOST_GPU_CLIENTS" - export DRIVER_ROOT=$driver_root - echo "DRIVER_ROOT=$DRIVER_ROOT" - - export DRIVER_ROOT_CTR_PATH=$driver_root_ctr_path - echo "DRIVER_ROOT_CTR_PATH=$DRIVER_ROOT_CTR_PATH" - echo "Starting nvidia-mig-manager" exec nvidia-mig-manager diff --git a/assets/state-mig-manager/0600_daemonset.yaml b/assets/state-mig-manager/0600_daemonset.yaml index 2aadec4d5..e8676b27b 100644 --- a/assets/state-mig-manager/0600_daemonset.yaml +++ b/assets/state-mig-manager/0600_daemonset.yaml @@ -32,8 +32,8 @@ spec: securityContext: privileged: true volumeMounts: - - name: run-nvidia - mountPath: /run/nvidia + - name: run-nvidia-validations + mountPath: /run/nvidia/validations mountPropagation: HostToContainer containers: - name: nvidia-mig-manager @@ -62,6 +62,8 @@ spec: readOnly: true mountPath: /bin/entrypoint.sh subPath: entrypoint.sh + - name: run-nvidia-validations + mountPath: /run/nvidia/validations - mountPath: /sys name: host-sys - mountPath: /mig-parted-config @@ -71,8 +73,8 @@ spec: mountPropagation: HostToContainer - mountPath: /gpu-clients name: gpu-clients - - name: run-nvidia - mountPath: /run/nvidia + - name: driver-install-dir + mountPath: /driver-root mountPropagation: HostToContainer - name: cdi-root mountPath: /var/run/cdi @@ -88,10 +90,14 @@ spec: - name: mig-parted-config configMap: name: "FILLED_BY_OPERATOR" - - name: run-nvidia + - name: run-nvidia-validations hostPath: - path: "/run/nvidia" - type: Directory + path: "/run/nvidia/validations" + type: DirectoryOrCreate + - name: driver-install-dir + hostPath: + path: "/run/nvidia/driver" + type: DirectoryOrCreate - name: host-root hostPath: path: "/" diff --git a/assets/state-mps-control-daemon/0200_role.yaml b/assets/state-mps-control-daemon/0200_role.yaml index 808c51e7f..1152135fe 100644 --- a/assets/state-mps-control-daemon/0200_role.yaml +++ b/assets/state-mps-control-daemon/0200_role.yaml @@ -12,12 +12,3 @@ rules: - use resourceNames: - privileged -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - diff --git a/assets/state-mps-control-daemon/0400_daemonset.yaml b/assets/state-mps-control-daemon/0400_daemonset.yaml index 16fe81301..097ce8ca9 100644 --- a/assets/state-mps-control-daemon/0400_daemonset.yaml +++ b/assets/state-mps-control-daemon/0400_daemonset.yaml @@ -25,6 +25,7 @@ spec: effect: NoSchedule priorityClassName: system-node-critical serviceAccountName: nvidia-device-plugin + hostPID: true initContainers: - image: "FILLED BY THE OPERATOR" name: toolkit-validation @@ -71,6 +72,9 @@ spec: value: "" - name: PROCESS_TO_SIGNAL value: "" + volumeMounts: + - name: config + mountPath: /config containers: - image: "FILLED BY OPERATOR" name: mps-control-daemon-ctr @@ -92,6 +96,8 @@ spec: mountPath: /dev/shm - name: mps-root mountPath: /mps + - name: config + mountPath: /config - image: "FILLED BY THE OPERATOR" name: config-manager command: ["config-manager"] @@ -120,6 +126,9 @@ spec: value: "1" # SIGHUP - name: PROCESS_TO_SIGNAL value: "/usr/bin/mps-control-daemon" + volumeMounts: + - name: config + mountPath: /config volumes: - name: run-nvidia hostPath: @@ -132,3 +141,5 @@ spec: - name: mps-shm hostPath: path: /run/nvidia/mps/shm + - name: config + emptyDir: {} diff --git a/assets/state-node-status-exporter/0200_role.yaml b/assets/state-node-status-exporter/0200_role.yaml index 
52476b451..d74b46a94 100644 --- a/assets/state-node-status-exporter/0200_role.yaml +++ b/assets/state-node-status-exporter/0200_role.yaml @@ -16,7 +16,14 @@ rules: - "" resources: - pods - - nodes verbs: - get - list +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - list + - watch diff --git a/assets/state-node-status-exporter/0300_clusterrole.yaml b/assets/state-node-status-exporter/0300_clusterrole.yaml index 245c84005..6f91fe237 100644 --- a/assets/state-node-status-exporter/0300_clusterrole.yaml +++ b/assets/state-node-status-exporter/0300_clusterrole.yaml @@ -8,4 +8,6 @@ rules: resources: - nodes verbs: - - '*' + - get + - list + - watch diff --git a/assets/state-node-status-exporter/0700_daemonset.yaml b/assets/state-node-status-exporter/0700_daemonset.yaml index 6ec4036bc..2d6d830ad 100644 --- a/assets/state-node-status-exporter/0700_daemonset.yaml +++ b/assets/state-node-status-exporter/0700_daemonset.yaml @@ -40,6 +40,10 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace ports: - name: node-status containerPort: 8000 diff --git a/assets/state-operator-validation/0200_role.yaml b/assets/state-operator-validation/0200_role.yaml index ef07efc03..5c464f4e7 100644 --- a/assets/state-operator-validation/0200_role.yaml +++ b/assets/state-operator-validation/0200_role.yaml @@ -16,19 +16,19 @@ rules: - "" resources: - pods - - nodes verbs: - - '*' + - create + - get + - list + - watch + - update + - patch + - delete - apiGroups: - apps resources: - - deployments - daemonsets verbs: - - '*' -- apiGroups: - - nvidia.com - resources: - - clusterpolicies/finalizers - verbs: - - '*' + - get + - list + - watch diff --git a/assets/state-operator-validation/0210_clusterrole.yaml b/assets/state-operator-validation/0210_clusterrole.yaml index c7a13b95f..324fe47c7 100644 --- a/assets/state-operator-validation/0210_clusterrole.yaml +++ b/assets/state-operator-validation/0210_clusterrole.yaml @@ -8,10 +8,19 @@ rules: resources: - nodes verbs: - - '*' + - get + - list + - watch - apiGroups: - nvidia.com resources: - clusterpolicies/finalizers + - nvidiadrivers/finalizers verbs: - - '*' + - create + - get + - list + - watch + - update + - patch + - delete diff --git a/assets/state-operator-validation/0500_daemonset.yaml b/assets/state-operator-validation/0500_daemonset.yaml index e25f060bf..72a7d72a6 100644 --- a/assets/state-operator-validation/0500_daemonset.yaml +++ b/assets/state-operator-validation/0500_daemonset.yaml @@ -35,6 +35,10 @@ spec: value: "true" - name: COMPONENT value: driver + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace securityContext: privileged: true seLinuxOptions: @@ -44,7 +48,7 @@ spec: mountPath: /host readOnly: true mountPropagation: HostToContainer - - name: driver-install-path + - name: driver-install-dir mountPath: /run/nvidia/driver mountPropagation: HostToContainer - name: run-nvidia-validations @@ -160,7 +164,7 @@ spec: hostPath: path: /run/nvidia/validations type: DirectoryOrCreate - - name: driver-install-path + - name: driver-install-dir hostPath: path: /run/nvidia/driver - name: host-root diff --git a/assets/state-sandbox-device-plugin/0200_role.yaml b/assets/state-sandbox-device-plugin/0200_role.yaml index 3e37487ce..2f5085e51 100644 --- a/assets/state-sandbox-device-plugin/0200_role.yaml +++ b/assets/state-sandbox-device-plugin/0200_role.yaml @@ -12,11 +12,3 @@ rules: - use resourceNames: - privileged -- apiGroups: - 
- "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - "get" diff --git a/assets/state-sandbox-device-plugin/0500_daemonset.yaml b/assets/state-sandbox-device-plugin/0500_daemonset.yaml index 6c9e2c24a..f99b6f075 100644 --- a/assets/state-sandbox-device-plugin/0500_daemonset.yaml +++ b/assets/state-sandbox-device-plugin/0500_daemonset.yaml @@ -67,6 +67,8 @@ spec: volumeMounts: - name: device-plugin mountPath: /var/lib/kubelet/device-plugins + - name: vfio + mountPath: /dev/vfio volumes: - name: device-plugin hostPath: @@ -75,3 +77,6 @@ spec: hostPath: path: /run/nvidia/validations type: DirectoryOrCreate + - name: vfio + hostPath: + path: /dev/vfio diff --git a/assets/state-sandbox-validation/0200_role.yaml b/assets/state-sandbox-validation/0200_role.yaml index d27405101..79da66ff7 100644 --- a/assets/state-sandbox-validation/0200_role.yaml +++ b/assets/state-sandbox-validation/0200_role.yaml @@ -12,9 +12,3 @@ rules: - use resourceNames: - privileged -- apiGroups: - - "" - resources: - - nodes - verbs: - - get diff --git a/assets/state-vfio-manager/0400_configmap.yaml b/assets/state-vfio-manager/0400_configmap.yaml index bf333bb92..85df8f8ca 100644 --- a/assets/state-vfio-manager/0400_configmap.yaml +++ b/assets/state-vfio-manager/0400_configmap.yaml @@ -94,6 +94,12 @@ data: echo "unbinding device $gpu" unbind_from_driver $gpu + #for graphics mode, we need to unbind the auxiliary device as well + aux_dev=$(get_graphics_aux_dev "$gpu") + if [ "$aux_dev" != "NONE" ]; then + echo "gpu $gpu is in graphics mode aux_dev $aux_dev" + unbind_from_driver "$aux_dev" + fi } unbind_all() { @@ -106,13 +112,9 @@ data: done } - bind_device() { + bind_pci_device() { local gpu=$1 - if ! is_nvidia_gpu_device $gpu; then - return 0 - fi - if ! is_bound_to_vfio $gpu; then unbind_from_other_driver $gpu echo "binding device $gpu" @@ -123,6 +125,48 @@ data: fi } + get_graphics_aux_dev() { + local gpu=$1 + device_class_file=$(readlink -f "/sys/bus/pci/devices/$gpu/class") + device_class=$(cat "$device_class_file") + if [ "$device_class" != "0x030000" ]; then + echo "NONE" + return + fi + + if ls "/sys/bus/pci/devices/$gpu" | grep consumer >& /dev/null; then + aux_dev=$(ls "/sys/bus/pci/devices/$gpu" | grep consumer | awk -Fconsumer:pci: '{print $2}') + if [ "$aux_dev" == "" ]; then + echo "NONE" + return + fi + + if ls "/sys/bus/pci/devices/$aux_dev/" >& /dev/null; then + echo "$aux_dev" + return + fi + fi + + echo "NONE" + } + + bind_device() { + local gpu=$1 + + if ! is_nvidia_gpu_device $gpu; then + echo "device $gpu is not a gpu!" 
+ return 0 + fi + + bind_pci_device "$gpu" + #for graphics mode, we need to bind the auxiliary device as well + aux_dev=$(get_graphics_aux_dev "$gpu") + if [ "$aux_dev" != "NONE" ]; then + echo "gpu $gpu is in graphics mode aux_dev $aux_dev" + bind_pci_device "$aux_dev" + fi + } + bind_all() { for dev in /sys/bus/pci/devices/*; do read vendor < $dev/vendor diff --git a/assets/state-vgpu-device-manager/0200_role.yaml b/assets/state-vgpu-device-manager/0200_role.yaml new file mode 100644 index 000000000..9b420d6f4 --- /dev/null +++ b/assets/state-vgpu-device-manager/0200_role.yaml @@ -0,0 +1,22 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: nvidia-vgpu-device-manager + namespace: "FILLED BY THE OPERATOR" +rules: +- apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use + resourceNames: + - privileged +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch diff --git a/assets/state-vgpu-device-manager/0210_clusterrole.yaml b/assets/state-vgpu-device-manager/0210_clusterrole.yaml index e3998da32..3d61f324b 100644 --- a/assets/state-vgpu-device-manager/0210_clusterrole.yaml +++ b/assets/state-vgpu-device-manager/0210_clusterrole.yaml @@ -14,8 +14,6 @@ rules: - "" resources: - nodes - - pods - - pods/eviction verbs: - get - list diff --git a/assets/state-vgpu-device-manager/0300_rolebinding.yaml b/assets/state-vgpu-device-manager/0300_rolebinding.yaml new file mode 100644 index 000000000..f50115a08 --- /dev/null +++ b/assets/state-vgpu-device-manager/0300_rolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: nvidia-vgpu-device-manager + namespace: "FILLED BY THE OPERATOR" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nvidia-vgpu-device-manager +subjects: +- kind: ServiceAccount + name: nvidia-vgpu-device-manager + namespace: "FILLED BY THE OPERATOR" diff --git a/assets/state-vgpu-device-manager/0500_configmap.yaml b/assets/state-vgpu-device-manager/0500_configmap.yaml index 684bbef6b..8490bbece 100644 --- a/assets/state-vgpu-device-manager/0500_configmap.yaml +++ b/assets/state-vgpu-device-manager/0500_configmap.yaml @@ -39,46 +39,6 @@ data: - devices: all vgpu-devices: A2-16Q: 1 - A10M-1Q: - - devices: all - vgpu-devices: - A10M-1Q: 20 - A10M-2Q: - - devices: all - vgpu-devices: - A10M-2Q: 10 - A10M-4C: - - devices: all - vgpu-devices: - A10M-4C: 5 - A10M-4Q: - - devices: all - vgpu-devices: - A10M-4Q: 5 - A10M-5C: - - devices: all - vgpu-devices: - A10M-5C: 4 - A10M-5Q: - - devices: all - vgpu-devices: - A10M-5Q: 4 - A10M-10C: - - devices: all - vgpu-devices: - A10M-10C: 2 - A10M-10Q: - - devices: all - vgpu-devices: - A10M-10Q: 2 - A10M-20C: - - devices: all - vgpu-devices: - A10M-20C: 1 - A10M-20Q: - - devices: all - vgpu-devices: - A10M-20Q: 1 A10-1Q: - devices: all vgpu-devices: @@ -175,6 +135,10 @@ data: - devices: all vgpu-devices: A30-2-12C: 2 + A30-2-12CME: + - devices: all + vgpu-devices: + A30-2-12CME: 1 A30-4C: - devices: all vgpu-devices: @@ -275,6 +239,10 @@ data: - devices: all vgpu-devices: A100D-1-10CME: 1 + A100D-1-20C: + - devices: all + vgpu-devices: + A100D-1-20C: 4 A100D-2-20C: - devices: all vgpu-devices: @@ -327,6 +295,10 @@ data: - devices: all vgpu-devices: A100DX-1-10CME: 1 + A100DX-1-20C: + - devices: all + vgpu-devices: + A100DX-1-20C: 4 A100DX-2-20C: - devices: all vgpu-devices: @@ -379,6 +351,10 @@ data: - devices: all vgpu-devices: A100X-1-5CME: 1 + A100X-1-10C: + - devices: all + 
vgpu-devices: + A100X-1-10C: 4 A100X-2-10C: - devices: all vgpu-devices: @@ -427,6 +403,10 @@ data: - devices: all vgpu-devices: A100-1-5CME: 1 + A100-1-10C: + - devices: all + vgpu-devices: + A100-1-10C: 4 A100-2-10C: - devices: all vgpu-devices: @@ -475,6 +455,10 @@ data: - devices: all vgpu-devices: A800D-1-10CME: 1 + A800D-1-20C: + - devices: all + vgpu-devices: + A800D-1-20C: 4 A800D-2-20C: - devices: all vgpu-devices: @@ -527,6 +511,10 @@ data: - devices: all vgpu-devices: A800DX-1-10CME: 1 + A800DX-1-20C: + - devices: all + vgpu-devices: + A800DX-1-20C: 4 A800DX-2-20C: - devices: all vgpu-devices: @@ -571,6 +559,374 @@ data: - devices: all vgpu-devices: A800DX-80C: 1 + A800-1-5C: + - devices: all + vgpu-devices: + A800-1-5C: 7 + A800-1-5CME: + - devices: all + vgpu-devices: + A800-1-5CME: 1 + A800-1-10C: + - devices: all + vgpu-devices: + A800-1-10C: 4 + A800-2-10C: + - devices: all + vgpu-devices: + A800-2-10C: 3 + A800-3-20C: + - devices: all + vgpu-devices: + A800-3-20C: 2 + A800-4C: + - devices: all + vgpu-devices: + A800-4C: 10 + A800-4-20C: + - devices: all + vgpu-devices: + A800-4-20C: 1 + A800-5C: + - devices: all + vgpu-devices: + A800-5C: 8 + A800-7-40C: + - devices: all + vgpu-devices: + A800-7-40C: 1 + A800-8C: + - devices: all + vgpu-devices: + A800-8C: 5 + A800-10C: + - devices: all + vgpu-devices: + A800-10C: 4 + A800-20C: + - devices: all + vgpu-devices: + A800-20C: 2 + A800-40C: + - devices: all + vgpu-devices: + A800-40C: 1 + GH200-1-12C: + - devices: all + vgpu-devices: + GH200-1-12C: 7 + GH200-1-12CME: + - devices: all + vgpu-devices: + GH200-1-12CME: 1 + GH200-1-24C: + - devices: all + vgpu-devices: + GH200-1-24C: 4 + GH200-2-24C: + - devices: all + vgpu-devices: + GH200-2-24C: 3 + GH200-3-48C: + - devices: all + vgpu-devices: + GH200-3-48C: 2 + GH200-4-48C: + - devices: all + vgpu-devices: + GH200-4-48C: 1 + GH200-7-96C: + - devices: all + vgpu-devices: + GH200-7-96C: 1 + GH200-96C: + - devices: all + vgpu-devices: + GH200-96C: 1 + H20-1-12C: + - devices: all + vgpu-devices: + H20-1-12C: 7 + H20-1-12CME: + - devices: all + vgpu-devices: + H20-1-12CME: 1 + H20-1-24C: + - devices: all + vgpu-devices: + H20-1-24C: 4 + H20-2-24C: + - devices: all + vgpu-devices: + H20-2-24C: 3 + H20-3-48C: + - devices: all + vgpu-devices: + H20-3-48C: 2 + H20-4C: + - devices: all + vgpu-devices: + H20-4C: 24 + H20-4-48C: + - devices: all + vgpu-devices: + H20-4-48C: 1 + H20-6C: + - devices: all + vgpu-devices: + H20-6C: 16 + H20-7-96C: + - devices: all + vgpu-devices: + H20-7-96C: 1 + H20-8C: + - devices: all + vgpu-devices: + H20-8C: 12 + H20-12C: + - devices: all + vgpu-devices: + H20-12C: 8 + H20-16C: + - devices: all + vgpu-devices: + H20-16C: 6 + H20-24C: + - devices: all + vgpu-devices: + H20-24C: 4 + H20-32C: + - devices: all + vgpu-devices: + H20-32C: 3 + H20-48C: + - devices: all + vgpu-devices: + H20-48C: 2 + H20-96C: + - devices: all + vgpu-devices: + H20-96C: 1 + H100L-1-12C: + - devices: all + vgpu-devices: + H100L-1-12C: 7 + H100L-1-12CME: + - devices: all + vgpu-devices: + H100L-1-12CME: 1 + H100L-1-24C: + - devices: all + vgpu-devices: + H100L-1-24C: 4 + H100L-2-24C: + - devices: all + vgpu-devices: + H100L-2-24C: 3 + H100L-3-47C: + - devices: all + vgpu-devices: + H100L-3-47C: 2 + H100L-4C: + - devices: all + vgpu-devices: + H100L-4C: 23 + H100L-4-47C: + - devices: all + vgpu-devices: + H100L-4-47C: 1 + H100L-6C: + - devices: all + vgpu-devices: + H100L-6C: 15 + H100L-7-94C: + - devices: all + vgpu-devices: + H100L-7-94C: 1 + H100L-11C: + - devices: all + 
vgpu-devices: + H100L-11C: 8 + H100L-15C: + - devices: all + vgpu-devices: + H100L-15C: 6 + H100L-23C: + - devices: all + vgpu-devices: + H100L-23C: 4 + H100L-47C: + - devices: all + vgpu-devices: + H100L-47C: 2 + H100L-94C: + - devices: all + vgpu-devices: + H100L-94C: 1 + H100XL-1-12C: + - devices: all + vgpu-devices: + H100XL-1-12C: 7 + H100XL-1-12CME: + - devices: all + vgpu-devices: + H100XL-1-12CME: 1 + H100XL-1-24C: + - devices: all + vgpu-devices: + H100XL-1-24C: 4 + H100XL-2-24C: + - devices: all + vgpu-devices: + H100XL-2-24C: 3 + H100XL-3-47C: + - devices: all + vgpu-devices: + H100XL-3-47C: 2 + H100XL-4C: + - devices: all + vgpu-devices: + H100XL-4C: 23 + H100XL-4-47C: + - devices: all + vgpu-devices: + H100XL-4-47C: 1 + H100XL-6C: + - devices: all + vgpu-devices: + H100XL-6C: 15 + H100XL-7-94C: + - devices: all + vgpu-devices: + H100XL-7-94C: 1 + H100XL-11C: + - devices: all + vgpu-devices: + H100XL-11C: 8 + H100XL-15C: + - devices: all + vgpu-devices: + H100XL-15C: 6 + H100XL-23C: + - devices: all + vgpu-devices: + H100XL-23C: 4 + H100XL-47C: + - devices: all + vgpu-devices: + H100XL-47C: 2 + H100XL-94C: + - devices: all + vgpu-devices: + H100XL-94C: 1 + H100XM-1-10C: + - devices: all + vgpu-devices: + H100XM-1-10C: 7 + H100XM-1-10CME: + - devices: all + vgpu-devices: + H100XM-1-10CME: 1 + H100XM-1-20C: + - devices: all + vgpu-devices: + H100XM-1-20C: 4 + H100XM-2-20C: + - devices: all + vgpu-devices: + H100XM-2-20C: 3 + H100XM-3-40C: + - devices: all + vgpu-devices: + H100XM-3-40C: 2 + H100XM-4C: + - devices: all + vgpu-devices: + H100XM-4C: 20 + H100XM-4-40C: + - devices: all + vgpu-devices: + H100XM-4-40C: 1 + H100XM-5C: + - devices: all + vgpu-devices: + H100XM-5C: 16 + H100XM-7-80C: + - devices: all + vgpu-devices: + H100XM-7-80C: 1 + H100XM-8C: + - devices: all + vgpu-devices: + H100XM-8C: 10 + H100XM-10C: + - devices: all + vgpu-devices: + H100XM-10C: 8 + H100XM-16C: + - devices: all + vgpu-devices: + H100XM-16C: 5 + H100XM-20C: + - devices: all + vgpu-devices: + H100XM-20C: 4 + H100XM-40C: + - devices: all + vgpu-devices: + H100XM-40C: 2 + H100XM-80C: + - devices: all + vgpu-devices: + H100XM-80C: 1 + H100XS-1-8C: + - devices: all + vgpu-devices: + H100XS-1-8C: 7 + H100XS-1-8CME: + - devices: all + vgpu-devices: + H100XS-1-8CME: 1 + H100XS-1-16C: + - devices: all + vgpu-devices: + H100XS-1-16C: 4 + H100XS-2-16C: + - devices: all + vgpu-devices: + H100XS-2-16C: 3 + H100XS-3-32C: + - devices: all + vgpu-devices: + H100XS-3-32C: 2 + H100XS-4C: + - devices: all + vgpu-devices: + H100XS-4C: 16 + H100XS-4-32C: + - devices: all + vgpu-devices: + H100XS-4-32C: 1 + H100XS-7-64C: + - devices: all + vgpu-devices: + H100XS-7-64C: 1 + H100XS-8C: + - devices: all + vgpu-devices: + H100XS-8C: 8 + H100XS-16C: + - devices: all + vgpu-devices: + H100XS-16C: 4 + H100XS-32C: + - devices: all + vgpu-devices: + H100XS-32C: 2 + H100XS-64C: + - devices: all + vgpu-devices: + H100XS-64C: 1 H100-1-10C: - devices: all vgpu-devices: @@ -579,6 +935,10 @@ data: - devices: all vgpu-devices: H100-1-10CME: 1 + H100-1-20C: + - devices: all + vgpu-devices: + H100-1-20C: 4 H100-2-20C: - devices: all vgpu-devices: @@ -627,6 +987,122 @@ data: - devices: all vgpu-devices: H100-80C: 1 + H800L-1-12C: + - devices: all + vgpu-devices: + H800L-1-12C: 7 + H800L-1-12CME: + - devices: all + vgpu-devices: + H800L-1-12CME: 1 + H800L-1-24C: + - devices: all + vgpu-devices: + H800L-1-24C: 4 + H800L-2-24C: + - devices: all + vgpu-devices: + H800L-2-24C: 3 + H800L-3-47C: + - devices: all + vgpu-devices: + H800L-3-47C: 2 
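Every entry in this ConfigMap follows the same three-level schema: the key is a vGPU type name, the value is a list of rules, and each rule pairs a `devices` selector with a `vgpu-devices` map giving the number of instances of that type to create per physical GPU. The counts follow from framebuffer size: a time-sliced profile such as H800L-4C (4 GB slices of a 94 GB H800 NVL) yields 23 instances, while MIG-backed profiles such as H800L-1-12C map one-to-one onto MIG slices (seven 1g.12gb instances). A minimal sketch of a custom entry in the same schema (the configuration name here is illustrative, not part of this diff):

```yaml
# Hypothetical named configuration following the schema used throughout
# this ConfigMap. "devices: all" applies the rule to every eligible GPU
# on the node; the count must not exceed what the physical GPU supports.
my-h800l-timesliced:
  - devices: all
    vgpu-devices:
      H800L-4C: 23   # 94 GB framebuffer / 4 GB profile => 23 vGPUs per GPU
```

If this mirrors upstream behavior, the vGPU Device Manager selects one named configuration per node via a node label (analogous to MIG config selection), and all GPUs matched by a rule are configured homogeneously.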
+ H800L-4C: + - devices: all + vgpu-devices: + H800L-4C: 23 + H800L-4-47C: + - devices: all + vgpu-devices: + H800L-4-47C: 1 + H800L-6C: + - devices: all + vgpu-devices: + H800L-6C: 15 + H800L-7-94C: + - devices: all + vgpu-devices: + H800L-7-94C: 1 + H800L-11C: + - devices: all + vgpu-devices: + H800L-11C: 8 + H800L-15C: + - devices: all + vgpu-devices: + H800L-15C: 6 + H800L-23C: + - devices: all + vgpu-devices: + H800L-23C: 4 + H800L-47C: + - devices: all + vgpu-devices: + H800L-47C: 2 + H800L-94C: + - devices: all + vgpu-devices: + H800L-94C: 1 + H800XM-1-10C: + - devices: all + vgpu-devices: + H800XM-1-10C: 7 + H800XM-1-10CME: + - devices: all + vgpu-devices: + H800XM-1-10CME: 1 + H800XM-1-20C: + - devices: all + vgpu-devices: + H800XM-1-20C: 4 + H800XM-2-20C: + - devices: all + vgpu-devices: + H800XM-2-20C: 3 + H800XM-3-40C: + - devices: all + vgpu-devices: + H800XM-3-40C: 2 + H800XM-4C: + - devices: all + vgpu-devices: + H800XM-4C: 20 + H800XM-4-40C: + - devices: all + vgpu-devices: + H800XM-4-40C: 1 + H800XM-5C: + - devices: all + vgpu-devices: + H800XM-5C: 16 + H800XM-7-80C: + - devices: all + vgpu-devices: + H800XM-7-80C: 1 + H800XM-8C: + - devices: all + vgpu-devices: + H800XM-8C: 10 + H800XM-10C: + - devices: all + vgpu-devices: + H800XM-10C: 8 + H800XM-16C: + - devices: all + vgpu-devices: + H800XM-16C: 5 + H800XM-20C: + - devices: all + vgpu-devices: + H800XM-20C: 4 + H800XM-40C: + - devices: all + vgpu-devices: + H800XM-40C: 2 + H800XM-80C: + - devices: all + vgpu-devices: + H800XM-80C: 1 H800-1-10C: - devices: all vgpu-devices: @@ -635,6 +1111,10 @@ data: - devices: all vgpu-devices: H800-1-10CME: 1 + H800-1-20C: + - devices: all + vgpu-devices: + H800-1-20C: 4 H800-2-20C: - devices: all vgpu-devices: @@ -683,6 +1163,58 @@ data: - devices: all vgpu-devices: H800-80C: 1 + L2-1Q: + - devices: all + vgpu-devices: + L2-1Q: 24 + L2-2Q: + - devices: all + vgpu-devices: + L2-2Q: 12 + L2-3Q: + - devices: all + vgpu-devices: + L2-3Q: 8 + L2-4C: + - devices: all + vgpu-devices: + L2-4C: 6 + L2-4Q: + - devices: all + vgpu-devices: + L2-4Q: 6 + L2-6C: + - devices: all + vgpu-devices: + L2-6C: 4 + L2-6Q: + - devices: all + vgpu-devices: + L2-6Q: 4 + L2-8C: + - devices: all + vgpu-devices: + L2-8C: 3 + L2-8Q: + - devices: all + vgpu-devices: + L2-8Q: 3 + L2-12C: + - devices: all + vgpu-devices: + L2-12C: 2 + L2-12Q: + - devices: all + vgpu-devices: + L2-12Q: 2 + L2-24C: + - devices: all + vgpu-devices: + L2-24C: 1 + L2-24Q: + - devices: all + vgpu-devices: + L2-24Q: 1 L4-1Q: - devices: all vgpu-devices: @@ -735,338 +1267,318 @@ data: - devices: all vgpu-devices: L4-24Q: 1 - L40-1Q: + L20-1Q: - devices: all vgpu-devices: - L40-1Q: 32 - L40-2Q: + L20-1Q: 32 + L20-2Q: - devices: all vgpu-devices: - L40-2Q: 24 - L40-3Q: + L20-2Q: 24 + L20-3Q: - devices: all vgpu-devices: - L40-3Q: 16 - L40-4C: + L20-3Q: 16 + L20-4C: - devices: all vgpu-devices: - L40-4C: 12 - L40-4Q: + L20-4C: 12 + L20-4Q: - devices: all vgpu-devices: - L40-4Q: 12 - L40-6C: + L20-4Q: 12 + L20-6C: - devices: all vgpu-devices: - L40-6C: 8 - L40-6Q: + L20-6C: 8 + L20-6Q: - devices: all vgpu-devices: - L40-6Q: 8 - L40-8C: + L20-6Q: 8 + L20-8C: - devices: all vgpu-devices: - L40-8C: 6 - L40-8Q: + L20-8C: 6 + L20-8Q: - devices: all vgpu-devices: - L40-8Q: 6 - L40-12C: + L20-8Q: 6 + L20-12C: - devices: all vgpu-devices: - L40-12C: 4 - L40-12Q: + L20-12C: 4 + L20-12Q: - devices: all vgpu-devices: - L40-12Q: 4 - L40-16C: + L20-12Q: 4 + L20-16C: - devices: all vgpu-devices: - L40-16C: 3 - L40-16Q: + L20-16C: 3 + L20-16Q: - devices: 
all vgpu-devices: - L40-16Q: 3 - L40-24C: + L20-16Q: 3 + L20-24C: - devices: all vgpu-devices: - L40-24C: 2 - L40-24Q: + L20-24C: 2 + L20-24Q: - devices: all vgpu-devices: - L40-24Q: 2 - L40-48C: + L20-24Q: 2 + L20-48C: - devices: all vgpu-devices: - L40-48C: 1 - L40-48Q: + L20-48C: 1 + L20-48Q: - devices: all vgpu-devices: - L40-48Q: 1 - M6-0Q: + L20-48Q: 1 + L40S-1Q: - devices: all vgpu-devices: - M6-0Q: 16 - M6-1Q: + L40S-1Q: 32 + L40S-2Q: - devices: all vgpu-devices: - M6-1Q: 8 - M6-2Q: + L40S-2Q: 24 + L40S-3Q: - devices: all vgpu-devices: - M6-2Q: 4 - M6-4Q: + L40S-3Q: 16 + L40S-4C: - devices: all vgpu-devices: - M6-4Q: 2 - M6-8Q: + L40S-4C: 12 + L40S-4Q: - devices: all vgpu-devices: - M6-8Q: 1 - M10-0Q: + L40S-4Q: 12 + L40S-6C: - devices: all vgpu-devices: - M10-0Q: 16 - M10-1Q: + L40S-6C: 8 + L40S-6Q: - devices: all vgpu-devices: - M10-1Q: 8 - M10-2Q: + L40S-6Q: 8 + L40S-8C: - devices: all vgpu-devices: - M10-2Q: 4 - M10-4Q: + L40S-8C: 6 + L40S-8Q: - devices: all vgpu-devices: - M10-4Q: 2 - M10-8Q: + L40S-8Q: 6 + L40S-12C: - devices: all vgpu-devices: - M10-8Q: 1 - M60-0Q: + L40S-12C: 4 + L40S-12Q: - devices: all vgpu-devices: - M60-0Q: 16 - M60-1Q: + L40S-12Q: 4 + L40S-16C: - devices: all vgpu-devices: - M60-1Q: 8 - M60-2Q: + L40S-16C: 3 + L40S-16Q: - devices: all vgpu-devices: - M60-2Q: 4 - M60-4Q: + L40S-16Q: 3 + L40S-24C: - devices: all vgpu-devices: - M60-4Q: 2 - M60-8Q: + L40S-24C: 2 + L40S-24Q: - devices: all vgpu-devices: - M60-8Q: 1 - P4-1Q: + L40S-24Q: 2 + L40S-48C: - devices: all vgpu-devices: - P4-1Q: 8 - P4-2Q: + L40S-48C: 1 + L40S-48Q: - devices: all vgpu-devices: - P4-2Q: 4 - P4-4C: - - devices: all - vgpu-devices: - P4-4C: 2 - P4-4Q: - - devices: all - vgpu-devices: - P4-4Q: 2 - P4-8C: - - devices: all - vgpu-devices: - P4-8C: 1 - P4-8Q: - - devices: all - vgpu-devices: - P4-8Q: 1 - P6-1Q: - - devices: all - vgpu-devices: - P6-1Q: 16 - P6-2Q: + L40S-48Q: 1 + L40-1Q: - devices: all vgpu-devices: - P6-2Q: 8 - P6-4C: + L40-1Q: 32 + L40-2Q: - devices: all vgpu-devices: - P6-4C: 4 - P6-4Q: + L40-2Q: 24 + L40-3Q: - devices: all vgpu-devices: - P6-4Q: 4 - P6-8C: + L40-3Q: 16 + L40-4C: - devices: all vgpu-devices: - P6-8C: 2 - P6-8Q: + L40-4C: 12 + L40-4Q: - devices: all vgpu-devices: - P6-8Q: 2 - P6-16C: + L40-4Q: 12 + L40-6C: - devices: all vgpu-devices: - P6-16C: 1 - P6-16Q: + L40-6C: 8 + L40-6Q: - devices: all vgpu-devices: - P6-16Q: 1 - P40-1Q: + L40-6Q: 8 + L40-8C: - devices: all vgpu-devices: - P40-1Q: 24 - P40-2Q: + L40-8C: 6 + L40-8Q: - devices: all vgpu-devices: - P40-2Q: 12 - P40-3Q: + L40-8Q: 6 + L40-12C: - devices: all vgpu-devices: - P40-3Q: 8 - P40-4C: + L40-12C: 4 + L40-12Q: - devices: all vgpu-devices: - P40-4C: 6 - P40-4Q: + L40-12Q: 4 + L40-16C: - devices: all vgpu-devices: - P40-4Q: 6 - P40-6C: + L40-16C: 3 + L40-16Q: - devices: all vgpu-devices: - P40-6C: 4 - P40-6Q: + L40-16Q: 3 + L40-24C: - devices: all vgpu-devices: - P40-6Q: 4 - P40-8C: + L40-24C: 2 + L40-24Q: - devices: all vgpu-devices: - P40-8C: 3 - P40-8Q: + L40-24Q: 2 + L40-48C: - devices: all vgpu-devices: - P40-8Q: 3 - P40-12C: + L40-48C: 1 + L40-48Q: - devices: all vgpu-devices: - P40-12C: 2 - P40-12Q: + L40-48Q: 1 + RTX5000-Ada-1Q: - devices: all vgpu-devices: - P40-12Q: 2 - P40-24C: + RTX5000-Ada-1Q: 32 + RTX5000-Ada-2Q: - devices: all vgpu-devices: - P40-24C: 1 - P40-24Q: + RTX5000-Ada-2Q: 16 + RTX5000-Ada-4C: - devices: all vgpu-devices: - P40-24Q: 1 - P100C-1Q: + RTX5000-Ada-4C: 8 + RTX5000-Ada-4Q: - devices: all vgpu-devices: - P100C-1Q: 12 - P100C-2Q: + RTX5000-Ada-4Q: 8 + 
RTX5000-Ada-8C: - devices: all vgpu-devices: - P100C-2Q: 6 - P100C-4C: + RTX5000-Ada-8C: 4 + RTX5000-Ada-8Q: - devices: all vgpu-devices: - P100C-4C: 3 - P100C-4Q: + RTX5000-Ada-8Q: 4 + RTX5000-Ada-16C: - devices: all vgpu-devices: - P100C-4Q: 3 - P100C-6C: + RTX5000-Ada-16C: 2 + RTX5000-Ada-16Q: - devices: all vgpu-devices: - P100C-6C: 2 - P100C-6Q: + RTX5000-Ada-16Q: 2 + RTX5000-Ada-32C: - devices: all vgpu-devices: - P100C-6Q: 2 - P100C-12C: + RTX5000-Ada-32C: 1 + RTX5000-Ada-32Q: - devices: all vgpu-devices: - P100C-12C: 1 - P100C-12Q: + RTX5000-Ada-32Q: 1 + RTX5880-Ada-1Q: - devices: all vgpu-devices: - P100C-12Q: 1 - P100X-1Q: + RTX5880-Ada-1Q: 32 + RTX5880-Ada-2Q: - devices: all vgpu-devices: - P100X-1Q: 16 - P100X-2Q: + RTX5880-Ada-2Q: 24 + RTX5880-Ada-3Q: - devices: all vgpu-devices: - P100X-2Q: 8 - P100X-4C: + RTX5880-Ada-3Q: 16 + RTX5880-Ada-4C: - devices: all vgpu-devices: - P100X-4C: 4 - P100X-4Q: + RTX5880-Ada-4C: 12 + RTX5880-Ada-4Q: - devices: all vgpu-devices: - P100X-4Q: 4 - P100X-8C: + RTX5880-Ada-4Q: 12 + RTX5880-Ada-6C: - devices: all vgpu-devices: - P100X-8C: 2 - P100X-8Q: + RTX5880-Ada-6C: 8 + RTX5880-Ada-6Q: - devices: all vgpu-devices: - P100X-8Q: 2 - P100X-16C: + RTX5880-Ada-6Q: 8 + RTX5880-Ada-8C: - devices: all vgpu-devices: - P100X-16C: 1 - P100X-16Q: + RTX5880-Ada-8C: 6 + RTX5880-Ada-8Q: - devices: all vgpu-devices: - P100X-16Q: 1 - P100-1Q: + RTX5880-Ada-8Q: 6 + RTX5880-Ada-12C: - devices: all vgpu-devices: - P100-1Q: 16 - P100-2Q: + RTX5880-Ada-12C: 4 + RTX5880-Ada-12Q: - devices: all vgpu-devices: - P100-2Q: 8 - P100-4C: + RTX5880-Ada-12Q: 4 + RTX5880-Ada-16C: - devices: all vgpu-devices: - P100-4C: 4 - P100-4Q: + RTX5880-Ada-16C: 3 + RTX5880-Ada-16Q: - devices: all vgpu-devices: - P100-4Q: 4 - P100-8C: + RTX5880-Ada-16Q: 3 + RTX5880-Ada-24C: - devices: all vgpu-devices: - P100-8C: 2 - P100-8Q: + RTX5880-Ada-24C: 2 + RTX5880-Ada-24Q: - devices: all vgpu-devices: - P100-8Q: 2 - P100-16C: + RTX5880-Ada-24Q: 2 + RTX5880-Ada-48C: - devices: all vgpu-devices: - P100-16C: 1 - P100-16Q: + RTX5880-Ada-48C: 1 + RTX5880-Ada-48Q: - devices: all vgpu-devices: - P100-16Q: 1 + RTX5880-Ada-48Q: 1 RTX6000P-1Q: - devices: all vgpu-devices: @@ -1119,58 +1631,74 @@ data: - devices: all vgpu-devices: RTX6000P-24Q: 1 - RTX6000-1Q: + RTX6000-Ada-1Q: + - devices: all + vgpu-devices: + RTX6000-Ada-1Q: 32 + RTX6000-Ada-2Q: - devices: all vgpu-devices: - RTX6000-1Q: 24 - RTX6000-2Q: + RTX6000-Ada-2Q: 24 + RTX6000-Ada-3Q: - devices: all vgpu-devices: - RTX6000-2Q: 12 - RTX6000-3Q: + RTX6000-Ada-3Q: 16 + RTX6000-Ada-4C: - devices: all vgpu-devices: - RTX6000-3Q: 8 - RTX6000-4C: + RTX6000-Ada-4C: 12 + RTX6000-Ada-4Q: - devices: all vgpu-devices: - RTX6000-4C: 6 - RTX6000-4Q: + RTX6000-Ada-4Q: 12 + RTX6000-Ada-6C: - devices: all vgpu-devices: - RTX6000-4Q: 6 - RTX6000-6C: + RTX6000-Ada-6C: 8 + RTX6000-Ada-6Q: - devices: all vgpu-devices: - RTX6000-6C: 4 - RTX6000-6Q: + RTX6000-Ada-6Q: 8 + RTX6000-Ada-8C: - devices: all vgpu-devices: - RTX6000-6Q: 4 - RTX6000-8C: + RTX6000-Ada-8C: 6 + RTX6000-Ada-8Q: - devices: all vgpu-devices: - RTX6000-8C: 3 - RTX6000-8Q: + RTX6000-Ada-8Q: 6 + RTX6000-Ada-12C: - devices: all vgpu-devices: - RTX6000-8Q: 3 - RTX6000-12C: + RTX6000-Ada-12C: 4 + RTX6000-Ada-12Q: - devices: all vgpu-devices: - RTX6000-12C: 2 - RTX6000-12Q: + RTX6000-Ada-12Q: 4 + RTX6000-Ada-16C: - devices: all vgpu-devices: - RTX6000-12Q: 2 - RTX6000-24C: + RTX6000-Ada-16C: 3 + RTX6000-Ada-16Q: - devices: all vgpu-devices: - RTX6000-24C: 1 - RTX6000-24Q: + RTX6000-Ada-16Q: 3 + 
RTX6000-Ada-24C: - devices: all vgpu-devices: - RTX6000-24Q: 1 + RTX6000-Ada-24C: 2 + RTX6000-Ada-24Q: + - devices: all + vgpu-devices: + RTX6000-Ada-24Q: 2 + RTX6000-Ada-48C: + - devices: all + vgpu-devices: + RTX6000-Ada-48C: 1 + RTX6000-Ada-48Q: + - devices: all + vgpu-devices: + RTX6000-Ada-48Q: 1 RTX8000P-1Q: - devices: all vgpu-devices: @@ -1239,74 +1767,6 @@ data: - devices: all vgpu-devices: RTX8000P-48Q: 1 - RTX8000-1Q: - - devices: all - vgpu-devices: - RTX8000-1Q: 32 - RTX8000-2Q: - - devices: all - vgpu-devices: - RTX8000-2Q: 24 - RTX8000-3Q: - - devices: all - vgpu-devices: - RTX8000-3Q: 16 - RTX8000-4C: - - devices: all - vgpu-devices: - RTX8000-4C: 8 - RTX8000-4Q: - - devices: all - vgpu-devices: - RTX8000-4Q: 12 - RTX8000-6C: - - devices: all - vgpu-devices: - RTX8000-6C: 8 - RTX8000-6Q: - - devices: all - vgpu-devices: - RTX8000-6Q: 8 - RTX8000-8C: - - devices: all - vgpu-devices: - RTX8000-8C: 6 - RTX8000-8Q: - - devices: all - vgpu-devices: - RTX8000-8Q: 6 - RTX8000-12C: - - devices: all - vgpu-devices: - RTX8000-12C: 4 - RTX8000-12Q: - - devices: all - vgpu-devices: - RTX8000-12Q: 4 - RTX8000-16C: - - devices: all - vgpu-devices: - RTX8000-16C: 3 - RTX8000-16Q: - - devices: all - vgpu-devices: - RTX8000-16Q: 3 - RTX8000-24C: - - devices: all - vgpu-devices: - RTX8000-24C: 2 - RTX8000-24Q: - - devices: all - vgpu-devices: - RTX8000-24Q: 2 - RTX8000-48C: - - devices: all - vgpu-devices: - RTX8000-48C: 1 - RTX8000-48Q: - - devices: all - vgpu-devices: - RTX8000-48Q: 1 RTXA5000-1Q: - devices: all vgpu-devices: @@ -1728,42 +2188,6 @@ data: vgpu-devices: V100-16Q: 1 default: - - device-filter: "0x13BD10DE" - devices: all - vgpu-devices: - M10-4Q: 2 - - device-filter: "0x13F210DE" - devices: all - vgpu-devices: - M60-4Q: 2 - - device-filter: "0x13F310DE" - devices: all - vgpu-devices: - M6-4Q: 2 - - device-filter: "0x15F710DE" - devices: all - vgpu-devices: - P100C-6Q: 2 - - device-filter: "0x15F810DE" - devices: all - vgpu-devices: - P100-8Q: 2 - - device-filter: "0x15F910DE" - devices: all - vgpu-devices: - P100X-8Q: 2 - - device-filter: "0x1B3810DE" - devices: all - vgpu-devices: - P40-12Q: 2 - - device-filter: "0x1BB310DE" - devices: all - vgpu-devices: - P4-4Q: 2 - - device-filter: "0x1BB410DE" - devices: all - vgpu-devices: - P6-8Q: 2 - device-filter: "0x1DB110DE" devices: all vgpu-devices: @@ -1788,14 +2212,6 @@ data: devices: all vgpu-devices: V100S-16Q: 2 - - device-filter: "0x1E3010DE" - devices: all - vgpu-devices: - RTX8000-24Q: 2 - - device-filter: "0x1E3010DE" - devices: all - vgpu-devices: - RTX6000-12Q: 2 - device-filter: "0x1E7810DE" devices: all vgpu-devices: @@ -1832,6 +2248,10 @@ data: devices: all vgpu-devices: A30-12C: 2 + - device-filter: "0x20B710DE" + devices: all + vgpu-devices: + A30-12C: 2 - device-filter: "0x20B910DE" devices: all vgpu-devices: @@ -1848,6 +2268,14 @@ data: devices: all vgpu-devices: A800D-40C: 2 + - device-filter: "0x20FD10DE" + devices: all + vgpu-devices: + A800D-40C: 2 + - device-filter: "0x20F610DE" + devices: all + vgpu-devices: + A800-20C: 2 - device-filter: "0x223010DE" devices: all vgpu-devices: @@ -1868,18 +2296,62 @@ data: devices: all vgpu-devices: A10-12Q: 2 - - device-filter: "0x223810DE" + - device-filter: "0x232110DE" devices: all vgpu-devices: - A10M-10Q: 2 + H100L-47C: 2 - device-filter: "0x232210DE" devices: all vgpu-devices: H800-40C: 2 + - device-filter: "0x232410DE" + devices: all + vgpu-devices: + H800XM-40C: 2 + - device-filter: "0x232910DE" + devices: all + vgpu-devices: + H20-48C: 2 + - device-filter: "0x233010DE" + devices: all + vgpu-devices: + H100XM-40C: 2 + - device-filter: "0x233610DE" + devices: all + vgpu-devices: + H100XM-40C: 2 - device-filter: "0x233110DE" devices: all vgpu-devices: H100-40C: 2 + - device-filter: "0x233710DE" + devices: all + vgpu-devices: + H100XS-32C: 2 + - device-filter: "0x233910DE" + devices: all + vgpu-devices: + H100XL-47C: 2 + - device-filter: "0x233A10DE" + devices: all + vgpu-devices: + H800L-47C: 2 + - device-filter: "0x234210DE" + devices: all + vgpu-devices: + GH200-96C: 1 - device-filter: "0x25B610DE" devices: all vgpu-devices: @@ -1888,10 +2360,38 @@ data: devices: all vgpu-devices: A2-8Q: 2 + - device-filter: "0x26B110DE" + devices: all + vgpu-devices: + RTX6000-Ada-24Q: 2 + - device-filter: "0x26B210DE" + devices: all + vgpu-devices: + RTX5000-Ada-16Q: 2 + - device-filter: "0x26B310DE" + devices: all + vgpu-devices: + RTX5880-Ada-24Q: 2 - device-filter: "0x26B510DE" devices: all vgpu-devices: L40-24Q: 2 + - device-filter: "0x26B910DE" + devices: all + vgpu-devices: + L40S-24Q: 2 + - device-filter: "0x26BA10DE" + devices: all + vgpu-devices: + L20-24Q: 2 + - device-filter: "0x27B610DE" + devices: all + vgpu-devices: + L2-12Q: 2 - device-filter: "0x27B810DE" devices: all vgpu-devices: diff --git a/assets/state-vgpu-manager/0200_role.yaml b/assets/state-vgpu-manager/0200_role.yaml index 834cf6d15..5b6818ca0 100644 --- a/assets/state-vgpu-manager/0200_role.yaml +++ b/assets/state-vgpu-manager/0200_role.yaml @@ -12,11 +12,3 @@ rules: - use resourceNames: - privileged -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - "get" diff --git a/assets/state-vgpu-manager/0210_clusterrole.yaml b/assets/state-vgpu-manager/0210_clusterrole.yaml index e088f3d4a..549734150 100644 --- a/assets/state-vgpu-manager/0210_clusterrole.yaml +++ b/assets/state-vgpu-manager/0210_clusterrole.yaml @@ -14,9 +14,26 @@ rules: - "" resources: - nodes - - pods verbs: - get - list - patch - watch +- apiGroups: + - "" + resources: + - pods + - pods/eviction + verbs: + - create + - delete + - get + - list + - patch + - update +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get diff --git a/assets/state-vgpu-manager/0500_daemonset.yaml b/assets/state-vgpu-manager/0500_daemonset.yaml index a46211b8c..5824c8810 100644 --- a/assets/state-vgpu-manager/0500_daemonset.yaml +++ b/assets/state-vgpu-manager/0500_daemonset.yaml @@ -84,6 +84,10 @@ spec: mountPath: /var/log - name: dev-log mountPath: /dev/log + - name: firmware-search-path + mountPath: /sys/module/firmware_class/parameters/path + - name: nv-firmware + mountPath: /lib/firmware # Only kept when OpenShift DriverToolkit side-car is enabled.
- image: "FILLED BY THE OPERATOR" imagePullPolicy: IfNotPresent @@ -114,6 +118,10 @@ spec: mountPath: /var/log - name: dev-log mountPath: /dev/log + - name: firmware-search-path + mountPath: /sys/module/firmware_class/parameters/path + - name: nv-firmware + mountPath: /lib/firmware volumes: - name: run-nvidia hostPath: @@ -144,3 +152,10 @@ spec: - name: dev-log hostPath: path: /dev/log + - name: firmware-search-path + hostPath: + path: /sys/module/firmware_class/parameters/path + - name: nv-firmware + hostPath: + path: /run/nvidia/driver/lib/firmware + type: DirectoryOrCreate diff --git a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml index f99de06f5..4ab58349d 100644 --- a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml +++ b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -18,7 +18,7 @@ metadata: features.operators.openshift.io/cnf: "false" features.operators.openshift.io/cni: "false" features.operators.openshift.io/csi: "false" - olm.skipRange: '>=1.9.0 <23.9.2' + olm.skipRange: '>=1.9.0 <24.9.0' alm-examples: |- [ { @@ -101,6 +101,9 @@ metadata: "config": { "name": "", "default": "" + }, + "mps": { + "root": "/run/nvidia/mps" } }, "gfd": { @@ -158,7 +161,7 @@ metadata: "driverType": "gpu", "repository": "nvcr.io/nvidia", "image": "driver", - "version": "sha256:7481a3697783dcdca9ae78e7b548a6900e86ea33ab49ec14f0ba55db2fdb1a2e", + "version": "sha256:78e00fc8aa72f5f4925d54b4f9fbb725ca2168e890d90eadd6a497b7f31ccc18", "nodeSelector": {}, "manager": {}, "repoConfig": { @@ -186,51 +189,53 @@ metadata: capabilities: Deep Insights categories: AI/Machine Learning, OpenShift Optional certified: "true" - containerImage: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator:master-latest-ubi8 + containerImage: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator:main-latest createdAt: "Thu Jul 27 13:57:56 PDT 2023" description: Automate the management and monitoring of NVIDIA GPUs. 
provider: NVIDIA repository: http://github.com/NVIDIA/gpu-operator support: NVIDIA - name: gpu-operator-certified.v23.9.2 + name: gpu-operator-certified.v24.9.0 namespace: placeholder spec: apiservicedefinitions: {} relatedImages: - name: gpu-operator-image - image: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator:master-latest-ubi8 + image: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator:main-latest - name: dcgm-exporter-image - image: nvcr.io/nvidia/k8s/dcgm-exporter@sha256:011fb450af3fa2e8fe5d28d590e4c653631447bc23d149591ced3d89089c4f2c + image: nvcr.io/nvidia/k8s/dcgm-exporter@sha256:857f7669ae5ce2a43a3b5691fa970085098321ef8e90a896fa8dda3dee5b1d2b - name: dcgm-image - image: nvcr.io/nvidia/cloud-native/dcgm@sha256:6a05d6a1923fda756aed0dddf7ed23a83c30cf1e6c519fc39dd70c0309ec8257 + image: nvcr.io/nvidia/cloud-native/dcgm@sha256:e8398b2451996e5c64fc3855e7a3c93b667a85f69c0e259049d3b0012c21545d - name: container-toolkit-image - image: nvcr.io/nvidia/k8s/container-toolkit@sha256:59a3875e7a37eb370385e654184efa3a1b193c9ea352165818496b19cbe14aa4 + image: nvcr.io/nvidia/k8s/container-toolkit@sha256:f9d5652cba9ff27f9c4d17f5d09b4a5e4aa631b5cabb9cedf4d3e8c43d6847a4 - name: driver-image - image: nvcr.io/nvidia/driver@sha256:7481a3697783dcdca9ae78e7b548a6900e86ea33ab49ec14f0ba55db2fdb1a2e + image: nvcr.io/nvidia/driver@sha256:78e00fc8aa72f5f4925d54b4f9fbb725ca2168e890d90eadd6a497b7f31ccc18 - name: driver-image-535 - image: nvcr.io/nvidia/driver@sha256:abda1ac56371d55917b96ff330109980f468e133c9d5705da0ef87429f14ccd7 - - name: driver-image-470 - image: nvcr.io/nvidia/driver@sha256:56c79482582cdfbc58d3134e8672637c5bf05f328880f76898f526143d04c6af + image: nvcr.io/nvidia/driver@sha256:a64d182b9d8f024ee747710475867f938386f130db8e61227e6955343e815258 + - name: driver-image-560 + image: nvcr.io/nvidia/driver@sha256:38b66a8d44cab9e2c62da9e101f32cd9dbcb5e02d8e57b47671284d374ca3695 + - name: driver-image-565 + image: nvcr.io/nvidia/driver@sha256:d55b57938866e538acc3a71ca32f8cf87e71c591abd4a34695ee428e7ec2fa73 - name: device-plugin-image - image: nvcr.io/nvidia/k8s-device-plugin@sha256:2a1baf95eb414e6a451c7da2ac6c2992ff81ac95e407a4b254367f18ca9aa320 + image: nvcr.io/nvidia/k8s-device-plugin@sha256:7089559ce6153018806857f5049085bae15b3bf6f1c8bd19d8b12f707d087dea - name: gpu-feature-discovery-image - image: nvcr.io/nvidia/k8s-device-plugin@sha256:2a1baf95eb414e6a451c7da2ac6c2992ff81ac95e407a4b254367f18ca9aa320 + image: nvcr.io/nvidia/k8s-device-plugin@sha256:7089559ce6153018806857f5049085bae15b3bf6f1c8bd19d8b12f707d087dea - name: mig-manager-image - image: nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:a67d8e92861a2dce5649105c07561e4422e9fe4ba81a6525dc0d70a7ef85f9c0 + image: nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:40830d3561c14743f484d45b498141f9e86b1308e16fae3978110783927264ab - name: init-container-image - image: nvcr.io/nvidia/cuda@sha256:714547d54e5fe4191019a1e5f1daffc7fab7481b619b79c378541dafc76c9e5d + image: nvcr.io/nvidia/cuda@sha256:748a2c5178e5c5811b66183bd0ce87d9fdccf992c0ad9b1a5076841e45533190 - name: gpu-operator-validator-image - image: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator-validator:master-latest-ubi8 + image: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator-validator:main-latest - name: k8s-driver-manager-image - image: nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:27c44f4720a4abf780217bd5e7903e4a008ebdbcf71238c4f106a0c22654776c + image: 
nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:b072c5793be65eee556eaff1b9cbbd115a1ef29982be95b2959adfcb4bc72382 - name: vfio-manager-image - image: nvcr.io/nvidia/cuda@sha256:714547d54e5fe4191019a1e5f1daffc7fab7481b619b79c378541dafc76c9e5d + image: nvcr.io/nvidia/cuda@sha256:748a2c5178e5c5811b66183bd0ce87d9fdccf992c0ad9b1a5076841e45533190 - name: sandbox-device-plugin-image - image: nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:387021553dfb16aab633228d42f63f04fa932b4f46add07527f296dfe97e5148 + image: nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:4ffa1cd2a6497eb647a89ed259dcfb007554737b9d80f69bc173a2c3cd72a1da - name: vgpu-device-manager-image - image: nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:925f4a47710e4318ed457930f5406174c1f6d28b1bf6b1bc310687fec0fde712 + image: nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:7edd7a0413dcb39b6e3bcefaf06812f3293c8e480ca10783e821a561ed686200 - name: gdrcopy-image - image: nvcr.io/nvidia/cloud-native/gdrdrv@sha256:1ae0b923bc57f47bab046b50c50110f6914bbaffbfef704df34b3fe332db2e31 + image: nvcr.io/nvidia/cloud-native/gdrdrv@sha256:cf39d78ced7fb5727a9668ee2cd44b14bb7a23a95b83d5464b7d755740e02121 customresourcedefinitions: owned: - name: nvidiadrivers.nvidia.com @@ -584,7 +589,12 @@ spec: - apiGroups: - nvidia.com resources: - - '*' + - clusterpolicies + - clusterpolicies/finalizers + - clusterpolicies/status + - nvidiadrivers + - nvidiadrivers/finalizers + - nvidiadrivers/status verbs: - create - delete @@ -593,6 +603,7 @@ spec: - patch - update - watch + - deletecollection - apiGroups: - config.openshift.io resources: @@ -607,7 +618,14 @@ spec: resources: - securitycontextconstraints verbs: - - '*' + - use + - create + - get + - list + - watch + - patch + - update + - delete - apiGroups: - security.openshift.io resources: @@ -617,105 +635,92 @@ spec: resourceNames: - hostmount-anyuid - apiGroups: - - rbac.authorization.k8s.io + - image.openshift.io resources: - - clusterroles - - clusterrolebindings - - roles - - rolebindings + - imagestreams verbs: - - '*' + - get + - list + - watch - apiGroups: - - "" + - rbac.authorization.k8s.io resources: - - pods - - pods/eviction - - services - - services/finalizers - - endpoints - - persistentvolumeclaims - - events - - configmaps - - secrets - - nodes - - namespaces - - serviceaccounts + - clusterroles + - clusterrolebindings verbs: - create - - delete - get - list - - patch - - update - watch + - update + - patch + - delete - apiGroups: - - apps + - "" resources: - - deployments - - daemonsets - - replicasets - - statefulsets + - namespaces verbs: - - create - - delete - get - list - - patch - - update + - create - watch + - update + - patch - apiGroups: - - apps + - "" resources: - - controllerrevisions + - nodes verbs: - get - list - watch + - update + - patch - apiGroups: - - node.k8s.io + - "" resources: - - runtimeclasses + - events + - pods + - pods/eviction verbs: + - create - get - list - - create - - update - watch + - update + - patch - delete - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - '*' - - apiGroups: - - monitoring.coreos.com + - apps resources: - - servicemonitors - - prometheusrules + - daemonsets verbs: - get - list - - create - watch - - update - - delete - apiGroups: - - image.openshift.io + - node.k8s.io resources: - - imagestreams + - runtimeclasses verbs: - get - list + - create + - update - watch + - delete - apiGroups: - apiextensions.k8s.io resources: - customresourcedefinitions verbs: + - create - get - list - watch + - update 
+ - patch + - delete permissions: - serviceAccountName: gpu-operator rules: @@ -725,42 +730,13 @@ spec: - roles - rolebindings verbs: - - '*' - - apiGroups: - - "" - resources: - - pods - - pods/eviction - - services - - services/finalizers - - endpoints - - persistentvolumeclaims - - events - - configmaps - - secrets - verbs: - create - - delete - get - list - - patch - - update - watch - - apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - update - - watch + - patch + - delete - apiGroups: - apps resources: @@ -770,64 +746,58 @@ spec: - list - watch - apiGroups: - - monitoring.coreos.com + - apps resources: - - servicemonitors - - prometheusrules + - daemonsets verbs: - - get - create + - get - list - - update - watch - - delete - - apiGroups: - - apps - resourceNames: - - gpu-operator - resources: - - deployments/finalizers - verbs: - update + - patch + - delete - apiGroups: - "" resources: - - pods - verbs: - - get - - apiGroups: - - apps - resources: - - replicasets - - deployments - verbs: - - get - - apiGroups: - - nvidia.com - resources: - - '*' + - configmaps + - endpoints + - secrets + - services + - services/finalizers + - serviceaccounts verbs: - create - - delete - get - list - - patch - - update - watch + - update + - patch + - delete - apiGroups: - coordination.k8s.io resources: - leases verbs: - - '*' + - create + - get + - list + - watch + - update + - patch + - delete - apiGroups: - - apiextensions.k8s.io + - monitoring.coreos.com resources: - - customresourcedefinitions + - servicemonitors + - prometheusrules verbs: - get - list + - create - watch + - update + - delete deployments: - name: gpu-operator spec: @@ -850,7 +820,7 @@ spec: - --leader-elect - --leader-lease-renew-deadline - "60s" - image: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator:master-latest-ubi8 + image: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator:main-latest command: - gpu-operator livenessProbe: @@ -888,37 +858,39 @@ spec: fieldRef: fieldPath: metadata.namespace - name: "VALIDATOR_IMAGE" - value: "registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator-validator:master-latest-ubi8" + value: "registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator-validator:main-latest" - name: "GFD_IMAGE" - value: "nvcr.io/nvidia/k8s-device-plugin@sha256:2a1baf95eb414e6a451c7da2ac6c2992ff81ac95e407a4b254367f18ca9aa320" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:7089559ce6153018806857f5049085bae15b3bf6f1c8bd19d8b12f707d087dea" - name: "CONTAINER_TOOLKIT_IMAGE" - value: "nvcr.io/nvidia/k8s/container-toolkit@sha256:59a3875e7a37eb370385e654184efa3a1b193c9ea352165818496b19cbe14aa4" + value: "nvcr.io/nvidia/k8s/container-toolkit@sha256:f9d5652cba9ff27f9c4d17f5d09b4a5e4aa631b5cabb9cedf4d3e8c43d6847a4" - name: "DCGM_IMAGE" - value: "nvcr.io/nvidia/cloud-native/dcgm@sha256:6a05d6a1923fda756aed0dddf7ed23a83c30cf1e6c519fc39dd70c0309ec8257" + value: "nvcr.io/nvidia/cloud-native/dcgm@sha256:e8398b2451996e5c64fc3855e7a3c93b667a85f69c0e259049d3b0012c21545d" - name: "DCGM_EXPORTER_IMAGE" - value: "nvcr.io/nvidia/k8s/dcgm-exporter@sha256:011fb450af3fa2e8fe5d28d590e4c653631447bc23d149591ced3d89089c4f2c" + value: "nvcr.io/nvidia/k8s/dcgm-exporter@sha256:857f7669ae5ce2a43a3b5691fa970085098321ef8e90a896fa8dda3dee5b1d2b" - name: "DEVICE_PLUGIN_IMAGE" - value: 
"nvcr.io/nvidia/k8s-device-plugin@sha256:2a1baf95eb414e6a451c7da2ac6c2992ff81ac95e407a4b254367f18ca9aa320" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:7089559ce6153018806857f5049085bae15b3bf6f1c8bd19d8b12f707d087dea" - name: "DRIVER_IMAGE" - value: "nvcr.io/nvidia/driver@sha256:7481a3697783dcdca9ae78e7b548a6900e86ea33ab49ec14f0ba55db2fdb1a2e" + value: "nvcr.io/nvidia/driver@sha256:78e00fc8aa72f5f4925d54b4f9fbb725ca2168e890d90eadd6a497b7f31ccc18" - name: "DRIVER_IMAGE-535" - value: "nvcr.io/nvidia/driver@sha256:abda1ac56371d55917b96ff330109980f468e133c9d5705da0ef87429f14ccd7" - - name: "DRIVER_IMAGE-470" - value: "nvcr.io/nvidia/driver@sha256:56c79482582cdfbc58d3134e8672637c5bf05f328880f76898f526143d04c6af" + value: "nvcr.io/nvidia/driver@sha256:a64d182b9d8f024ee747710475867f938386f130db8e61227e6955343e815258" + - name: "DRIVER_IMAGE-560" + value: "nvcr.io/nvidia/driver@sha256:38b66a8d44cab9e2c62da9e101f32cd9dbcb5e02d8e57b47671284d374ca3695" + - name: "DRIVER_IMAGE-565" + value: "nvcr.io/nvidia/driver@sha256:d55b57938866e538acc3a71ca32f8cf87e71c591abd4a34695ee428e7ec2fa73" - name: "DRIVER_MANAGER_IMAGE" - value: "nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:27c44f4720a4abf780217bd5e7903e4a008ebdbcf71238c4f106a0c22654776c" + value: "nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:b072c5793be65eee556eaff1b9cbbd115a1ef29982be95b2959adfcb4bc72382" - name: "MIG_MANAGER_IMAGE" - value: "nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:a67d8e92861a2dce5649105c07561e4422e9fe4ba81a6525dc0d70a7ef85f9c0" + value: "nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:40830d3561c14743f484d45b498141f9e86b1308e16fae3978110783927264ab" - name: "CUDA_BASE_IMAGE" - value: "nvcr.io/nvidia/cuda@sha256:714547d54e5fe4191019a1e5f1daffc7fab7481b619b79c378541dafc76c9e5d" + value: "nvcr.io/nvidia/cuda@sha256:748a2c5178e5c5811b66183bd0ce87d9fdccf992c0ad9b1a5076841e45533190" - name: "VFIO_MANAGER_IMAGE" - value: "nvcr.io/nvidia/cuda@sha256:714547d54e5fe4191019a1e5f1daffc7fab7481b619b79c378541dafc76c9e5d" + value: "nvcr.io/nvidia/cuda@sha256:748a2c5178e5c5811b66183bd0ce87d9fdccf992c0ad9b1a5076841e45533190" - name: "SANDBOX_DEVICE_PLUGIN_IMAGE" - value: "nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:387021553dfb16aab633228d42f63f04fa932b4f46add07527f296dfe97e5148" + value: "nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:4ffa1cd2a6497eb647a89ed259dcfb007554737b9d80f69bc173a2c3cd72a1da" - name: "VGPU_DEVICE_MANAGER_IMAGE" - value: "nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:925f4a47710e4318ed457930f5406174c1f6d28b1bf6b1bc310687fec0fde712" + value: "nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:7edd7a0413dcb39b6e3bcefaf06812f3293c8e480ca10783e821a561ed686200" - name: "GDRCOPY_IMAGE" - value: "nvcr.io/nvidia/cloud-native/gdrdrv@sha256:1ae0b923bc57f47bab046b50c50110f6914bbaffbfef704df34b3fe332db2e31" + value: "nvcr.io/nvidia/cloud-native/gdrdrv@sha256:cf39d78ced7fb5727a9668ee2cd44b14bb7a23a95b83d5464b7d755740e02121" terminationGracePeriodSeconds: 10 volumes: - hostPath: @@ -949,5 +921,5 @@ spec: maturity: stable provider: name: NVIDIA Corporation - version: 23.9.2 - replaces: gpu-operator-certified.v23.9.1 + version: 24.9.0 + replaces: gpu-operator-certified.v24.6.2 diff --git a/bundle/manifests/nvidia.com_clusterpolicies.yaml b/bundle/manifests/nvidia.com_clusterpolicies.yaml index 16e35bf4b..54e4a652b 100644 --- a/bundle/manifests/nvidia.com_clusterpolicies.yaml +++ b/bundle/manifests/nvidia.com_clusterpolicies.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: 
CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: clusterpolicies.nvidia.com spec: group: nvidia.com @@ -248,8 +248,8 @@ spec: type: object type: array hostPort: - description: 'HostPort represents host port that needs to be bound - for DCGM engine (Default: 5555)' + description: 'Deprecated: HostPort represents host port that needs + to be bound for DCGM engine (Default: 5555)' format: int32 type: integer image: @@ -410,15 +410,20 @@ spec: sets for NVIDIA DCGM Exporter items: description: |- - RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. - It defines ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config properties: action: default: replace description: |- - Action to perform based on regex matching. Default is 'replace'. - uppercase and lowercase actions require Prometheus >= 2.36. + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" enum: - replace - Replace @@ -444,39 +449,47 @@ spec: - DropEqual type: string modulus: - description: Modulus to take of the hash of the source - label values. + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. format: int64 type: integer regex: description: Regular expression against which the extracted - value is matched. Default is '(.*)' + value is matched. type: string replacement: description: |- - Replacement value against which a regex replace is performed if the - regular expression matches. Regex capture groups are available. Default is '$1' + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. type: string separator: - description: Separator placed between concatenated source - label values. default is ';'. + description: Separator is the string between concatenated + SourceLabels. type: string sourceLabels: description: |- - The source labels select values from existing labels. Their content is concatenated - using the configured separator and matched against the configured regular expression - for the replace, keep, and drop actions. + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. items: - description: LabelName is a valid Prometheus label - name which may only contain ASCII letters, numbers, - as well as underscores. + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ type: string type: array targetLabel: description: |- - Label to which the resulting value is written in a replace action. - It is mandatory for replace actions. Regex capture groups are available. + Label to which the resulting string is written in a replacement. 
+ + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. type: string type: object type: array @@ -538,6 +551,15 @@ spec: items: type: string type: array + mps: + description: 'Optional: MPS related configuration for the NVIDIA + Device Plugin' + properties: + root: + default: /run/nvidia/mps + description: Root defines the MPS root path on the host + type: string + type: object repository: description: NVIDIA Device Plugin image repository type: string @@ -926,9 +948,9 @@ spec: type: boolean timeoutSeconds: default: 300 - description: TimeoutSecond specifies the length of time - in seconds to wait before giving up on pod termination, - zero means infinite + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite minimum: 0 type: integer type: object @@ -944,9 +966,9 @@ spec: type: string timeoutSeconds: default: 0 - description: TimeoutSecond specifies the length of time - in seconds to wait before giving up on pod termination, - zero means infinite + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite minimum: 0 type: integer type: object @@ -1146,6 +1168,24 @@ spec: description: GFD image tag type: string type: object + hostPaths: + description: HostPaths defines various paths on the host needed by + GPU Operator components + properties: + driverInstallDir: + description: |- + DriverInstallDir represents the root at which driver files including libraries, + config files, and executables can be found. + type: string + rootFS: + description: |- + RootFS represents the path to the root filesystem of the host. + This is used by components that need to interact with the host filesystem + and as such this must be a chroot-able filesystem. + Examples include the MIG Manager and Toolkit Container which may need to + stop, start, or restart systemd services. + type: string + type: object kataManager: description: KataManager component spec properties: @@ -2269,16 +2309,8 @@ spec: description: Conditions is a list of conditions representing the ClusterPolicy's current state. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -2319,12 +2351,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. 
- The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/bundle/manifests/nvidia.com_nvidiadrivers.yaml b/bundle/manifests/nvidia.com_nvidiadrivers.yaml index 317972fd2..c49059a38 100644 --- a/bundle/manifests/nvidia.com_nvidiadrivers.yaml +++ b/bundle/manifests/nvidia.com_nvidiadrivers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: nvidiadrivers.nvidia.com spec: group: nvidia.com @@ -357,11 +357,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -389,11 +391,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -406,6 +410,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -450,11 +455,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -482,14 +489,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -709,16 +719,8 @@ spec: description: Conditions is a list of conditions representing the NVIDIADriver's current state. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -759,12 +761,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/bundle/metadata/annotations.yaml b/bundle/metadata/annotations.yaml index f13bb0580..50d49a56b 100644 --- a/bundle/metadata/annotations.yaml +++ b/bundle/metadata/annotations.yaml @@ -1,6 +1,6 @@ annotations: - operators.operatorframework.io.bundle.channels.v1: stable,v23.9 - operators.operatorframework.io.bundle.channel.default.v1: v23.9 + operators.operatorframework.io.bundle.channels.v1: stable,v24.9 + operators.operatorframework.io.bundle.channel.default.v1: v24.9 operators.operatorframework.io.bundle.manifests.v1: manifests/ operators.operatorframework.io.bundle.mediatype.v1: registry+v1 operators.operatorframework.io.bundle.metadata.v1: metadata/ @@ -14,4 +14,4 @@ annotations: operatorframework.io/suggested-namespace: nvidia-gpu-operator # Annotations to specify OCP versions compatibility. - com.redhat.openshift.versions: v4.9-v4.15 + com.redhat.openshift.versions: v4.12-v4.17 diff --git a/bundle/v24.3.0/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/v24.3.0/manifests/gpu-operator-certified.clusterserviceversion.yaml new file mode 100644 index 000000000..11ceba75d --- /dev/null +++ b/bundle/v24.3.0/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -0,0 +1,956 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + labels: + operatorframework.io/arch.arm64: supported + operatorframework.io/arch.amd64: supported + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/warn: privileged + annotations: + features.operators.openshift.io/disconnected: "true" + features.operators.openshift.io/fips-compliant: "false" + features.operators.openshift.io/proxy-aware: "true" + features.operators.openshift.io/tls-profiles: "false" + features.operators.openshift.io/token-auth-aws: "false" + features.operators.openshift.io/token-auth-azure: "false" + features.operators.openshift.io/token-auth-gcp: "false" + features.operators.openshift.io/cnf: "false" + features.operators.openshift.io/cni: "false" + features.operators.openshift.io/csi: "false" + olm.skipRange: '>=1.9.0 <24.3.0' + alm-examples: |- + [ + { + "apiVersion": "nvidia.com/v1", + "kind": "ClusterPolicy", + "metadata": { + "name": "gpu-cluster-policy" + }, + "spec": { + "operator": { + "defaultRuntime": "crio", + "use_ocp_driver_toolkit": true, + "initContainer": { + } + }, + "sandboxWorkloads": { + "enabled": false, + "defaultWorkload": "container" + }, + "driver": { + "enabled": true, + "useNvidiaDriverCRD": false, + "useOpenKernelModules": false, + "upgradePolicy": { + "autoUpgrade": true, + "drain": { + "deleteEmptyDir": false, + "enable": false, + "force": false, + "timeoutSeconds": 300 + }, + "maxParallelUpgrades": 1, + "maxUnavailable": "25%", + "podDeletion": { + "deleteEmptyDir": false, + "force": false, + "timeoutSeconds": 300 + }, + "waitForCompletion": { + "timeoutSeconds": 0 + } + }, + "repoConfig": { + "configMapName": "" + }, + "certConfig": { + "name": "" + }, + "licensingConfig": { + "nlsEnabled": true, + "configMapName": "" + }, + "virtualTopology": { + "config": "" + }, + "kernelModuleConfig": { + "name": "" + } + }, + "dcgmExporter": { + "enabled": true, + "config": { + "name": "" + }, + "serviceMonitor": { + "enabled": true + } + }, + "dcgm": { + "enabled": true + }, + "daemonsets": { + "updateStrategy": "RollingUpdate", + 
"rollingUpdate": { + "maxUnavailable": "1" + } + }, + "devicePlugin": { + "enabled": true, + "config": { + "name": "", + "default": "" + }, + "mps": { + "root": "/run/nvidia/mps" + } + }, + "gfd": { + "enabled": true + }, + "migManager": { + "enabled": true + }, + "nodeStatusExporter": { + "enabled": true + }, + "mig": { + "strategy": "single" + }, + "toolkit": { + "enabled": true + }, + "validator": { + "plugin": { + "env": [ + { + "name": "WITH_WORKLOAD", + "value": "false" + } + ] + } + }, + "vgpuManager": { + "enabled": false + }, + "vgpuDeviceManager": { + "enabled": true + }, + "sandboxDevicePlugin": { + "enabled": true + }, + "vfioManager": { + "enabled": true + }, + "gds": { + "enabled": false + }, + "gdrcopy": { + "enabled": false + } + } + }, + { + "apiVersion": "nvidia.com/v1alpha1", + "kind": "NVIDIADriver", + "metadata": { + "name": "gpu-driver" + }, + "spec": { + "driverType": "gpu", + "repository": "nvcr.io/nvidia", + "image": "driver", + "version": "sha256:96f25c67e5b1072d5981080e12d65ec37eb9ef2fc0494499416aa801b0a34da3", + "nodeSelector": {}, + "manager": {}, + "repoConfig": { + "name": "" + }, + "certConfig": { + "name": "" + }, + "licensingConfig": { + "nlsEnabled": true, + "name": "" + }, + "virtualTopologyConfig": { + "name": "" + }, + "kernelModuleConfig": { + "name": "" + } + } + } + ] + operators.operatorframework.io/builder: operator-sdk-v1.4.0 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 + operatorframework.io/suggested-namespace: nvidia-gpu-operator + capabilities: Deep Insights + categories: AI/Machine Learning, OpenShift Optional + certified: "true" + containerImage: nvcr.io/nvidia/gpu-operator@sha256:041e75a3df84039c2dbbd4b9d67763bd212138822dbb6dbc0008858c1c6eff8d + createdAt: "Tue Apr 30 14:18:26 PDT 2024" + description: Automate the management and monitoring of NVIDIA GPUs. 
+ provider: NVIDIA + repository: http://github.com/NVIDIA/gpu-operator + support: NVIDIA + name: gpu-operator-certified.v24.3.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + relatedImages: + - name: gpu-operator-image + image: nvcr.io/nvidia/gpu-operator@sha256:041e75a3df84039c2dbbd4b9d67763bd212138822dbb6dbc0008858c1c6eff8d + - name: dcgm-exporter-image + image: nvcr.io/nvidia/k8s/dcgm-exporter@sha256:ecb867c5787e44f1889b257e6c03b07748689a7b293c8f1affb1d4cb449b235c + - name: dcgm-image + image: nvcr.io/nvidia/cloud-native/dcgm@sha256:2442fd2ec0ee29746abbced727c53741f017e8f3f615321c1ec42c1c3ffa0b12 + - name: container-toolkit-image + image: nvcr.io/nvidia/k8s/container-toolkit@sha256:7798eb9b9424e09959fc808596f87bdb68ac5740174123c6646dee2166d3e7d2 + - name: driver-image + image: nvcr.io/nvidia/driver@sha256:96f25c67e5b1072d5981080e12d65ec37eb9ef2fc0494499416aa801b0a34da3 + - name: driver-image-535 + image: nvcr.io/nvidia/driver@sha256:a836ccbe21da735aee1c39b81060ed5e2fdb4ffa339874baaf4634f1e9259f74 + - name: driver-image-470 + image: nvcr.io/nvidia/driver@sha256:56c79482582cdfbc58d3134e8672637c5bf05f328880f76898f526143d04c6af + - name: device-plugin-image + image: nvcr.io/nvidia/k8s-device-plugin@sha256:80674b19898ecf9ed6949e39674da769d6feeeb01bea54b914ef9ff502834f49 + - name: gpu-feature-discovery-image + image: nvcr.io/nvidia/k8s-device-plugin@sha256:80674b19898ecf9ed6949e39674da769d6feeeb01bea54b914ef9ff502834f49 + - name: mig-manager-image + image: nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:d8774d6afd37110199512636172823ae8749e5ea2e13760f57c255a74f47018c + - name: init-container-image + image: nvcr.io/nvidia/cuda@sha256:ae0623ec8634b6c88f815b88037763def160cbbac15013b77ddef257fc276c9a + - name: gpu-operator-validator-image + image: nvcr.io/nvidia/cloud-native/gpu-operator-validator@sha256:2edc1d4ed555830e70010c82558936198f5faa86fc29ecf5698219145102cfcc + - name: k8s-driver-manager-image + image: nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:0c77725de8c42d248ed825453efd2e005f4900f4be384fd23084f6b721ddd0e0 + - name: vfio-manager-image + image: nvcr.io/nvidia/cuda@sha256:ae0623ec8634b6c88f815b88037763def160cbbac15013b77ddef257fc276c9a + - name: sandbox-device-plugin-image + image: nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:2cc2119d20515f8ca7a61a0f3932578d69a45bcdea49e6c320a89c56f105e7d9 + - name: vgpu-device-manager-image + image: nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:f0e4e14a3081417be8e6a5d855fb0cb69e1b63bc54a74f17e6d7084abe275588 + - name: gdrcopy-image + image: nvcr.io/nvidia/cloud-native/gdrdrv@sha256:1ae0b923bc57f47bab046b50c50110f6914bbaffbfef704df34b3fe332db2e31 + customresourcedefinitions: + owned: + - name: nvidiadrivers.nvidia.com + kind: NVIDIADriver + version: v1alpha1 + displayName: NVIDIADriver + description: NVIDIADriver allows you to deploy the NVIDIA driver + resources: + - kind: ServiceAccount + name: '' + version: v1 + - kind: DaemonSet + name: '' + version: apps/v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: clusterpolicies + name: '' + version: v1 + - kind: clusterversions + name: '' + version: v1 + - kind: nodes + name: '' + version: v1 + - kind: status + name: '' + version: v1 + specDescriptors: + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of 
nodes (optional) + displayName: Node Selector + path: nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + statusDescriptors: + - description: The current state of the driver. + displayName: State + path: state + x-descriptors: + - 'urn:alm:descriptor:text' + - name: clusterpolicies.nvidia.com + kind: ClusterPolicy + version: v1 + group: nvidia.com + displayName: ClusterPolicy + description: ClusterPolicy allows you to configure the GPU Operator + resources: + - kind: ServiceAccount + name: '' + version: v1 + - kind: Deployment + name: '' + version: apps/v1 + - kind: DaemonSet + name: '' + version: apps/v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: clusterpolicies + name: '' + version: v1 + - kind: clusterversions + name: '' + version: v1 + - kind: nodes + name: '' + version: v1 + - kind: status + name: '' + version: v1 + specDescriptors: + - description: GPU Operator config + displayName: GPU Operator config + path: operator + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: operator.validator.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: Image pull secrets + displayName: Image pull secrets + path: operator.validator.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - description: NVIDIA GPU/vGPU Driver config + displayName: NVIDIA GPU/vGPU Driver config + path: driver + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: driver.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: driver.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: driver.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: driver.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: driver.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA DCGM Exporter config + displayName: NVIDIA DCGM Exporter config + path: dcgmExporter + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: dcgmExporter.affinity + x-descriptors: + - 
'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: dcgmExporter.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: dcgmExporter.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: dcgmExporter.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: dcgmExporter.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA Device Plugin config + displayName: NVIDIA Device Plugin config + path: devicePlugin + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: devicePlugin.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: devicePlugin.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: devicePlugin.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: devicePlugin.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: devicePlugin.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: GPU Feature Discovery Plugin config + displayName: GPU Feature Discovery Plugin config + path: gfd + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: gfd.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: gfd.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: gfd.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: gfd.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: gfd.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA Container Toolkit config + displayName: NVIDIA Container Toolkit config 
+            path: toolkit
+          - description: 'Optional: Set Node affinity'
+            displayName: Node affinity
+            path: toolkit.affinity
+            x-descriptors:
+              - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+              - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity'
+          - description: Node selector to control the selection of nodes (optional)
+            displayName: Node Selector
+            path: toolkit.nodeSelector
+            x-descriptors:
+              - 'urn:alm:descriptor:com.tectonic.ui:selector:Node'
+              - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+          - description: 'Optional: Set tolerations'
+            displayName: Tolerations
+            path: toolkit.tolerations
+            x-descriptors:
+              - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+              - 'urn:alm:descriptor:io.kubernetes:Tolerations'
+          - description: Image pull secrets
+            displayName: Image pull secrets
+            path: toolkit.imagePullSecrets
+            x-descriptors:
+              - 'urn:alm:descriptor:io.kubernetes:Secret'
+              - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+          - displayName: ImagePullPolicy
+            description: 'Image pull policy (default: IfNotPresent)'
+            path: toolkit.imagePullPolicy
+            x-descriptors:
+              - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy'
+          - displayName: NVIDIA DCGM config
+            description: NVIDIA DCGM config
+            path: dcgm
+          - displayName: Validator config
+            description: Validator config
+            path: validator
+          - displayName: Node Status Exporter config
+            description: Node Status Exporter config
+            path: nodeStatusExporter
+          - displayName: Daemonsets config
+            description: Daemonsets config
+            path: daemonsets
+          - displayName: MIG config
+            description: MIG config
+            path: mig
+          - displayName: NVIDIA MIG Manager config
+            description: NVIDIA MIG Manager config
+            path: migManager
+          - displayName: PodSecurityPolicy config
+            description: PodSecurityPolicy config
+            path: psp
+          - displayName: NVIDIA GPUDirect Storage config
+            description: NVIDIA GPUDirect Storage config
+            path: gds
+          - displayName: Sandbox Workloads config
+            description: Sandbox Workloads config
+            path: sandboxWorkloads
+          - displayName: NVIDIA vGPU Manager config
+            description: NVIDIA vGPU Manager config
+            path: vgpuManager
+          - displayName: NVIDIA vGPU Device Manager config
+            description: NVIDIA vGPU Device Manager config
+            path: vgpuDeviceManager
+          - displayName: VFIO Manager config
+            description: VFIO Manager config
+            path: vfioManager
+          - displayName: NVIDIA Sandbox Device Plugin config
+            description: NVIDIA Sandbox Device Plugin config
+            path: sandboxDevicePlugin
+        statusDescriptors:
+          - description: The current state of the operator.
+            displayName: State
+            path: state
+            x-descriptors:
+              - 'urn:alm:descriptor:text'
+  displayName: NVIDIA GPU Operator
+  description: >
+    Kubernetes provides access to special hardware resources such as NVIDIA
+    GPUs, NICs, InfiniBand adapters and other devices through the [device plugin
+    framework](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/).
+    However, configuring and managing nodes with these hardware resources
+    requires configuring multiple software components such as drivers,
+    container runtimes, and other libraries, which is difficult and
+    error-prone.
+
+    The NVIDIA GPU Operator uses the [operator
+    framework](https://cloud.redhat.com/blog/introducing-the-operator-framework) within
+    Kubernetes to automate the management of all NVIDIA software components
+    needed to provision and monitor GPUs.
+    These components include the NVIDIA drivers (to enable CUDA), the Kubernetes
+    device plugin for GPUs, the NVIDIA Container Runtime, automatic node
+    labelling and the NVIDIA DCGM exporter.
+ + Visit the official site of the [GPU Operator](https://github.com/NVIDIA/gpu-operator) for more information. + For getting started with using the GPU Operator with OpenShift, see the instructions + [here](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/openshift/contents.html). + icon: + - base64data: iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAB2lBMVEUAAAD///8EBAN3uQACAgIAAAQJDQUCAgB1tgAHCQf+/v5Ufg5Hagxxqwt+xgJ3uAB9wwB4vQBRUVEeLA3e3t5nZ2coKCgODg4FBwZ9wwR6wAJ4vADz8/MbGxt5tw1vpw1/wgoOFwkLDwh9xQH5+fny8vLw8PDFxcWysrKFhYVvb282NjYyMjIqKioXFxdikxRYgxNCYxJQdhFqoQ9xrg16ugxyqgyAxQkEBQj7+/v29vbIyMhjY2NbW1tHR0cvLy8kJCQdHR0ZGRlKbxJ8uhFNcxFVgBAxSBBgkg93tQ50sA4qPg4XIg18vwsbKQsSGgsLCwsMEwqCyQeByQFztADPz8+/v7+6urqWlpZra2tKSkogICASEhJmmRE8XBA5VRA2UBBonA9biA9GaQ4sQg4jMw4mOQ0aJw2GzgsUHgttpAqJ0Ql/wQWG0AJ8vwF0uQCtra2jo6OQkJB9fX1VVVVCQkI9PT0iIiIUFBRSfBNgjhA7WRBGZw+GywmFzgaAyASBxQN2twDb29u2traenp6Kiop+fn53d3dzc3NyqRV4sxM/YBNAXRElNhBjlQ+IzA00TQ16vgxJbgp6vAl4tgJ3vgDs7Ozn5+fa2trS0tJCXRY6VBV6thSL1gf4nFdFAAAD80lEQVRYw+zSOXPaQBgG4He0LJJmbGRGDUIzuvgBQiAEPfcdwC33DTbUtmOwSyc+4iRucvzXRImLFJmRShc8xXbfu+9+szg4OHjjAsH/iFD49q7rqM6xc/wPtWyBhS8sC94ObWRCZDksh1+RzmcEfI0DoPrjylEkSTgViMs9udjYTwMG4Gf51Z1BM81ioRwit+QvgYsdUQZeKFr3ladyKXvVr+pAM5uKcmRLXFzoCIxn+0i/8lSaBMHnfi7qowfQuZnm3PuFPwGs13zD3NlViozY/z4YD6/TCQORbPr2q78GLB0ou5IO40pd5AxQZnJ83m2y9Ju2JYKfgEhWC18aEIfrZLURHwQC0B87ySZwHxX8BNDWB1KfQfyxT2TA24uPQMt8yTWA3obz8wQGlhTN06Z900MkuJLrYu3u5LkK9LTtGRF8NEDLeSnXYLUdHUFVlpPqTa4IamlhJZ464biY1w4CKGrROOW7uwLlV+Q02lanCF6cbSoPVLzUfPwDll5I9T6WyXWhZre1yjiI6VCSzCWY3+FKaAwGHngzpEygx6+V6Uzk6TJR7yhWxJ1bFgTPJ7gMc58aUCq+n+qNT6Pn8y/xOcCiZZVjnJ+AAPhEuj0SKZ9bL9ZpNS9SgM6z9p5w3jt43cMvecfWBhm7dtfEpfhYMDBYpFd7mDZIAxPCFKgBhB0hkWbE2wVMyqycfhOMEiebSzFz5IMTEjw7E87UFj4GVR7GXqaSkoIcISEc/I38/PwhOTUMRBrADgwK09zgYGUBqbwcARiQyp3Eyk6kC4BloqtbJTcaSHIHShALWFmBSRuCWBGC+AtDMAAGIpAAc9mBiB0sCLSXHUSygxSxEIoE7IKEgbhopKgogC96x04QCMMw/H0cG6f0cEmBHaLc7FFQzApoTLwtQgWUWo26glx2mzGkyoHM1PPMO/NrnSH8e2QAiRsZ8S3ZuJoW5Udg5moGoMRLN2gAnkcUctueJ1gADsdtlZ2AgmSYoaDZBXwRctcwy6HN3XX/wfnTnA7Q5x0S0Gku4wHpe7Ql8Mbtu4TqC3qcADGtUl4O3eK0AkZdKH1mU/a6MFQGA7pQGoAVoAuuPYZlLJF2BawVLLjwac6Q8wUax61/CpKQAT6ZX3hFqoqqAFvuf4AzM+NgsoBS/wcSOD7SFzyf6CE9UQK9II1MRvIJm8QSgsLiBZuypsAWKyARElgx5FcLv1N4nFLbB45Sh6+TzsQRtn7bz/B3fS9GQ12bgUE2PKycQbwgXD0SWLwVhpZFq4eHhWloOjLoqGvoRYRGAR2vp2EtpNUaTUpiRAizMAEhKNXpYZNnAUlBCSgFYTIxQTlMMJNGwSgYBdQHAFsKs+/bUkeyAAAAAElFTkSuQmCC + mediatype: image/png + install: + spec: + clusterPermissions: + - serviceAccountName: gpu-operator + rules: + - apiGroups: + - nvidia.com + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - config.openshift.io + resources: + - clusterversions + - proxies + verbs: + - get + - list + - watch + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - '*' + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use + resourceNames: + - hostmount-anyuid + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + - roles + - rolebindings + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/eviction + - services + - services/finalizers + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - nodes + - namespaces + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - 
create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch + - apiGroups: + - node.k8s.io + resources: + - runtimeclasses + verbs: + - get + - list + - create + - update + - watch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - '*' + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - prometheusrules + verbs: + - get + - list + - create + - watch + - update + - delete + - apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - get + - list + - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + permissions: + - serviceAccountName: gpu-operator + rules: + - apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/eviction + - services + - services/finalizers + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - prometheusrules + verbs: + - get + - create + - list + - update + - watch + - delete + - apiGroups: + - apps + resourceNames: + - gpu-operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get + - apiGroups: + - nvidia.com + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - '*' + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + deployments: + - name: gpu-operator + spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: gpu-operator + app: gpu-operator + strategy: {} + template: + metadata: + labels: + app.kubernetes.io/component: gpu-operator + app: gpu-operator + nvidia.com/gpu-driver-upgrade-drain.skip: "true" + spec: + priorityClassName: system-node-critical + containers: + - args: + - --leader-elect + - --leader-lease-renew-deadline + - "60s" + image: nvcr.io/nvidia/gpu-operator@sha256:041e75a3df84039c2dbbd4b9d67763bd212138822dbb6dbc0008858c1c6eff8d + command: + - gpu-operator + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: gpu-operator + ports: + - name: metrics + containerPort: 8080 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 200m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /host-etc/os-release + name: host-os-release + readOnly: true + env: + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "VALIDATOR_IMAGE" + value: 
"nvcr.io/nvidia/cloud-native/gpu-operator-validator@sha256:2edc1d4ed555830e70010c82558936198f5faa86fc29ecf5698219145102cfcc" + - name: "GFD_IMAGE" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:80674b19898ecf9ed6949e39674da769d6feeeb01bea54b914ef9ff502834f49" + - name: "CONTAINER_TOOLKIT_IMAGE" + value: "nvcr.io/nvidia/k8s/container-toolkit@sha256:7798eb9b9424e09959fc808596f87bdb68ac5740174123c6646dee2166d3e7d2" + - name: "DCGM_IMAGE" + value: "nvcr.io/nvidia/cloud-native/dcgm@sha256:2442fd2ec0ee29746abbced727c53741f017e8f3f615321c1ec42c1c3ffa0b12" + - name: "DCGM_EXPORTER_IMAGE" + value: "nvcr.io/nvidia/k8s/dcgm-exporter@sha256:ecb867c5787e44f1889b257e6c03b07748689a7b293c8f1affb1d4cb449b235c" + - name: "DEVICE_PLUGIN_IMAGE" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:80674b19898ecf9ed6949e39674da769d6feeeb01bea54b914ef9ff502834f49" + - name: "DRIVER_IMAGE" + value: "nvcr.io/nvidia/driver@sha256:96f25c67e5b1072d5981080e12d65ec37eb9ef2fc0494499416aa801b0a34da3" + - name: "DRIVER_IMAGE-535" + value: "nvcr.io/nvidia/driver@sha256:a836ccbe21da735aee1c39b81060ed5e2fdb4ffa339874baaf4634f1e9259f74" + - name: "DRIVER_IMAGE-470" + value: "nvcr.io/nvidia/driver@sha256:56c79482582cdfbc58d3134e8672637c5bf05f328880f76898f526143d04c6af" + - name: "DRIVER_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:0c77725de8c42d248ed825453efd2e005f4900f4be384fd23084f6b721ddd0e0" + - name: "MIG_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:d8774d6afd37110199512636172823ae8749e5ea2e13760f57c255a74f47018c" + - name: "CUDA_BASE_IMAGE" + value: "nvcr.io/nvidia/cuda@sha256:ae0623ec8634b6c88f815b88037763def160cbbac15013b77ddef257fc276c9a" + - name: "VFIO_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cuda@sha256:ae0623ec8634b6c88f815b88037763def160cbbac15013b77ddef257fc276c9a" + - name: "SANDBOX_DEVICE_PLUGIN_IMAGE" + value: "nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:2cc2119d20515f8ca7a61a0f3932578d69a45bcdea49e6c320a89c56f105e7d9" + - name: "VGPU_DEVICE_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:f0e4e14a3081417be8e6a5d855fb0cb69e1b63bc54a74f17e6d7084abe275588" + - name: "GDRCOPY_IMAGE" + value: "nvcr.io/nvidia/cloud-native/gdrdrv@sha256:1ae0b923bc57f47bab046b50c50110f6914bbaffbfef704df34b3fe332db2e31" + terminationGracePeriodSeconds: 10 + volumes: + - hostPath: + path: /etc/os-release + name: host-os-release + serviceAccountName: gpu-operator + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: false + type: AllNamespaces + keywords: + - gpu + - cuda + - compute + - operator + - deep learning + - monitoring + - tesla + maintainers: + - name: NVIDIA + email: operator_feedback@nvidia.com + maturity: stable + provider: + name: NVIDIA Corporation + version: 24.3.0 + replaces: gpu-operator-certified.v23.9.2 diff --git a/bundle/v24.3.0/manifests/nvidia.com_clusterpolicies.yaml b/bundle/v24.3.0/manifests/nvidia.com_clusterpolicies.yaml new file mode 100644 index 000000000..f52a487b3 --- /dev/null +++ b/bundle/v24.3.0/manifests/nvidia.com_clusterpolicies.yaml @@ -0,0 +1,2386 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: clusterpolicies.nvidia.com +spec: + group: nvidia.com + names: + kind: ClusterPolicy + listKind: ClusterPolicyList + plural: clusterpolicies + singular: clusterpolicy + 
diff --git a/bundle/v24.3.0/manifests/nvidia.com_clusterpolicies.yaml b/bundle/v24.3.0/manifests/nvidia.com_clusterpolicies.yaml
new file mode 100644
index 000000000..f52a487b3
--- /dev/null
+++ b/bundle/v24.3.0/manifests/nvidia.com_clusterpolicies.yaml
@@ -0,0 +1,2386 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.14.0
+  name: clusterpolicies.nvidia.com
+spec:
+  group: nvidia.com
+  names:
+    kind: ClusterPolicy
+    listKind: ClusterPolicyList
+    plural: clusterpolicies
+    singular: clusterpolicy
+  scope: Cluster
+  versions:
+    - additionalPrinterColumns:
+        - jsonPath: .status.state
+          name: Status
+          type: string
+        - jsonPath: .metadata.creationTimestamp
+          name: Age
+          type: string
+      name: v1
+      schema:
+        openAPIV3Schema:
+          description: ClusterPolicy is the Schema for the clusterpolicies API
+          properties:
+            apiVersion:
+              description: |-
+                APIVersion defines the versioned schema of this representation of an object.
+                Servers should convert recognized schemas to the latest internal value, and
+                may reject unrecognized values.
+                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+              type: string
+            kind:
+              description: |-
+                Kind is a string value representing the REST resource this object represents.
+                Servers may infer this from the endpoint the client submits requests to.
+                Cannot be updated.
+                In CamelCase.
+                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+              type: string
+            metadata:
+              type: object
+            spec:
+              description: ClusterPolicySpec defines the desired state of ClusterPolicy
+              properties:
+                ccManager:
+                  description: CCManager component spec
+                  properties:
+                    args:
+                      description: 'Optional: List of arguments'
+                      items:
+                        type: string
+                      type: array
+                    defaultMode:
+                      description: Default CC mode setting for compatible GPUs on the
+                        node
+                      enum:
+                        - "on"
+                        - "off"
+                        - devtools
+                      type: string
+                    enabled:
+                      description: Enabled indicates if deployment of CC Manager is
+                        enabled
+                      type: boolean
+                    env:
+                      description: 'Optional: List of environment variables'
+                      items:
+                        description: EnvVar represents an environment variable present
+                          in a Container.
+                        properties:
+                          name:
+                            description: Name of the environment variable.
+                            type: string
+                          value:
+                            description: Value of the environment variable.
+                            type: string
+                        required:
+                          - name
+                        type: object
+                      type: array
+                    image:
+                      description: CC Manager image name
+                      pattern: '[a-zA-Z0-9\-]+'
+                      type: string
+                    imagePullPolicy:
+                      description: Image pull policy
+                      type: string
+                    imagePullSecrets:
+                      description: Image pull secrets
+                      items:
+                        type: string
+                      type: array
+                    repository:
+                      description: CC Manager image repository
+                      type: string
+                    resources:
+                      description: 'Optional: Define resources requests and limits for
+                        each pod'
+                      properties:
+                        limits:
+                          additionalProperties:
+                            anyOf:
+                              - type: integer
+                              - type: string
+                            pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                            x-kubernetes-int-or-string: true
+                          description: |-
+                            Limits describes the maximum amount of compute resources allowed.
+                            More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                          type: object
+                        requests:
+                          additionalProperties:
+                            anyOf:
+                              - type: integer
+                              - type: string
+                            pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                            x-kubernetes-int-or-string: true
+                          description: |-
+                            Requests describes the minimum amount of compute resources required.
+                            If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+                            otherwise to an implementation-defined value. Requests cannot exceed Limits.
+                            More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                          type: object
+                      type: object
+                    version:
+                      description: CC Manager image tag
+                      type: string
+                  type: object
+                cdi:
+                  description: CDI configures how the Container Device Interface is
+                    used in the cluster
+                  properties:
+                    default:
+                      default: false
+                      description: Default indicates whether to use CDI as the default
+                        mechanism for providing GPU access to containers.
+                      type: boolean
+                    enabled:
+                      default: false
+                      description: Enabled indicates whether CDI can be used to make
+                        GPUs accessible to containers.
+                      type: boolean
+                  type: object
+                daemonsets:
+                  description: Daemonset defines common configuration for all Daemonsets
+                  properties:
+                    annotations:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Optional: Annotations is an unstructured key value map stored with a resource that may be
+                        set by external tools to store and retrieve arbitrary metadata. They are not
+                        queryable and should be preserved when modifying objects.
+                      type: object
+                    labels:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Optional: Map of string keys and values that can be used to organize and categorize
+                        (scope and select) objects. May match selectors of replication controllers
+                        and services.
+                      type: object
+                    priorityClassName:
+                      type: string
+                    rollingUpdate:
+                      description: 'Optional: Configuration for rolling update of all
+                        DaemonSet pods'
+                      properties:
+                        maxUnavailable:
+                          type: string
+                      type: object
+                    tolerations:
+                      description: 'Optional: Set tolerations'
+                      items:
+                        description: |-
+                          The pod this Toleration is attached to tolerates any taint that matches
+                          the triple <key,value,effect> using the matching operator <operator>.
+                        properties:
+                          effect:
+                            description: |-
+                              Effect indicates the taint effect to match. Empty means match all taint effects.
+                              When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                            type: string
+                          key:
+                            description: |-
+                              Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                              If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                            type: string
+                          operator:
+                            description: |-
+                              Operator represents a key's relationship to the value.
+                              Valid operators are Exists and Equal. Defaults to Equal.
+                              Exists is equivalent to wildcard for value, so that a pod can
+                              tolerate all taints of a particular category.
+                            type: string
+                          tolerationSeconds:
+                            description: |-
+                              TolerationSeconds represents the period of time the toleration (which must be
+                              of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+                              it is not set, which means tolerate the taint forever (do not evict). Zero and
+                              negative values will be treated as 0 (evict immediately) by the system.
+                            format: int64
+                            type: integer
+                          value:
+                            description: |-
+                              Value is the taint value the toleration matches to.
+                              If the operator is Exists, the value should be empty, otherwise just a regular string.
+                            type: string
+                        type: object
+                      type: array
+                    updateStrategy:
+                      default: RollingUpdate
+                      enum:
+                        - RollingUpdate
+                        - OnDelete
+                      type: string
+                  type: object
+                dcgm:
+                  description: DCGM component spec
+                  properties:
+                    args:
+                      description: 'Optional: List of arguments'
+                      items:
+                        type: string
+                      type: array
+                    enabled:
+                      description: Enabled indicates if deployment of NVIDIA DCGM Hostengine
+                        as a separate pod is enabled.
+ type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + hostPort: + description: 'HostPort represents host port that needs to be bound + for DCGM engine (Default: 5555)' + format: int32 + type: integer + image: + description: NVIDIA DCGM image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA DCGM image tag + type: string + type: object + dcgmExporter: + description: DCGMExporter spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom metrics configuration for NVIDIA + DCGM Exporter' + properties: + name: + description: ConfigMap name with file dcgm-metrics.csv for + metrics to be collected by NVIDIA DCGM Exporter + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA DCGM Exporter + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA DCGM Exporter image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM Exporter image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + serviceMonitor: + description: 'Optional: ServiceMonitor configuration for NVIDIA + DCGM Exporter' + properties: + additionalLabels: + additionalProperties: + type: string + description: AdditionalLabels to add to ServiceMonitor instance + for NVIDIA DCGM Exporter + type: object + enabled: + description: Enabled indicates if ServiceMonitor is deployed + for NVIDIA DCGM Exporter + type: boolean + honorLabels: + description: HonorLabels chooses the metric’s labels on collisions + with target labels. + type: boolean + interval: + description: |- + Interval which metrics should be scraped from NVIDIA DCGM Exporter. If not specified Prometheus’ global scrape interval is used. + Supported units: y, w, d, h, m, s, ms + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + relabelings: + description: Relabelings allows to rewrite labels on metric + sets for NVIDIA DCGM Exporter + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + + Only applicable when the action is `HashMod`. 
+ format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + + Regex capture groups are available. + type: string + type: object + type: array + type: object + version: + description: NVIDIA DCGM Exporter image tag + type: string + type: object + devicePlugin: + description: DevicePlugin component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Configuration for the NVIDIA Device Plugin + via the ConfigMap' + properties: + default: + description: Default config name within the ConfigMap for + the NVIDIA Device Plugin config + type: string + name: + description: ConfigMap name for NVIDIA Device Plugin config + including shared config between plugin and GFD + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Device + Plugin through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + mps: + description: 'Optional: MPS related configuration for the NVIDIA + Device Plugin' + properties: + root: + default: /run/nvidia/mps + description: Root defines the MPS root path on the host + type: string + type: object + repository: + description: NVIDIA Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Device Plugin image tag + type: string + type: object + driver: + description: Driver component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + certConfig: + description: 'Optional: Custom certificates configuration for + NVIDIA Driver container' + properties: + name: + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Driver + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + kernelModuleConfig: + description: 'Optional: Kernel module configuration parameters + for the NVIDIA Driver' + properties: + name: + type: string + type: object + licensingConfig: + description: 'Optional: Licensing configuration for NVIDIA vGPU + licensing' + properties: + configMapName: + type: string + nlsEnabled: + description: NLSEnabled indicates if NVIDIA Licensing System + is used for licensing. + type: boolean + type: object + livenessProbe: + description: NVIDIA Driver container liveness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+                            More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                          format: int32
+                          minimum: 1
+                          type: integer
+                      type: object
+                    manager:
+                      description: Manager represents configuration for NVIDIA Driver
+                        Manager initContainer
+                      properties:
+                        env:
+                          description: 'Optional: List of environment variables'
+                          items:
+                            description: EnvVar represents an environment variable present
+                              in a Container.
+                            properties:
+                              name:
+                                description: Name of the environment variable.
+                                type: string
+                              value:
+                                description: Value of the environment variable.
+                                type: string
+                            required:
+                              - name
+                            type: object
+                          type: array
+                        image:
+                          description: Image represents NVIDIA Driver Manager image
+                            name
+                          pattern: '[a-zA-Z0-9\-]+'
+                          type: string
+                        imagePullPolicy:
+                          description: Image pull policy
+                          type: string
+                        imagePullSecrets:
+                          description: Image pull secrets
+                          items:
+                            type: string
+                          type: array
+                        repository:
+                          description: Repository represents Driver Manager repository
+                            path
+                          type: string
+                        version:
+                          description: Version represents NVIDIA Driver Manager image
+                            tag (version)
+                          type: string
+                      type: object
+                    rdma:
+                      description: GPUDirectRDMASpec defines the properties for nvidia-peermem
+                        deployment
+                      properties:
+                        enabled:
+                          description: Enabled indicates if GPUDirect RDMA is enabled
+                            through GPU operator
+                          type: boolean
+                        useHostMofed:
+                          description: UseHostMOFED indicates to use MOFED drivers directly
+                            installed on the host to enable GPUDirect RDMA
+                          type: boolean
+                      type: object
+                    readinessProbe:
+                      description: NVIDIA Driver container readiness probe settings
+                      properties:
+                        failureThreshold:
+                          description: |-
+                            Minimum consecutive failures for the probe to be considered failed after having succeeded.
+                            Defaults to 3. Minimum value is 1.
+                          format: int32
+                          minimum: 1
+                          type: integer
+                        initialDelaySeconds:
+                          description: |-
+                            Number of seconds after the container has started before liveness probes are initiated.
+                            More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                          format: int32
+                          type: integer
+                        periodSeconds:
+                          description: |-
+                            How often (in seconds) to perform the probe.
+                            Default to 10 seconds. Minimum value is 1.
+                          format: int32
+                          minimum: 1
+                          type: integer
+                        successThreshold:
+                          description: |-
+                            Minimum consecutive successes for the probe to be considered successful after having failed.
+                            Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+                          format: int32
+                          minimum: 1
+                          type: integer
+                        timeoutSeconds:
+                          description: |-
+                            Number of seconds after which the probe times out.
+                            Defaults to 1 second. Minimum value is 1.
+                            More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                          format: int32
+                          minimum: 1
+                          type: integer
+                      type: object
+                    repoConfig:
+                      description: 'Optional: Custom repo configuration for NVIDIA Driver
+                        container'
+                      properties:
+                        configMapName:
+                          type: string
+                      type: object
+                    repository:
+                      description: NVIDIA Driver image repository
+                      type: string
+                    resources:
+                      description: 'Optional: Define resources requests and limits for
+                        each pod'
+                      properties:
+                        limits:
+                          additionalProperties:
+                            anyOf:
+                              - type: integer
+                              - type: string
+                            pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                            x-kubernetes-int-or-string: true
+                          description: |-
+                            Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + startupProbe: + description: NVIDIA Driver container startup probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + upgradePolicy: + description: Driver auto-upgrade settings + properties: + autoUpgrade: + default: false + description: |- + AutoUpgrade is a global switch for automatic upgrade feature + if set to false all other options are ignored + type: boolean + drain: + description: DrainSpec describes configuration for node drain + during automatic upgrade + properties: + deleteEmptyDir: + default: false + description: |- + DeleteEmptyDir indicates if should continue even if there are pods using emptyDir + (local data that will be deleted when the node is drained) + type: boolean + enable: + default: false + description: Enable indicates if node draining is allowed + during upgrade + type: boolean + force: + default: false + description: Force indicates if force draining is allowed + type: boolean + podSelector: + description: |- + PodSelector specifies a label selector to filter pods on the node that need to be drained + For more details on label selectors, see: + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + type: string + timeoutSeconds: + default: 300 + description: TimeoutSecond specifies the length of time + in seconds to wait before giving up drain, zero means + infinite + minimum: 0 + type: integer + type: object + maxParallelUpgrades: + default: 1 + description: |- + MaxParallelUpgrades indicates how many nodes can be upgraded in parallel + 0 means no limit, all nodes will be upgraded in parallel + minimum: 0 + type: integer + maxUnavailable: + anyOf: + - type: integer + - type: string + default: 25% + description: |- + MaxUnavailable is the maximum number of nodes with the driver installed, that can be unavailable during the upgrade. + Value can be an absolute number (ex: 5) or a percentage of total nodes at the start of upgrade (ex: 10%). + Absolute number is calculated from percentage by rounding up. + By default, a fixed value of 25% is used. 
+ x-kubernetes-int-or-string: true + podDeletion: + description: PodDeletionSpec describes configuration for deletion + of pods using special resources during automatic upgrade + properties: + deleteEmptyDir: + default: false + description: |- + DeleteEmptyDir indicates if should continue even if there are pods using emptyDir + (local data that will be deleted when the pod is deleted) + type: boolean + force: + default: false + description: Force indicates if force deletion is allowed + type: boolean + timeoutSeconds: + default: 300 + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite + minimum: 0 + type: integer + type: object + waitForCompletion: + description: WaitForCompletionSpec describes the configuration + for waiting on job completions + properties: + podSelector: + description: |- + PodSelector specifies a label selector for the pods to wait for completion + For more details on label selectors, see: + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + type: string + timeoutSeconds: + default: 0 + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite + minimum: 0 + type: integer + type: object + type: object + useNvidiaDriverCRD: + description: UseNvidiaDriverCRD indicates if the deployment of + NVIDIA Driver is managed by the NVIDIADriver CRD type + type: boolean + useOpenKernelModules: + description: UseOpenKernelModules indicates if the open GPU kernel + modules should be used + type: boolean + usePrecompiled: + description: UsePrecompiled indicates if deployment of NVIDIA + Driver using pre-compiled modules is enabled + type: boolean + version: + description: NVIDIA Driver image tag + type: string + virtualTopology: + description: 'Optional: Virtual Topology Daemon configuration + for NVIDIA vGPU drivers' + properties: + config: + description: 'Optional: Config name representing virtual topology + daemon configuration file nvidia-topologyd.conf' + type: string + type: object + type: object + gdrcopy: + description: GDRCopy component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GDRCopy is enabled through GPU + Operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+                            type: string
+                        required:
+                          - name
+                        type: object
+                      type: array
+                    image:
+                      description: NVIDIA GDRCopy driver image name
+                      pattern: '[a-zA-Z0-9\-]+'
+                      type: string
+                    imagePullPolicy:
+                      description: Image pull policy
+                      type: string
+                    imagePullSecrets:
+                      description: Image pull secrets
+                      items:
+                        type: string
+                      type: array
+                    repository:
+                      description: NVIDIA GDRCopy driver image repository
+                      type: string
+                    version:
+                      description: NVIDIA GDRCopy driver image tag
+                      type: string
+                  type: object
+                gds:
+                  description: GPUDirectStorage defines the spec for GDS components (Experimental)
+                  properties:
+                    args:
+                      description: 'Optional: List of arguments'
+                      items:
+                        type: string
+                      type: array
+                    enabled:
+                      description: Enabled indicates if GPUDirect Storage is enabled
+                        through GPU operator
+                      type: boolean
+                    env:
+                      description: 'Optional: List of environment variables'
+                      items:
+                        description: EnvVar represents an environment variable present
+                          in a Container.
+                        properties:
+                          name:
+                            description: Name of the environment variable.
+                            type: string
+                          value:
+                            description: Value of the environment variable.
+                            type: string
+                        required:
+                          - name
+                        type: object
+                      type: array
+                    image:
+                      description: NVIDIA GPUDirect Storage Driver image name
+                      pattern: '[a-zA-Z0-9\-]+'
+                      type: string
+                    imagePullPolicy:
+                      description: Image pull policy
+                      type: string
+                    imagePullSecrets:
+                      description: Image pull secrets
+                      items:
+                        type: string
+                      type: array
+                    repository:
+                      description: NVIDIA GPUDirect Storage Driver image repository
+                      type: string
+                    version:
+                      description: NVIDIA GPUDirect Storage Driver image tag
+                      type: string
+                  type: object
+                gfd:
+                  description: GPUFeatureDiscovery spec
+                  properties:
+                    args:
+                      description: 'Optional: List of arguments'
+                      items:
+                        type: string
+                      type: array
+                    enabled:
+                      description: Enabled indicates if deployment of GPU Feature Discovery
+                        Plugin is enabled.
+                      type: boolean
+                    env:
+                      description: 'Optional: List of environment variables'
+                      items:
+                        description: EnvVar represents an environment variable present
+                          in a Container.
+                        properties:
+                          name:
+                            description: Name of the environment variable.
+                            type: string
+                          value:
+                            description: Value of the environment variable.
+                            type: string
+                        required:
+                          - name
+                        type: object
+                      type: array
+                    image:
+                      description: GFD image name
+                      pattern: '[a-zA-Z0-9\-]+'
+                      type: string
+                    imagePullPolicy:
+                      description: Image pull policy
+                      type: string
+                    imagePullSecrets:
+                      description: Image pull secrets
+                      items:
+                        type: string
+                      type: array
+                    repository:
+                      description: GFD image repository
+                      type: string
+                    resources:
+                      description: 'Optional: Define resources requests and limits for
+                        each pod'
+                      properties:
+                        limits:
+                          additionalProperties:
+                            anyOf:
+                              - type: integer
+                              - type: string
+                            pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                            x-kubernetes-int-or-string: true
+                          description: |-
+                            Limits describes the maximum amount of compute resources allowed.
+                            More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                          type: object
+                        requests:
+                          additionalProperties:
+                            anyOf:
+                              - type: integer
+                              - type: string
+                            pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                            x-kubernetes-int-or-string: true
+                          description: |-
+                            Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: GFD image tag + type: string + type: object + kataManager: + description: KataManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: Kata Manager config + properties: + artifactsDir: + default: /opt/nvidia-gpu-operator/artifacts/runtimeclasses + description: |- + ArtifactsDir is the directory where kata artifacts (e.g. kernel / guest images, configuration, etc.) + are placed on the local filesystem. + type: string + runtimeClasses: + description: RuntimeClasses is a list of kata runtime classes + to configure. + items: + description: RuntimeClass defines the configuration for + a kata RuntimeClass + properties: + artifacts: + description: Artifacts are the kata artifacts associated + with the runtime class. + properties: + pullSecret: + description: PullSecret is the secret used to pull + the OCI artifact. + type: string + url: + description: |- + URL is the path to the OCI artifact (payload) containing all artifacts + associated with a kata runtime class. + type: string + required: + - url + type: object + name: + description: Name is the name of the kata runtime class. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector specifies the nodeSelector for the RuntimeClass object. + This ensures pods running with the RuntimeClass only get scheduled + onto nodes which support it. + type: object + required: + - artifacts + - name + type: object + type: array + type: object + enabled: + description: Enabled indicates if deployment of Kata Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Kata Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Kata Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: Kata Manager image tag + type: string + type: object + mig: + description: MIG spec + properties: + strategy: + description: 'Optional: MIGStrategy to apply for GFD and NVIDIA + Device Plugin' + enum: + - none + - single + - mixed + type: string + type: object + migManager: + description: MIGManager for configuration to deploy MIG Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom mig-parted configuration for NVIDIA + MIG Manager container' + properties: + default: + default: all-disabled + description: Default MIG config to be applied on the node, + when there is no config specified with the node label nvidia.com/mig.config + enum: + - all-disabled + - "" + type: string + name: + default: default-mig-parted-config + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA MIG Manager + is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + gpuClientsConfig: + description: 'Optional: Custom gpu-clients configuration for NVIDIA + MIG Manager container' + properties: + name: + description: ConfigMap name + type: string + type: object + image: + description: NVIDIA MIG Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA MIG Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA MIG Manager image tag + type: string + type: object + nodeStatusExporter: + description: NodeStatusExporter spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of Node Status Exporter + is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Node Status Exporter image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Node Status Exporter image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: Node Status Exporter image tag + type: string + type: object + operator: + description: Operator component spec + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects.
+ type: object + defaultRuntime: + default: docker + description: Runtime defines container runtime type + enum: + - docker + - crio + - containerd + type: string + initContainer: + description: InitContainerSpec describes configuration for initContainer + image used with all components + properties: + image: + description: Image represents image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents image repository path + type: string + version: + description: Version represents image tag(version) + type: string + type: object + labels: + additionalProperties: + type: string + description: |- + Optional: Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + type: object + runtimeClass: + default: nvidia + type: string + use_ocp_driver_toolkit: + description: UseOpenShiftDriverToolkit indicates if DriverToolkit + image should be used on OpenShift to build and install driver + modules + type: boolean + required: + - defaultRuntime + type: object + psa: + description: PSA defines spec for PodSecurityAdmission configuration + properties: + enabled: + description: Enabled indicates if PodSecurityAdmission configuration + needs to be enabled for all Pods + type: boolean + type: object + psp: + description: |- + Deprecated: Pod Security Policies are no longer supported. Please use PodSecurityAdmission instead + PSP defines spec for handling PodSecurityPolicies + properties: + enabled: + description: Enabled indicates if PodSecurityPolicies needs to + be enabled for all Pods + type: boolean + type: object + sandboxDevicePlugin: + description: SandboxDevicePlugin component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA Sandbox + Device Plugin through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Sandbox Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA Sandbox Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
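# Editor's note: a hedged sketch of the operator component spec described above,
# using only fields and enum values from this schema; shown for orientation, not
# as a recommended configuration.
operator:
  defaultRuntime: containerd    # one of: docker | crio | containerd (schema default: docker)
  runtimeClass: nvidia          # schema default
  use_ocp_driver_toolkit: true  # OpenShift only: build/install modules via DriverToolkit
  initContainer: {}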
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Sandbox Device Plugin image tag + type: string + type: object + sandboxWorkloads: + description: SandboxWorkloads defines the spec for handling sandbox + workloads (i.e. Virtual Machines) + properties: + defaultWorkload: + default: container + description: |- + DefaultWorkload indicates the default GPU workload type to configure + worker nodes in the cluster for + enum: + - container + - vm-passthrough + - vm-vgpu + type: string + enabled: + description: |- + Enabled indicates if the GPU Operator should manage additional operands required + for sandbox workloads (i.e. VFIO Manager, vGPU Manager, and additional device plugins) + type: boolean + type: object + toolkit: + description: Toolkit component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA Container + Toolkit through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Container Toolkit image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + installDir: + default: /usr/local/nvidia + description: Toolkit install directory on the host + type: string + repository: + description: NVIDIA Container Toolkit image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. 
Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Container Toolkit image tag + type: string + type: object + validator: + description: Validator defines the spec for operator-validator daemonset + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + cuda: + description: CUDA validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + driver: + description: Driver validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Validator image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + plugin: + description: Plugin validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + repository: + description: Validator image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits.
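# Editor's note: each per-check validator spec above (cuda, driver, plugin, toolkit,
# and friends) accepts only an env list. The WITH_WORKLOAD toggle shown here also
# appears in this bundle's alm-examples; treat the snippet as illustrative.
validator:
  plugin:
    env:
      - name: WITH_WORKLOAD
        value: "false"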
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + toolkit: + description: Toolkit validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + version: + description: Validator image tag + type: string + vfioPCI: + description: VfioPCI validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + vgpuDevices: + description: VGPUDevices validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + vgpuManager: + description: VGPUManager validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + type: object + vfioManager: + description: VFIOManager for configuration to deploy VFIO-PCI Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Manager repository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag(version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of VFIO Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable.
+ type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: VFIO Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: VFIO Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: VFIO Manager image tag + type: string + type: object + vgpuDeviceManager: + description: VGPUDeviceManager spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: NVIDIA vGPU devices configuration for NVIDIA vGPU + Device Manager container + properties: + default: + default: default + description: Default config name within the ConfigMap + type: string + name: + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Device + Manager is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Device Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Device Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Device Manager image tag + type: string + type: object + vgpuManager: + description: VGPUManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Manager repository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag(version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Manager + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed.
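# Editor's note: an illustrative combination of the sandbox-related specs above for
# a vGPU use case, assembled only from this schema's fields and enum values; where
# the vGPU Manager image is hosted is site-specific and deliberately omitted.
sandboxWorkloads:
  enabled: true
  defaultWorkload: vm-vgpu   # one of: container | vm-passthrough | vm-vgpu
vgpuManager:
  enabled: true
vgpuDeviceManager:
  enabled: true
  config:
    default: default         # schema default: config name within the ConfigMap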
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Manager image tag + type: string + type: object + required: + - daemonsets + - dcgm + - dcgmExporter + - devicePlugin + - driver + - gfd + - nodeStatusExporter + - operator + - toolkit + type: object + status: + description: ClusterPolicyStatus defines the observed state of ClusterPolicy + properties: + conditions: + description: Conditions is a list of conditions representing the ClusterPolicy's + current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace indicates a namespace in which the operator + is installed + type: string + state: + description: State indicates status of ClusterPolicy + enum: + - ignored + - ready + - notReady + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/bundle/v24.3.0/manifests/nvidia.com_nvidiadrivers.yaml b/bundle/v24.3.0/manifests/nvidia.com_nvidiadrivers.yaml new file mode 100644 index 000000000..317972fd2 --- /dev/null +++ b/bundle/v24.3.0/manifests/nvidia.com_nvidiadrivers.yaml @@ -0,0 +1,800 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: nvidiadrivers.nvidia.com +spec: + group: nvidia.com + names: + kind: NVIDIADriver + listKind: NVIDIADriverList + plural: nvidiadrivers + shortNames: + - nvd + - nvdriver + - nvdrivers + singular: nvidiadriver + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: NVIDIADriver is the Schema for the nvidiadrivers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NVIDIADriverSpec defines the desired state of NVIDIADriver + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + certConfig: + description: 'Optional: Custom certificates configuration for NVIDIA + Driver container' + properties: + name: + type: string + type: object + driverType: + default: gpu + description: DriverType defines NVIDIA driver type + enum: + - gpu + - vgpu + - vgpu-host-manager + type: string + x-kubernetes-validations: + - message: driverType is an immutable field. Please create a new NvidiaDriver + resource instead when you want to change this setting. 
+ rule: self == oldSelf + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + gdrcopy: + description: GDRCopy defines the spec for GDRCopy driver + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GDRCopy is enabled through GPU + operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: GDRCopy driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: GDRCopy driver image repository + type: string + version: + description: GDRCopy driver image tag + type: string + type: object + gds: + description: GPUDirectStorage defines the spec for GDS driver + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GPUDirect Storage is enabled + through GPU operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA GPUDirect Storage Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA GPUDirect Storage Driver image repository + type: string + version: + description: NVIDIA GPUDirect Storage Driver image tag + type: string + type: object + image: + default: nvcr.io/nvidia/driver + description: NVIDIA Driver container image name + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + kernelModuleConfig: + description: 'Optional: Kernel module configuration parameters for + the NVIDIA Driver' + properties: + name: + type: string + type: object + labels: + additionalProperties: + type: string + description: |- + Optional: Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + type: object + licensingConfig: + description: 'Optional: Licensing configuration for NVIDIA vGPU licensing' + properties: + name: + type: string + nlsEnabled: + description: NLSEnabled indicates if NVIDIA Licensing System is + used for licensing.
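# Editor's note: a minimal NVIDIADriver custom resource consistent with this CRD.
# The repository/image split mirrors this bundle's alm-examples; the version tag is
# a hypothetical placeholder, and driverType is immutable per the
# x-kubernetes-validations rule above.
apiVersion: nvidia.com/v1alpha1
kind: NVIDIADriver
metadata:
  name: gpu-driver
spec:
  driverType: gpu              # immutable once set
  repository: nvcr.io/nvidia
  image: driver
  version: "550.54.15"         # hypothetical driver branch/tag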
+ type: boolean + type: object + livenessProbe: + description: NVIDIA Driver container liveness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Defaults to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + manager: + description: Manager represents configuration for NVIDIA Driver Manager + initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Manager repository path + type: string + version: + description: Version represents NVIDIA Driver Manager image tag(version) + type: string + type: object + nodeAffinity: + description: Affinity specifies node affinity rules for driver pods + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding + weight. + properties: + matchExpressions: + description: A list of node selector requirements by + node's labels.
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by + node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty.
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies a selector for installation of + NVIDIA driver + type: object + priorityClassName: + description: 'Optional: Set priorityClassName' + type: string + rdma: + description: GPUDirectRDMA defines the spec for NVIDIA Peer Memory + driver + properties: + enabled: + description: Enabled indicates if GPUDirect RDMA is enabled through + GPU operator + type: boolean + useHostMofed: + description: UseHostMOFED indicates to use MOFED drivers directly + installed on the host to enable GPUDirect RDMA + type: boolean + type: object + readinessProbe: + description: NVIDIA Driver container readiness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Defaults to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1.
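# Editor's note: an illustrative pairing of the nodeSelector and nodeAffinity fields
# defined above; both label keys are hypothetical, and In is one of the operators the
# schema enumerates.
nodeSelector:
  example.com/gpu.present: "true"       # hypothetical node label
nodeAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
      - matchExpressions:
          - key: example.com/gpu.family # hypothetical label key
            operator: In
            values: ["ampere", "hopper"]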
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + repoConfig: + description: 'Optional: Custom repo configuration for NVIDIA Driver + container' + properties: + name: + type: string + type: object + repository: + description: NVIDIA Driver repository + type: string + resources: + description: 'Optional: Define resources requests and limits for each + pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + startupProbe: + description: NVIDIA Driver container startup probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Defaults to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + tolerations: + description: 'Optional: Set tolerations' + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + useOpenKernelModules: + description: UseOpenKernelModules indicates if the open GPU kernel + modules should be used + type: boolean + usePrecompiled: + description: UsePrecompiled indicates if deployment of NVIDIA Driver + using pre-compiled modules is enabled + type: boolean + x-kubernetes-validations: + - message: usePrecompiled is an immutable field. Please create a new + NvidiaDriver resource instead when you want to change this setting. + rule: self == oldSelf + version: + description: NVIDIA Driver version (or just branch for precompiled + drivers) + type: string + virtualTopologyConfig: + description: 'Optional: Virtual Topology Daemon configuration for + NVIDIA vGPU drivers' + properties: + name: + description: 'Optional: Config name representing virtual topology + daemon configuration file nvidia-topologyd.conf' + type: string + type: object + required: + - driverType + - image + type: object + status: + description: NVIDIADriverStatus defines the observed state of NVIDIADriver + properties: + conditions: + description: Conditions is a list of conditions representing the NVIDIADriver's + current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
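# Editor's note: a sketch of the tolerations list described above, letting driver
# pods schedule onto tainted GPU nodes. The taint key follows a common convention
# but is cluster-specific; adjust it to whatever taint your nodes actually carry.
tolerations:
  - key: nvidia.com/gpu    # assumes nodes are tainted with this key
    operator: Exists
    effect: NoSchedule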
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace indicates a namespace in which the operator + and driver are installed + type: string + state: + description: State indicates status of NVIDIADriver instance + enum: + - ignored + - ready + - notReady + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/bundle/v24.3.0/metadata/annotations.yaml b/bundle/v24.3.0/metadata/annotations.yaml new file mode 100644 index 000000000..d718b4016 --- /dev/null +++ b/bundle/v24.3.0/metadata/annotations.yaml @@ -0,0 +1,17 @@ +annotations: + operators.operatorframework.io.bundle.channels.v1: stable,v24.3 + operators.operatorframework.io.bundle.channel.default.v1: v24.3 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: gpu-operator-certified + operators.operatorframework.io.metrics.builder: operator-sdk-v1.4.0 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 + operators.operatorframework.io.test.config.v1: tests/scorecard/ + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operatorframework.io/cluster-monitoring: "true" + operatorframework.io/suggested-namespace: nvidia-gpu-operator + + # Annotations to specify OCP versions compatibility.
+ com.redhat.openshift.versions: v4.9-v4.15 diff --git a/bundle/v24.6.0/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/v24.6.0/manifests/gpu-operator-certified.clusterserviceversion.yaml new file mode 100644 index 000000000..7f6784a6f --- /dev/null +++ b/bundle/v24.6.0/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -0,0 +1,921 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + labels: + operatorframework.io/arch.arm64: supported + operatorframework.io/arch.amd64: supported + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/warn: privileged + annotations: + features.operators.openshift.io/disconnected: "true" + features.operators.openshift.io/fips-compliant: "false" + features.operators.openshift.io/proxy-aware: "true" + features.operators.openshift.io/tls-profiles: "false" + features.operators.openshift.io/token-auth-aws: "false" + features.operators.openshift.io/token-auth-azure: "false" + features.operators.openshift.io/token-auth-gcp: "false" + features.operators.openshift.io/cnf: "false" + features.operators.openshift.io/cni: "false" + features.operators.openshift.io/csi: "false" + olm.skipRange: '>=1.9.0 <24.6.0' + alm-examples: |- + [ + { + "apiVersion": "nvidia.com/v1", + "kind": "ClusterPolicy", + "metadata": { + "name": "gpu-cluster-policy" + }, + "spec": { + "operator": { + "defaultRuntime": "crio", + "use_ocp_driver_toolkit": true, + "initContainer": { + } + }, + "sandboxWorkloads": { + "enabled": false, + "defaultWorkload": "container" + }, + "driver": { + "enabled": true, + "useNvidiaDriverCRD": false, + "useOpenKernelModules": false, + "upgradePolicy": { + "autoUpgrade": true, + "drain": { + "deleteEmptyDir": false, + "enable": false, + "force": false, + "timeoutSeconds": 300 + }, + "maxParallelUpgrades": 1, + "maxUnavailable": "25%", + "podDeletion": { + "deleteEmptyDir": false, + "force": false, + "timeoutSeconds": 300 + }, + "waitForCompletion": { + "timeoutSeconds": 0 + } + }, + "repoConfig": { + "configMapName": "" + }, + "certConfig": { + "name": "" + }, + "licensingConfig": { + "nlsEnabled": true, + "configMapName": "" + }, + "virtualTopology": { + "config": "" + }, + "kernelModuleConfig": { + "name": "" + } + }, + "dcgmExporter": { + "enabled": true, + "config": { + "name": "" + }, + "serviceMonitor": { + "enabled": true + } + }, + "dcgm": { + "enabled": true + }, + "daemonsets": { + "updateStrategy": "RollingUpdate", + "rollingUpdate": { + "maxUnavailable": "1" + } + }, + "devicePlugin": { + "enabled": true, + "config": { + "name": "", + "default": "" + }, + "mps": { + "root": "/run/nvidia/mps" + } + }, + "gfd": { + "enabled": true + }, + "migManager": { + "enabled": true + }, + "nodeStatusExporter": { + "enabled": true + }, + "mig": { + "strategy": "single" + }, + "toolkit": { + "enabled": true + }, + "validator": { + "plugin": { + "env": [ + { + "name": "WITH_WORKLOAD", + "value": "false" + } + ] + } + }, + "vgpuManager": { + "enabled": false + }, + "vgpuDeviceManager": { + "enabled": true + }, + "sandboxDevicePlugin": { + "enabled": true + }, + "vfioManager": { + "enabled": true + }, + "gds": { + "enabled": false + }, + "gdrcopy": { + "enabled": false + } + } + }, + { + "apiVersion": "nvidia.com/v1alpha1", + "kind": "NVIDIADriver", + "metadata": { + "name": "gpu-driver" + }, + "spec": { + "driverType": "gpu", + "repository": "nvcr.io/nvidia", + "image": "driver", + "version": 
"sha256:858de27c152669f5a3cf4287406405b16dd5bb70c0373324eb735511997bb415", + "nodeSelector": {}, + "manager": {}, + "repoConfig": { + "name": "" + }, + "certConfig": { + "name": "" + }, + "licensingConfig": { + "nlsEnabled": true, + "name": "" + }, + "virtualTopologyConfig": { + "name": "" + }, + "kernelModuleConfig": { + "name": "" + } + } + } + ] + operators.operatorframework.io/builder: operator-sdk-v1.4.0 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 + operatorframework.io/suggested-namespace: nvidia-gpu-operator + capabilities: Deep Insights + categories: AI/Machine Learning, OpenShift Optional + certified: "true" + containerImage: nvcr.io/nvidia/gpu-operator@sha256:980454fdcc3084ffab1c8b8681819c3ca770738b96633a4497b3fc27e669fef9 + createdAt: "Thu Jul 27 13:57:56 PDT 2023" + description: Automate the management and monitoring of NVIDIA GPUs. + provider: NVIDIA + repository: http://github.com/NVIDIA/gpu-operator + support: NVIDIA + name: gpu-operator-certified.v24.6.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + relatedImages: + - name: gpu-operator-image + image: nvcr.io/nvidia/gpu-operator@sha256:980454fdcc3084ffab1c8b8681819c3ca770738b96633a4497b3fc27e669fef9 + - name: dcgm-exporter-image + image: nvcr.io/nvidia/k8s/dcgm-exporter@sha256:21f4c8b88716e8e6f732f9fb4c2efaef937c227491a8631c5e55036f80f39a4d + - name: dcgm-image + image: nvcr.io/nvidia/cloud-native/dcgm@sha256:15dab1273345df4a5844c4c761d064dbc4b592101251dc39174e597137123027 + - name: container-toolkit-image + image: nvcr.io/nvidia/k8s/container-toolkit@sha256:f95ef6a0c377e011bc0561c7d2c2bf32e45106fb0ba91ae9a10f97236ded0581 + - name: driver-image + image: nvcr.io/nvidia/driver@sha256:858de27c152669f5a3cf4287406405b16dd5bb70c0373324eb735511997bb415 + - name: driver-image-535 + image: nvcr.io/nvidia/driver@sha256:a6d12fb5753f267dda25dfd38910f972bc632c006a24107fa50e20bba3642d7c + - name: driver-image-470 + image: nvcr.io/nvidia/driver@sha256:07e11f85d54d49ec9648fb06e148b8d832ee1f9c3549a915eee853c9ef2949c2 + - name: device-plugin-image + image: nvcr.io/nvidia/k8s-device-plugin@sha256:f85fd6e328e36d4737bf394bf8bd69bec793656af686ca0491f28730d9b96d1a + - name: gpu-feature-discovery-image + image: nvcr.io/nvidia/k8s-device-plugin@sha256:f85fd6e328e36d4737bf394bf8bd69bec793656af686ca0491f28730d9b96d1a + - name: mig-manager-image + image: nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:781fb47e264d9e0fbc8da5bd046e5e678316c866bc36ddd4b56d4eb0de682d5b + - name: init-container-image + image: nvcr.io/nvidia/cuda@sha256:b0b6c9286f20432ba9becb711aff2d1c1bd56e47b33e6d1cab04aba926c067fe + - name: gpu-operator-validator-image + image: nvcr.io/nvidia/cloud-native/gpu-operator-validator@sha256:a4c9c6244f2a70b6c868ad4eb6b8eaf0a1fe9f91c9baefd8f58b0ad085dd715b + - name: k8s-driver-manager-image + image: nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:740abc3ff657545c10effd5354f09af525200ed9a1b7623f0c2e8c7bd9e4a4e2 + - name: vfio-manager-image + image: nvcr.io/nvidia/cuda@sha256:b0b6c9286f20432ba9becb711aff2d1c1bd56e47b33e6d1cab04aba926c067fe + - name: sandbox-device-plugin-image + image: nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:969147c01d63be5d1fe458f32f1cc0c7408cf3062531db91408e2fc57b4d8a67 + - name: vgpu-device-manager-image + image: nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:ae63fac9a4057a7646f0cf0ee0566e8928529adde05c4c0a017cda0599e381b2 + - name: gdrcopy-image + image: nvcr.io/nvidia/cloud-native/gdrdrv@sha256:33de74eb590f071403c17b6c210c02963245851971168bc0c07c06c100a9f376 
+ customresourcedefinitions: + owned: + - name: nvidiadrivers.nvidia.com + kind: NVIDIADriver + version: v1alpha1 + displayName: NVIDIADriver + description: NVIDIADriver allows you to deploy the NVIDIA driver + resources: + - kind: ServiceAccount + name: '' + version: v1 + - kind: DaemonSet + name: '' + version: apps/v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: clusterpolicies + name: '' + version: v1 + - kind: clusterversions + name: '' + version: v1 + - kind: nodes + name: '' + version: v1 + - kind: status + name: '' + version: v1 + specDescriptors: + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + statusDescriptors: + - description: The current state of the driver. + displayName: State + path: state + x-descriptors: + - 'urn:alm:descriptor:text' + - name: clusterpolicies.nvidia.com + kind: ClusterPolicy + version: v1 + group: nvidia.com + displayName: ClusterPolicy + description: ClusterPolicy allows you to configure the GPU Operator + resources: + - kind: ServiceAccount + name: '' + version: v1 + - kind: Deployment + name: '' + version: apps/v1 + - kind: DaemonSet + name: '' + version: apps/v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: clusterpolicies + name: '' + version: v1 + - kind: clusterversions + name: '' + version: v1 + - kind: nodes + name: '' + version: v1 + - kind: status + name: '' + version: v1 + specDescriptors: + - description: GPU Operator config + displayName: GPU Operator config + path: operator + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: operator.validator.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: Image pull secrets + displayName: Image pull secrets + path: operator.validator.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - description: NVIDIA GPU/vGPU Driver config + displayName: NVIDIA GPU/vGPU Driver config + path: driver + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: driver.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: driver.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 
'Optional: Set tolerations' + displayName: Tolerations + path: driver.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: driver.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: driver.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA DCGM Exporter config + displayName: NVIDIA DCGM Exporter config + path: dcgmExporter + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: dcgmExporter.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: dcgmExporter.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: dcgmExporter.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: dcgmExporter.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: dcgmExporter.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA Device Plugin config + displayName: NVIDIA Device Plugin config + path: devicePlugin + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: devicePlugin.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: devicePlugin.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: devicePlugin.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: devicePlugin.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: devicePlugin.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: GPU Feature Discovery Plugin config + displayName: GPU Feature Discovery Plugin config + path: gfd + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: gfd.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: 
gfd.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: gfd.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: gfd.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: gfd.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA Container Toolkit config + displayName: NVIDIA Container Toolkit config + path: toolkit + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: toolkit.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: toolkit.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: toolkit.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: toolkit.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: toolkit.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - displayName: NVIDIA DCGM config + description: NVIDIA DCGM config + path: dcgm + - displayName: Validator config + description: Validator config + path: validator + - displayName: Node Status Exporter config + description: Node Status Exporter config + path: nodeStatusExporter + - displayName: Daemonsets config + description: Daemonsets config + path: daemonsets + - displayName: MIG config + description: MIG config + path: mig + - displayName: NVIDIA MIG Manager config + description: NVIDIA MIG Manager config + path: migManager + - displayName: PodSecurityPolicy config + description: PodSecurityPolicy config + path: psp + - displayName: NVIDIA GPUDirect Storage config + description: NVIDIA GPUDirect Storage config + path: gds + - displayName: Sandbox Workloads config + description: Sandbox Workloads config + path: sandboxWorkloads + - displayName: NVIDIA vGPU Manager config + description: NVIDIA vGPU Manager config + path: vgpuManager + - displayName: NVIDIA vGPU Device Manager config + description: NVIDIA vGPU Device Manager config + path: vgpuDeviceManager + - displayName: VFIO Manager config + description: VFIO Manager config + path: vfioManager + - displayName: NVIDIA Sandbox Device Plugin config + description: NVIDIA Sandbox Device Plugin config + path: sandboxDevicePlugin + statusDescriptors: + - description: The current state of the operator. 
+ displayName: State + path: state + x-descriptors: + - 'urn:alm:descriptor:text' + displayName: NVIDIA GPU Operator + description: > + Kubernetes provides access to special hardware resources such as NVIDIA + GPUs, NICs, Infiniband adapters and other devices through the [device plugin + framework](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/). + However, configuring and managing nodes with these hardware resources + requires configuration of multiple software components such as drivers, + container runtimes or other libraries which are difficult and prone to + errors. + + The NVIDIA GPU Operator uses the [operator + framework](https://cloud.redhat.com/blog/introducing-the-operator-framework) within + Kubernetes to automate the management of all NVIDIA software components + needed to provision and monitor GPUs. + These components include the NVIDIA drivers (to enable CUDA), Kubernetes + device plugin for GPUs, the NVIDIA Container Runtime, automatic node + labelling and NVIDIA DCGM exporter. + + Visit the official site of the [GPU Operator](https://github.com/NVIDIA/gpu-operator) for more information. + For getting started with using the GPU Operator with OpenShift, see the instructions + [here](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/openshift/contents.html). + icon: + - base64data: iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAB2lBMVEUAAAD///8EBAN3uQACAgIAAAQJDQUCAgB1tgAHCQf+/v5Ufg5Hagxxqwt+xgJ3uAB9wwB4vQBRUVEeLA3e3t5nZ2coKCgODg4FBwZ9wwR6wAJ4vADz8/MbGxt5tw1vpw1/wgoOFwkLDwh9xQH5+fny8vLw8PDFxcWysrKFhYVvb282NjYyMjIqKioXFxdikxRYgxNCYxJQdhFqoQ9xrg16ugxyqgyAxQkEBQj7+/v29vbIyMhjY2NbW1tHR0cvLy8kJCQdHR0ZGRlKbxJ8uhFNcxFVgBAxSBBgkg93tQ50sA4qPg4XIg18vwsbKQsSGgsLCwsMEwqCyQeByQFztADPz8+/v7+6urqWlpZra2tKSkogICASEhJmmRE8XBA5VRA2UBBonA9biA9GaQ4sQg4jMw4mOQ0aJw2GzgsUHgttpAqJ0Ql/wQWG0AJ8vwF0uQCtra2jo6OQkJB9fX1VVVVCQkI9PT0iIiIUFBRSfBNgjhA7WRBGZw+GywmFzgaAyASBxQN2twDb29u2traenp6Kiop+fn53d3dzc3NyqRV4sxM/YBNAXRElNhBjlQ+IzA00TQ16vgxJbgp6vAl4tgJ3vgDs7Ozn5+fa2trS0tJCXRY6VBV6thSL1gf4nFdFAAAD80lEQVRYw+zSOXPaQBgG4He0LJJmbGRGDUIzuvgBQiAEPfcdwC33DTbUtmOwSyc+4iRucvzXRImLFJmRShc8xXbfu+9+szg4OHjjAsH/iFD49q7rqM6xc/wPtWyBhS8sC94ObWRCZDksh1+RzmcEfI0DoPrjylEkSTgViMs9udjYTwMG4Gf51Z1BM81ioRwit+QvgYsdUQZeKFr3ladyKXvVr+pAM5uKcmRLXFzoCIxn+0i/8lSaBMHnfi7qowfQuZnm3PuFPwGs13zD3NlViozY/z4YD6/TCQORbPr2q78GLB0ou5IO40pd5AxQZnJ83m2y9Ju2JYKfgEhWC18aEIfrZLURHwQC0B87ySZwHxX8BNDWB1KfQfyxT2TA24uPQMt8yTWA3obz8wQGlhTN06Z900MkuJLrYu3u5LkK9LTtGRF8NEDLeSnXYLUdHUFVlpPqTa4IamlhJZ464biY1w4CKGrROOW7uwLlV+Q02lanCF6cbSoPVLzUfPwDll5I9T6WyXWhZre1yjiI6VCSzCWY3+FKaAwGHngzpEygx6+V6Uzk6TJR7yhWxJ1bFgTPJ7gMc58aUCq+n+qNT6Pn8y/xOcCiZZVjnJ+AAPhEuj0SKZ9bL9ZpNS9SgM6z9p5w3jt43cMvecfWBhm7dtfEpfhYMDBYpFd7mDZIAxPCFKgBhB0hkWbE2wVMyqycfhOMEiebSzFz5IMTEjw7E87UFj4GVR7GXqaSkoIcISEc/I38/PwhOTUMRBrADgwK09zgYGUBqbwcARiQyp3Eyk6kC4BloqtbJTcaSHIHShALWFmBSRuCWBGC+AtDMAAGIpAAc9mBiB0sCLSXHUSygxSxEIoE7IKEgbhopKgogC96x04QCMMw/H0cG6f0cEmBHaLc7FFQzApoTLwtQgWUWo26glx2mzGkyoHM1PPMO/NrnSH8e2QAiRsZ8S3ZuJoW5Udg5moGoMRLN2gAnkcUctueJ1gADsdtlZ2AgmSYoaDZBXwRctcwy6HN3XX/wfnTnA7Q5x0S0Gku4wHpe7Ql8Mbtu4TqC3qcADGtUl4O3eK0AkZdKH1mU/a6MFQGA7pQGoAVoAuuPYZlLJF2BawVLLjwac6Q8wUax61/CpKQAT6ZX3hFqoqqAFvuf4AzM+NgsoBS/wcSOD7SFzyf6CE9UQK9II1MRvIJm8QSgsLiBZuypsAWKyARElgx5FcLv1N4nFLbB45Sh6+TzsQRtn7bz/B3fS9GQ12bgUE2PKycQbwgXD0SWLwVhpZFq4eHhWloOjLoqGvoRYRGAR2vp2EtpNUaTUpiRAizMAEhKNXpYZNnAUlBCSgFYTIxQTlMMJNGwSgYBdQHAFsKs+/bUkeyAAAAAElFTkSuQmCC + mediatype: image/png + install: + spec: + clusterPermissions: + - serviceAccountName: gpu-operator + rules: 
+ - apiGroups: + - nvidia.com + resources: + - clusterpolicies + - clusterpolicies/finalizers + - clusterpolicies/status + - nvidiadrivers + - nvidiadrivers/finalizers + - nvidiadrivers/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - deletecollection + - apiGroups: + - config.openshift.io + resources: + - clusterversions + - proxies + verbs: + - get + - list + - watch + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use + - create + - get + - list + - watch + - patch + - update + - delete + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use + resourceNames: + - hostmount-anyuid + - apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - get + - list + - watch + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - prometheusrules + verbs: + - get + - list + - create + - watch + - update + - delete + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - pods + - pods/eviction + - services + - services/finalizers + - events + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - create + - watch + - update + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch + - apiGroups: + - node.k8s.io + resources: + - runtimeclasses + verbs: + - get + - list + - create + - update + - watch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + permissions: + - serviceAccountName: gpu-operator + rules: + - apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - secrets + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + deployments: + - name: gpu-operator + spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: gpu-operator + app: gpu-operator + strategy: {} + template: + metadata: + labels: + app.kubernetes.io/component: gpu-operator + app: gpu-operator + nvidia.com/gpu-driver-upgrade-drain.skip: "true" + spec: + priorityClassName: system-node-critical + containers: + - args: + - --leader-elect + - --leader-lease-renew-deadline + - "60s" + image: nvcr.io/nvidia/gpu-operator@sha256:980454fdcc3084ffab1c8b8681819c3ca770738b96633a4497b3fc27e669fef9 + command: + - gpu-operator + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: gpu-operator + ports: + - name: metrics + containerPort: 8080 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + 
initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 200m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /host-etc/os-release + name: host-os-release + readOnly: true + env: + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "VALIDATOR_IMAGE" + value: "nvcr.io/nvidia/cloud-native/gpu-operator-validator@sha256:a4c9c6244f2a70b6c868ad4eb6b8eaf0a1fe9f91c9baefd8f58b0ad085dd715b" + - name: "GFD_IMAGE" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:f85fd6e328e36d4737bf394bf8bd69bec793656af686ca0491f28730d9b96d1a" + - name: "CONTAINER_TOOLKIT_IMAGE" + value: "nvcr.io/nvidia/k8s/container-toolkit@sha256:f95ef6a0c377e011bc0561c7d2c2bf32e45106fb0ba91ae9a10f97236ded0581" + - name: "DCGM_IMAGE" + value: "nvcr.io/nvidia/cloud-native/dcgm@sha256:15dab1273345df4a5844c4c761d064dbc4b592101251dc39174e597137123027" + - name: "DCGM_EXPORTER_IMAGE" + value: "nvcr.io/nvidia/k8s/dcgm-exporter@sha256:21f4c8b88716e8e6f732f9fb4c2efaef937c227491a8631c5e55036f80f39a4d" + - name: "DEVICE_PLUGIN_IMAGE" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:f85fd6e328e36d4737bf394bf8bd69bec793656af686ca0491f28730d9b96d1a" + - name: "DRIVER_IMAGE" + value: "nvcr.io/nvidia/driver@sha256:858de27c152669f5a3cf4287406405b16dd5bb70c0373324eb735511997bb415" + - name: "DRIVER_IMAGE-535" + value: "nvcr.io/nvidia/driver@sha256:a6d12fb5753f267dda25dfd38910f972bc632c006a24107fa50e20bba3642d7c" + - name: "DRIVER_IMAGE-470" + value: "nvcr.io/nvidia/driver@sha256:07e11f85d54d49ec9648fb06e148b8d832ee1f9c3549a915eee853c9ef2949c2" + - name: "DRIVER_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:740abc3ff657545c10effd5354f09af525200ed9a1b7623f0c2e8c7bd9e4a4e2" + - name: "MIG_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:781fb47e264d9e0fbc8da5bd046e5e678316c866bc36ddd4b56d4eb0de682d5b" + - name: "CUDA_BASE_IMAGE" + value: "nvcr.io/nvidia/cuda@sha256:b0b6c9286f20432ba9becb711aff2d1c1bd56e47b33e6d1cab04aba926c067fe" + - name: "VFIO_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cuda@sha256:b0b6c9286f20432ba9becb711aff2d1c1bd56e47b33e6d1cab04aba926c067fe" + - name: "SANDBOX_DEVICE_PLUGIN_IMAGE" + value: "nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:969147c01d63be5d1fe458f32f1cc0c7408cf3062531db91408e2fc57b4d8a67" + - name: "VGPU_DEVICE_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:ae63fac9a4057a7646f0cf0ee0566e8928529adde05c4c0a017cda0599e381b2" + - name: "GDRCOPY_IMAGE" + value: "nvcr.io/nvidia/cloud-native/gdrdrv@sha256:33de74eb590f071403c17b6c210c02963245851971168bc0c07c06c100a9f376" + terminationGracePeriodSeconds: 10 + volumes: + - hostPath: + path: /etc/os-release + name: host-os-release + serviceAccountName: gpu-operator + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: false + type: AllNamespaces + keywords: + - gpu + - cuda + - compute + - operator + - deep learning + - monitoring + - tesla + maintainers: + - name: NVIDIA + email: operator_feedback@nvidia.com + maturity: stable + provider: + name: NVIDIA Corporation + version: 24.6.0 + replaces: gpu-operator-certified.v24.3.0 diff --git a/bundle/v24.6.0/manifests/nvidia.com_clusterpolicies.yaml b/bundle/v24.6.0/manifests/nvidia.com_clusterpolicies.yaml new file mode 100644 index 000000000..8e29cabf1 --- /dev/null 
+++ b/bundle/v24.6.0/manifests/nvidia.com_clusterpolicies.yaml @@ -0,0 +1,2404 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: clusterpolicies.nvidia.com +spec: + group: nvidia.com + names: + kind: ClusterPolicy + listKind: ClusterPolicyList + plural: clusterpolicies + singular: clusterpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1 + schema: + openAPIV3Schema: + description: ClusterPolicy is the Schema for the clusterpolicies API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterPolicySpec defines the desired state of ClusterPolicy + properties: + ccManager: + description: CCManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + defaultMode: + description: Default CC mode setting for compatible GPUs on the + node + enum: + - "on" + - "off" + - devtools + type: string + enabled: + description: Enabled indicates if deployment of CC Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: CC Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: CC Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+                    If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+                    otherwise to an implementation-defined value. Requests cannot exceed Limits.
+                    More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                  type: object
+              type: object
+            version:
+              description: CC Manager image tag
+              type: string
+          type: object
+        cdi:
+          description: CDI configures how the Container Device Interface is
+            used in the cluster
+          properties:
+            default:
+              default: false
+              description: Default indicates whether to use CDI as the default
+                mechanism for providing GPU access to containers.
+              type: boolean
+            enabled:
+              default: false
+              description: Enabled indicates whether CDI can be used to make
+                GPUs accessible to containers.
+              type: boolean
+          type: object
+        daemonsets:
+          description: Daemonset defines common configuration for all Daemonsets
+          properties:
+            annotations:
+              additionalProperties:
+                type: string
+              description: |-
+                Optional: Annotations is an unstructured key value map stored with a resource that may be
+                set by external tools to store and retrieve arbitrary metadata. They are not
+                queryable and should be preserved when modifying objects.
+              type: object
+            labels:
+              additionalProperties:
+                type: string
+              description: |-
+                Optional: Map of string keys and values that can be used to organize and categorize
+                (scope and select) objects. May match selectors of replication controllers
+                and services.
+              type: object
+            priorityClassName:
+              type: string
+            rollingUpdate:
+              description: 'Optional: Configuration for rolling update of all
+                DaemonSet pods'
+              properties:
+                maxUnavailable:
+                  type: string
+              type: object
+            tolerations:
+              description: 'Optional: Set tolerations'
+              items:
+                description: |-
+                  The pod this Toleration is attached to tolerates any taint that matches
+                  the triple <key,value,effect> using the matching operator <operator>.
+                properties:
+                  effect:
+                    description: |-
+                      Effect indicates the taint effect to match. Empty means match all taint effects.
+                      When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                    type: string
+                  key:
+                    description: |-
+                      Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                      If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                    type: string
+                  operator:
+                    description: |-
+                      Operator represents a key's relationship to the value.
+                      Valid operators are Exists and Equal. Defaults to Equal.
+                      Exists is equivalent to wildcard for value, so that a pod can
+                      tolerate all taints of a particular category.
+                    type: string
+                  tolerationSeconds:
+                    description: |-
+                      TolerationSeconds represents the period of time the toleration (which must be
+                      of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+                      it is not set, which means tolerate the taint forever (do not evict). Zero and
+                      negative values will be treated as 0 (evict immediately) by the system.
+                    format: int64
+                    type: integer
+                  value:
+                    description: |-
+                      Value is the taint value the toleration matches to.
+                      If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string + type: object + type: array + updateStrategy: + default: RollingUpdate + enum: + - RollingUpdate + - OnDelete + type: string + type: object + dcgm: + description: DCGM component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA DCGM Hostengine + as a separate pod is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + hostPort: + description: 'Deprecated: HostPort represents host port that needs + to be bound for DCGM engine (Default: 5555)' + format: int32 + type: integer + image: + description: NVIDIA DCGM image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA DCGM image tag + type: string + type: object + dcgmExporter: + description: DCGMExporter spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom metrics configuration for NVIDIA + DCGM Exporter' + properties: + name: + description: ConfigMap name with file dcgm-metrics.csv for + metrics to be collected by NVIDIA DCGM Exporter + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA DCGM Exporter + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA DCGM Exporter image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM Exporter image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + serviceMonitor: + description: 'Optional: ServiceMonitor configuration for NVIDIA + DCGM Exporter' + properties: + additionalLabels: + additionalProperties: + type: string + description: AdditionalLabels to add to ServiceMonitor instance + for NVIDIA DCGM Exporter + type: object + enabled: + description: Enabled indicates if ServiceMonitor is deployed + for NVIDIA DCGM Exporter + type: boolean + honorLabels: + description: HonorLabels chooses the metric’s labels on collisions + with target labels. + type: boolean + interval: + description: |- + Interval which metrics should be scraped from NVIDIA DCGM Exporter. If not specified Prometheus’ global scrape interval is used. + Supported units: y, w, d, h, m, s, ms + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + relabelings: + description: Relabelings allows to rewrite labels on metric + sets for NVIDIA DCGM Exporter + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + + Only applicable when the action is `HashMod`. 
+ format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + + Regex capture groups are available. + type: string + type: object + type: array + type: object + version: + description: NVIDIA DCGM Exporter image tag + type: string + type: object + devicePlugin: + description: DevicePlugin component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Configuration for the NVIDIA Device Plugin + via the ConfigMap' + properties: + default: + description: Default config name within the ConfigMap for + the NVIDIA Device Plugin config + type: string + name: + description: ConfigMap name for NVIDIA Device Plugin config + including shared config between plugin and GFD + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Device + Plugin through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + mps: + description: 'Optional: MPS related configuration for the NVIDIA + Device Plugin' + properties: + root: + default: /run/nvidia/mps + description: Root defines the MPS root path on the host + type: string + type: object + repository: + description: NVIDIA Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Device Plugin image tag + type: string + type: object + driver: + description: Driver component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + certConfig: + description: 'Optional: Custom certificates configuration for + NVIDIA Driver container' + properties: + name: + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Driver + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + kernelModuleConfig: + description: 'Optional: Kernel module configuration parameters + for the NVIDIA Driver' + properties: + name: + type: string + type: object + licensingConfig: + description: 'Optional: Licensing configuration for NVIDIA vGPU + licensing' + properties: + configMapName: + type: string + nlsEnabled: + description: NLSEnabled indicates if NVIDIA Licensing System + is used for licensing. + type: boolean + type: object + livenessProbe: + description: NVIDIA Driver container liveness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+                    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                  format: int32
+                  minimum: 1
+                  type: integer
+              type: object
+            manager:
+              description: Manager represents configuration for NVIDIA Driver
+                Manager initContainer
+              properties:
+                env:
+                  description: 'Optional: List of environment variables'
+                  items:
+                    description: EnvVar represents an environment variable present
+                      in a Container.
+                    properties:
+                      name:
+                        description: Name of the environment variable.
+                        type: string
+                      value:
+                        description: Value of the environment variable.
+                        type: string
+                    required:
+                    - name
+                    type: object
+                  type: array
+                image:
+                  description: Image represents NVIDIA Driver Manager image
+                    name
+                  pattern: '[a-zA-Z0-9\-]+'
+                  type: string
+                imagePullPolicy:
+                  description: Image pull policy
+                  type: string
+                imagePullSecrets:
+                  description: Image pull secrets
+                  items:
+                    type: string
+                  type: array
+                repository:
+                  description: Repository represents Driver Manager repository
+                    path
+                  type: string
+                version:
+                  description: Version represents NVIDIA Driver Manager image
+                    tag (version)
+                  type: string
+              type: object
+            rdma:
+              description: GPUDirectRDMASpec defines the properties for nvidia-peermem
+                deployment
+              properties:
+                enabled:
+                  description: Enabled indicates if GPUDirect RDMA is enabled
+                    through GPU operator
+                  type: boolean
+                useHostMofed:
+                  description: UseHostMOFED indicates to use MOFED drivers directly
+                    installed on the host to enable GPUDirect RDMA
+                  type: boolean
+              type: object
+            readinessProbe:
+              description: NVIDIA Driver container readiness probe settings
+              properties:
+                failureThreshold:
+                  description: |-
+                    Minimum consecutive failures for the probe to be considered failed after having succeeded.
+                    Defaults to 3. Minimum value is 1.
+                  format: int32
+                  minimum: 1
+                  type: integer
+                initialDelaySeconds:
+                  description: |-
+                    Number of seconds after the container has started before liveness probes are initiated.
+                    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                  format: int32
+                  type: integer
+                periodSeconds:
+                  description: |-
+                    How often (in seconds) to perform the probe.
+                    Default to 10 seconds. Minimum value is 1.
+                  format: int32
+                  minimum: 1
+                  type: integer
+                successThreshold:
+                  description: |-
+                    Minimum consecutive successes for the probe to be considered successful after having failed.
+                    Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+                  format: int32
+                  minimum: 1
+                  type: integer
+                timeoutSeconds:
+                  description: |-
+                    Number of seconds after which the probe times out.
+                    Defaults to 1 second. Minimum value is 1.
+                    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                  format: int32
+                  minimum: 1
+                  type: integer
+              type: object
+            repoConfig:
+              description: 'Optional: Custom repo configuration for NVIDIA Driver
+                container'
+              properties:
+                configMapName:
+                  type: string
+              type: object
+            repository:
+              description: NVIDIA Driver image repository
+              type: string
+            resources:
+              description: 'Optional: Define resources requests and limits for
+                each pod'
+              properties:
+                limits:
+                  additionalProperties:
+                    anyOf:
+                    - type: integer
+                    - type: string
+                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                    x-kubernetes-int-or-string: true
+                  description: |-
+                    Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + startupProbe: + description: NVIDIA Driver container startup probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+                    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                  format: int32
+                  minimum: 1
+                  type: integer
+              type: object
+            upgradePolicy:
+              description: Driver auto-upgrade settings
+              properties:
+                autoUpgrade:
+                  default: false
+                  description: |-
+                    AutoUpgrade is a global switch for the automatic upgrade feature;
+                    if set to false, all other options are ignored
+                  type: boolean
+                drain:
+                  description: DrainSpec describes configuration for node drain
+                    during automatic upgrade
+                  properties:
+                    deleteEmptyDir:
+                      default: false
+                      description: |-
+                        DeleteEmptyDir indicates if the drain should continue even if there are pods using emptyDir
+                        (local data that will be deleted when the node is drained)
+                      type: boolean
+                    enable:
+                      default: false
+                      description: Enable indicates if node draining is allowed
+                        during upgrade
+                      type: boolean
+                    force:
+                      default: false
+                      description: Force indicates if force draining is allowed
+                      type: boolean
+                    podSelector:
+                      description: |-
+                        PodSelector specifies a label selector to filter pods on the node that need to be drained
+                        For more details on label selectors, see:
+                        https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+                      type: string
+                    timeoutSeconds:
+                      default: 300
+                      description: TimeoutSeconds specifies the length of time
+                        in seconds to wait before giving up on the drain; zero
+                        means infinite
+                      minimum: 0
+                      type: integer
+                  type: object
+                maxParallelUpgrades:
+                  default: 1
+                  description: |-
+                    MaxParallelUpgrades indicates how many nodes can be upgraded in parallel
+                    0 means no limit, all nodes will be upgraded in parallel
+                  minimum: 0
+                  type: integer
+                maxUnavailable:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  default: 25%
+                  description: |-
+                    MaxUnavailable is the maximum number of nodes with the driver installed that can be unavailable during the upgrade.
+                    Value can be an absolute number (ex: 5) or a percentage of total nodes at the start of upgrade (ex: 10%).
+                    Absolute number is calculated from percentage by rounding up.
+                    By default, a fixed value of 25% is used.
+                  x-kubernetes-int-or-string: true
+                podDeletion:
+                  description: PodDeletionSpec describes configuration for deletion
+                    of pods using special resources during automatic upgrade
+                  properties:
+                    deleteEmptyDir:
+                      default: false
+                      description: |-
+                        DeleteEmptyDir indicates if the deletion should continue even if there are pods using emptyDir
+                        (local data that will be deleted when the pod is deleted)
+                      type: boolean
+                    force:
+                      default: false
+                      description: Force indicates if force deletion is allowed
+                      type: boolean
+                    timeoutSeconds:
+                      default: 300
+                      description: |-
+                        TimeoutSeconds specifies the length of time in seconds to wait before giving up on pod termination; zero means
+                        infinite
+                      minimum: 0
+                      type: integer
+                  type: object
+                waitForCompletion:
+                  description: WaitForCompletionSpec describes the configuration
+                    for waiting on job completions
+                  properties:
+                    podSelector:
+                      description: |-
+                        PodSelector specifies a label selector for the pods to wait for completion
+                        For more details on label selectors, see:
+                        https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+                      type: string
+                    timeoutSeconds:
+                      default: 0
+                      description: |-
+                        TimeoutSeconds specifies the length of time in seconds to wait before giving up on pod completion; zero means
+                        infinite
+                      minimum: 0
+                      type: integer
+                  type: object
+              type: object
+            useNvidiaDriverCRD:
+              description: UseNvidiaDriverCRD indicates if the deployment of
+                NVIDIA Driver is managed by the NVIDIADriver CRD type
+              type: boolean
+            useOpenKernelModules:
+              description: UseOpenKernelModules indicates if the open GPU kernel
+                modules should be used
+              type: boolean
+            usePrecompiled:
+              description: UsePrecompiled indicates if deployment of NVIDIA
+                Driver using pre-compiled modules is enabled
+              type: boolean
+            version:
+              description: NVIDIA Driver image tag
+              type: string
+            virtualTopology:
+              description: 'Optional: Virtual Topology Daemon configuration
+                for NVIDIA vGPU drivers'
+              properties:
+                config:
+                  description: 'Optional: Config name representing virtual topology
+                    daemon configuration file nvidia-topologyd.conf'
+                  type: string
+              type: object
+          type: object
+        gdrcopy:
+          description: GDRCopy component spec
+          properties:
+            args:
+              description: 'Optional: List of arguments'
+              items:
+                type: string
+              type: array
+            enabled:
+              description: Enabled indicates if GDRCopy is enabled through GPU
+                Operator
+              type: boolean
+            env:
+              description: 'Optional: List of environment variables'
+              items:
+                description: EnvVar represents an environment variable present
+                  in a Container.
+                properties:
+                  name:
+                    description: Name of the environment variable.
+                    type: string
+                  value:
+                    description: Value of the environment variable.
+                    type: string
+                required:
+                - name
+                type: object
+              type: array
+            image:
+              description: NVIDIA GDRCopy driver image name
+              pattern: '[a-zA-Z0-9\-]+'
+              type: string
+            imagePullPolicy:
+              description: Image pull policy
+              type: string
+            imagePullSecrets:
+              description: Image pull secrets
+              items:
+                type: string
+              type: array
+            repository:
+              description: NVIDIA GDRCopy driver image repository
+              type: string
+            version:
+              description: NVIDIA GDRCopy driver image tag
+              type: string
+          type: object
+        gds:
+          description: GPUDirectStorage defines the spec for GDS components (Experimental)
+          properties:
+            args:
+              description: 'Optional: List of arguments'
+              items:
+                type: string
+              type: array
+            enabled:
+              description: Enabled indicates if GPUDirect Storage is enabled
+                through GPU operator
+              type: boolean
+            env:
+              description: 'Optional: List of environment variables'
+              items:
+                description: EnvVar represents an environment variable present
+                  in a Container.
+                properties:
+                  name:
+                    description: Name of the environment variable.
+                    type: string
+                  value:
+                    description: Value of the environment variable.
+                    type: string
+                required:
+                - name
+                type: object
+              type: array
+            image:
+              description: NVIDIA GPUDirect Storage Driver image name
+              pattern: '[a-zA-Z0-9\-]+'
+              type: string
+            imagePullPolicy:
+              description: Image pull policy
+              type: string
+            imagePullSecrets:
+              description: Image pull secrets
+              items:
+                type: string
+              type: array
+            repository:
+              description: NVIDIA GPUDirect Storage Driver image repository
+              type: string
+            version:
+              description: NVIDIA GPUDirect Storage Driver image tag
+              type: string
+          type: object
+        gfd:
+          description: GPUFeatureDiscovery spec
+          properties:
+            args:
+              description: 'Optional: List of arguments'
+              items:
+                type: string
+              type: array
+            enabled:
+              description: Enabled indicates if deployment of GPU Feature Discovery
+                Plugin is enabled.
+              type: boolean
+            env:
+              description: 'Optional: List of environment variables'
+              items:
+                description: EnvVar represents an environment variable present
+                  in a Container.
+                properties:
+                  name:
+                    description: Name of the environment variable.
+                    type: string
+                  value:
+                    description: Value of the environment variable.
+                    type: string
+                required:
+                - name
+                type: object
+              type: array
+            image:
+              description: GFD image name
+              pattern: '[a-zA-Z0-9\-]+'
+              type: string
+            imagePullPolicy:
+              description: Image pull policy
+              type: string
+            imagePullSecrets:
+              description: Image pull secrets
+              items:
+                type: string
+              type: array
+            repository:
+              description: GFD image repository
+              type: string
+            resources:
+              description: 'Optional: Define resources requests and limits for
+                each pod'
+              properties:
+                limits:
+                  additionalProperties:
+                    anyOf:
+                    - type: integer
+                    - type: string
+                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                    x-kubernetes-int-or-string: true
+                  description: |-
+                    Limits describes the maximum amount of compute resources allowed.
+                    More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                  type: object
+                requests:
+                  additionalProperties:
+                    anyOf:
+                    - type: integer
+                    - type: string
+                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                    x-kubernetes-int-or-string: true
+                  description: |-
+                    Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: GFD image tag + type: string + type: object + hostPaths: + description: HostPaths defines various paths on the host needed by + GPU Operator components + properties: + driverInstallDir: + description: |- + DriverInstallDir represents the root at which driver files including libraries, + config files, and executables can be found. + type: string + rootFS: + description: |- + RootFS represents the path to the root filesystem of the host. + This is used by components that need to interact with the host filesystem + and as such this must be a chroot-able filesystem. + Examples include the MIG Manager and Toolkit Container which may need to + stop, start, or restart systemd services. + type: string + type: object + kataManager: + description: KataManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: Kata Manager config + properties: + artifactsDir: + default: /opt/nvidia-gpu-operator/artifacts/runtimeclasses + description: |- + ArtifactsDir is the directory where kata artifacts (e.g. kernel / guest images, configuration, etc.) + are placed on the local filesystem. + type: string + runtimeClasses: + description: RuntimeClasses is a list of kata runtime classes + to configure. + items: + description: RuntimeClass defines the configuration for + a kata RuntimeClass + properties: + artifacts: + description: Artifacts are the kata artifacts associated + with the runtime class. + properties: + pullSecret: + description: PullSecret is the secret used to pull + the OCI artifact. + type: string + url: + description: |- + URL is the path to the OCI artifact (payload) containing all artifacts + associated with a kata runtime class. + type: string + required: + - url + type: object + name: + description: Name is the name of the kata runtime class. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector specifies the nodeSelector for the RuntimeClass object. + This ensures pods running with the RuntimeClass only get scheduled + onto nodes which support it. + type: object + required: + - artifacts + - name + type: object + type: array + type: object + enabled: + description: Enabled indicates if deployment of Kata Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: Kata Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Kata Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: Kata Manager image tag + type: string + type: object + mig: + description: MIG spec + properties: + strategy: + description: 'Optional: MIGStrategy to apply for GFD and NVIDIA + Device Plugin' + enum: + - none + - single + - mixed + type: string + type: object + migManager: + description: MIGManager for configuration to deploy MIG Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom mig-parted configuration for NVIDIA + MIG Manager container' + properties: + default: + default: all-disabled + description: Default MIG config to be applied on the node, + when there is no config specified with the node label nvidia.com/mig.config + enum: + - all-disabled + - "" + type: string + name: + default: default-mig-parted-config + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA MIG Manager + is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + gpuClientsConfig: + description: 'Optional: Custom gpu-clients configuration for NVIDIA + MIG Manager container' + properties: + name: + description: ConfigMap name + type: string + type: object + image: + description: NVIDIA MIG Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA MIG Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA MIG Manager image tag + type: string + type: object + nodeStatusExporter: + description: NodeStatusExporter spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of Node Status Exporter + is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Node Status Exporter image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Node Status Exporterimage repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: Node Status Exporterimage tag + type: string + type: object + operator: + description: Operator component spec + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + defaultRuntime: + default: docker + description: Runtime defines container runtime type + enum: + - docker + - crio + - containerd + type: string + initContainer: + description: InitContainerSpec describes configuration for initContainer + image used with all components + properties: + image: + description: Image represents image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents image repository path + type: string + version: + description: Version represents image tag(version) + type: string + type: object + labels: + additionalProperties: + type: string + description: |- + Optional: Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + type: object + runtimeClass: + default: nvidia + type: string + use_ocp_driver_toolkit: + description: UseOpenShiftDriverToolkit indicates if DriverToolkit + image should be used on OpenShift to build and install driver + modules + type: boolean + required: + - defaultRuntime + type: object + psa: + description: PSA defines spec for PodSecurityAdmission configuration + properties: + enabled: + description: Enabled indicates if PodSecurityAdmission configuration + needs to be enabled for all Pods + type: boolean + type: object + psp: + description: |- + Deprecated: Pod Security Policies are no longer supported. Please use PodSecurityAdmission instead + PSP defines spec for handling PodSecurityPolicies + properties: + enabled: + description: Enabled indicates if PodSecurityPolicies needs to + be enabled for all Pods + type: boolean + type: object + sandboxDevicePlugin: + description: SandboxDevicePlugin component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA Sandbox + Device Plugin through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. 
+ properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Sandbox Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA Sandbox Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Sandbox Device Plugin image tag + type: string + type: object + sandboxWorkloads: + description: SandboxWorkloads defines the spec for handling sandbox + workloads (i.e. Virtual Machines) + properties: + defaultWorkload: + default: container + description: |- + DefaultWorkload indicates the default GPU workload type to configure + worker nodes in the cluster for + enum: + - container + - vm-passthrough + - vm-vgpu + type: string + enabled: + description: |- + Enabled indicates if the GPU Operator should manage additional operands required + for sandbox workloads (i.e. VFIO Manager, vGPU Manager, and additional device plugins) + type: boolean + type: object + toolkit: + description: Toolkit component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA Container + Toolkit through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Container Toolkit image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + installDir: + default: /usr/local/nvidia + description: Toolkit install directory on the host + type: string + repository: + description: NVIDIA Container Toolkit image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Container Toolkit image tag + type: string + type: object + validator: + description: Validator defines the spec for operator-validator daemonset + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + cuda: + description: CUDA validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + driver: + description: Toolkit validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: Validator image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + plugin: + description: Plugin validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + repository: + description: Validator image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + toolkit: + description: Toolkit validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + version: + description: Validator image tag + type: string + vfioPCI: + description: VfioPCI validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + vgpuDevices: + description: VGPUDevices validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + type: object + vgpuManager: + description: VGPUManager validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + type: object + vfioManager: + description: VFIOManager for configuration to deploy VFIO-PCI Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Managerrepository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag(version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of VFIO Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: VFIO Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: VFIO Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: VFIO Manager image tag + type: string + type: object + vgpuDeviceManager: + description: VGPUDeviceManager spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: NVIDIA vGPU devices configuration for NVIDIA vGPU + Device Manager container + properties: + default: + default: default + description: Default config name within the ConfigMap + type: string + name: + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Device + Manager is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Device Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Device Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Device Manager image tag + type: string + type: object + vgpuManager: + description: VGPUManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Managerrepository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag(version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Manager + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Manager image tag + type: string + type: object + required: + - daemonsets + - dcgm + - dcgmExporter + - devicePlugin + - driver + - gfd + - nodeStatusExporter + - operator + - toolkit + type: object + status: + description: ClusterPolicyStatus defines the observed state of ClusterPolicy + properties: + conditions: + description: Conditions is a list of conditions representing the ClusterPolicy's + current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. 
For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace indicates a namespace in which the operator + is installed + type: string + state: + description: State indicates status of ClusterPolicy + enum: + - ignored + - ready + - notReady + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/bundle/v24.6.0/manifests/nvidia.com_nvidiadrivers.yaml b/bundle/v24.6.0/manifests/nvidia.com_nvidiadrivers.yaml new file mode 100644 index 000000000..665088edd --- /dev/null +++ b/bundle/v24.6.0/manifests/nvidia.com_nvidiadrivers.yaml @@ -0,0 +1,810 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: nvidiadrivers.nvidia.com +spec: + group: nvidia.com + names: + kind: NVIDIADriver + listKind: NVIDIADriverList + plural: nvidiadrivers + shortNames: + - nvd + - nvdriver + - nvdrivers + singular: nvidiadriver + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: NVIDIADriver is the Schema for the nvidiadrivers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NVIDIADriverSpec defines the desired state of NVIDIADriver + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + certConfig: + description: 'Optional: Custom certificates configuration for NVIDIA + Driver container' + properties: + name: + type: string + type: object + driverType: + default: gpu + description: DriverType defines NVIDIA driver type + enum: + - gpu + - vgpu + - vgpu-host-manager + type: string + x-kubernetes-validations: + - message: driverType is an immutable field. Please create a new NvidiaDriver + resource instead when you want to change this setting. + rule: self == oldSelf + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present in + a Container. 
+ properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + gdrcopy: + description: GDRCopy defines the spec for GDRCopy driver + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GDRCopy is enabled through GPU + operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: GDRCopy driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: GDRCopy diver image repository + type: string + version: + description: GDRCopy driver image tag + type: string + type: object + gds: + description: GPUDirectStorage defines the spec for GDS driver + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GPUDirect Storage is enabled + through GPU operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA GPUDirect Storage Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA GPUDirect Storage Driver image repository + type: string + version: + description: NVIDIA GPUDirect Storage Driver image tag + type: string + type: object + image: + default: nvcr.io/nvidia/driver + description: NVIDIA Driver container image name + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + kernelModuleConfig: + description: 'Optional: Kernel module configuration parameters for + the NVIDIA Driver' + properties: + name: + type: string + type: object + labels: + additionalProperties: + type: string + description: |- + Optional: Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + type: object + licensingConfig: + description: 'Optional: Licensing configuration for NVIDIA vGPU licensing' + properties: + name: + type: string + nlsEnabled: + description: NLSEnabled indicates if NVIDIA Licensing System is + used for licensing. 
+ type: boolean + type: object + livenessProbe: + description: NVIDIA Driver container liveness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + manager: + description: Manager represents configuration for NVIDIA Driver Manager + initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Managerrepository path + type: string + version: + description: Version represents NVIDIA Driver Manager image tag(version) + type: string + type: object + nodeAffinity: + description: Affinity specifies node affinity rules for driver pods + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding + weight. + properties: + matchExpressions: + description: A list of node selector requirements by + node's labels. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by + node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies a selector for installation of + NVIDIA driver + type: object + priorityClassName: + description: 'Optional: Set priorityClassName' + type: string + rdma: + description: GPUDirectRDMA defines the spec for NVIDIA Peer Memory + driver + properties: + enabled: + description: Enabled indicates if GPUDirect RDMA is enabled through + GPU operator + type: boolean + useHostMofed: + description: UseHostMOFED indicates to use MOFED drivers directly + installed on the host to enable GPUDirect RDMA + type: boolean + type: object + readinessProbe: + description: NVIDIA Driver container readiness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + repoConfig: + description: 'Optional: Custom repo configuration for NVIDIA Driver + container' + properties: + name: + type: string + type: object + repository: + description: NVIDIA Driver repository + type: string + resources: + description: 'Optional: Define resources requests and limits for each + pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + startupProbe: + description: NVIDIA Driver container startup probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + tolerations: + description: 'Optional: Set tolerations' + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. 
+ Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + useOpenKernelModules: + description: UseOpenKernelModules indicates if the open GPU kernel + modules should be used + type: boolean + usePrecompiled: + description: UsePrecompiled indicates if deployment of NVIDIA Driver + using pre-compiled modules is enabled + type: boolean + x-kubernetes-validations: + - message: usePrecompiled is an immutable field. Please create a new + NvidiaDriver resource instead when you want to change this setting. + rule: self == oldSelf + version: + description: NVIDIA Driver version (or just branch for precompiled + drivers) + type: string + virtualTopologyConfig: + description: 'Optional: Virtual Topology Daemon configuration for + NVIDIA vGPU drivers' + properties: + name: + description: 'Optional: Config name representing virtual topology + daemon configuration file nvidia-topologyd.conf' + type: string + type: object + required: + - driverType + - image + type: object + status: + description: NVIDIADriverStatus defines the observed state of NVIDIADriver + properties: + conditions: + description: Conditions is a list of conditions representing the NVIDIADriver's + current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace indicates a namespace in which the operator + and driver are installed + type: string + state: + description: State indicates status of NVIDIADriver instance + enum: + - ignored + - ready + - notReady + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/bundle/v24.6.0/metadata/annotations.yaml b/bundle/v24.6.0/metadata/annotations.yaml new file mode 100644 index 000000000..f7383d5c4 --- /dev/null +++ b/bundle/v24.6.0/metadata/annotations.yaml @@ -0,0 +1,17 @@ +annotations: + operators.operatorframework.io.bundle.channels.v1: stable,v24.6 + operators.operatorframework.io.bundle.channel.default.v1: v24.6 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: gpu-operator-certified + operators.operatorframework.io.metrics.builder: operator-sdk-v1.4.0 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 + operators.operatorframework.io.test.config.v1: tests/scorecard/ + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operatorframework.io/cluster-monitoring: "true" + operatorframework.io/suggested-namespace: nvidia-gpu-operator + + # Annotations to specify OCP versions compatibility.
+ com.redhat.openshift.versions: v4.12-v4.16 diff --git a/bundle/v24.6.1/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/v24.6.1/manifests/gpu-operator-certified.clusterserviceversion.yaml new file mode 100644 index 000000000..a4c4aec27 --- /dev/null +++ b/bundle/v24.6.1/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -0,0 +1,921 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + labels: + operatorframework.io/arch.arm64: supported + operatorframework.io/arch.amd64: supported + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/warn: privileged + annotations: + features.operators.openshift.io/disconnected: "true" + features.operators.openshift.io/fips-compliant: "false" + features.operators.openshift.io/proxy-aware: "true" + features.operators.openshift.io/tls-profiles: "false" + features.operators.openshift.io/token-auth-aws: "false" + features.operators.openshift.io/token-auth-azure: "false" + features.operators.openshift.io/token-auth-gcp: "false" + features.operators.openshift.io/cnf: "false" + features.operators.openshift.io/cni: "false" + features.operators.openshift.io/csi: "false" + olm.skipRange: '>=1.9.0 <24.6.1' + alm-examples: |- + [ + { + "apiVersion": "nvidia.com/v1", + "kind": "ClusterPolicy", + "metadata": { + "name": "gpu-cluster-policy" + }, + "spec": { + "operator": { + "defaultRuntime": "crio", + "use_ocp_driver_toolkit": true, + "initContainer": { + } + }, + "sandboxWorkloads": { + "enabled": false, + "defaultWorkload": "container" + }, + "driver": { + "enabled": true, + "useNvidiaDriverCRD": false, + "useOpenKernelModules": false, + "upgradePolicy": { + "autoUpgrade": true, + "drain": { + "deleteEmptyDir": false, + "enable": false, + "force": false, + "timeoutSeconds": 300 + }, + "maxParallelUpgrades": 1, + "maxUnavailable": "25%", + "podDeletion": { + "deleteEmptyDir": false, + "force": false, + "timeoutSeconds": 300 + }, + "waitForCompletion": { + "timeoutSeconds": 0 + } + }, + "repoConfig": { + "configMapName": "" + }, + "certConfig": { + "name": "" + }, + "licensingConfig": { + "nlsEnabled": true, + "configMapName": "" + }, + "virtualTopology": { + "config": "" + }, + "kernelModuleConfig": { + "name": "" + } + }, + "dcgmExporter": { + "enabled": true, + "config": { + "name": "" + }, + "serviceMonitor": { + "enabled": true + } + }, + "dcgm": { + "enabled": true + }, + "daemonsets": { + "updateStrategy": "RollingUpdate", + "rollingUpdate": { + "maxUnavailable": "1" + } + }, + "devicePlugin": { + "enabled": true, + "config": { + "name": "", + "default": "" + }, + "mps": { + "root": "/run/nvidia/mps" + } + }, + "gfd": { + "enabled": true + }, + "migManager": { + "enabled": true + }, + "nodeStatusExporter": { + "enabled": true + }, + "mig": { + "strategy": "single" + }, + "toolkit": { + "enabled": true + }, + "validator": { + "plugin": { + "env": [ + { + "name": "WITH_WORKLOAD", + "value": "false" + } + ] + } + }, + "vgpuManager": { + "enabled": false + }, + "vgpuDeviceManager": { + "enabled": true + }, + "sandboxDevicePlugin": { + "enabled": true + }, + "vfioManager": { + "enabled": true + }, + "gds": { + "enabled": false + }, + "gdrcopy": { + "enabled": false + } + } + }, + { + "apiVersion": "nvidia.com/v1alpha1", + "kind": "NVIDIADriver", + "metadata": { + "name": "gpu-driver" + }, + "spec": { + "driverType": "gpu", + "repository": "nvcr.io/nvidia", + "image": "driver", + "version": 
"sha256:858de27c152669f5a3cf4287406405b16dd5bb70c0373324eb735511997bb415", + "nodeSelector": {}, + "manager": {}, + "repoConfig": { + "name": "" + }, + "certConfig": { + "name": "" + }, + "licensingConfig": { + "nlsEnabled": true, + "name": "" + }, + "virtualTopologyConfig": { + "name": "" + }, + "kernelModuleConfig": { + "name": "" + } + } + } + ] + operators.operatorframework.io/builder: operator-sdk-v1.4.0 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 + operatorframework.io/suggested-namespace: nvidia-gpu-operator + capabilities: Deep Insights + categories: AI/Machine Learning, OpenShift Optional + certified: "true" + containerImage: nvcr.io/nvidia/gpu-operator@sha256:d51c3a34aaa9a5dfbdd3b710ee18d9eaa50aa0fb3518bacd541053d77c5c1098 + createdAt: "Mon Aug 12 11:35:29 PDT 2024" + description: Automate the management and monitoring of NVIDIA GPUs. + provider: NVIDIA + repository: http://github.com/NVIDIA/gpu-operator + support: NVIDIA + name: gpu-operator-certified.v24.6.1 + namespace: placeholder +spec: + apiservicedefinitions: {} + relatedImages: + - name: gpu-operator-image + image: nvcr.io/nvidia/gpu-operator@sha256:d51c3a34aaa9a5dfbdd3b710ee18d9eaa50aa0fb3518bacd541053d77c5c1098 + - name: dcgm-exporter-image + image: nvcr.io/nvidia/k8s/dcgm-exporter@sha256:21f4c8b88716e8e6f732f9fb4c2efaef937c227491a8631c5e55036f80f39a4d + - name: dcgm-image + image: nvcr.io/nvidia/cloud-native/dcgm@sha256:15dab1273345df4a5844c4c761d064dbc4b592101251dc39174e597137123027 + - name: container-toolkit-image + image: nvcr.io/nvidia/k8s/container-toolkit@sha256:f95ef6a0c377e011bc0561c7d2c2bf32e45106fb0ba91ae9a10f97236ded0581 + - name: driver-image + image: nvcr.io/nvidia/driver@sha256:858de27c152669f5a3cf4287406405b16dd5bb70c0373324eb735511997bb415 + - name: driver-image-535 + image: nvcr.io/nvidia/driver@sha256:a6d12fb5753f267dda25dfd38910f972bc632c006a24107fa50e20bba3642d7c + - name: driver-image-470 + image: nvcr.io/nvidia/driver@sha256:07e11f85d54d49ec9648fb06e148b8d832ee1f9c3549a915eee853c9ef2949c2 + - name: device-plugin-image + image: nvcr.io/nvidia/k8s-device-plugin@sha256:7ad2c9f71fe06f9f7745ac8635f46740fbdff4f11edd468addfab81afcdfa534 + - name: gpu-feature-discovery-image + image: nvcr.io/nvidia/k8s-device-plugin@sha256:7ad2c9f71fe06f9f7745ac8635f46740fbdff4f11edd468addfab81afcdfa534 + - name: mig-manager-image + image: nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:781fb47e264d9e0fbc8da5bd046e5e678316c866bc36ddd4b56d4eb0de682d5b + - name: init-container-image + image: nvcr.io/nvidia/cuda@sha256:b0b6c9286f20432ba9becb711aff2d1c1bd56e47b33e6d1cab04aba926c067fe + - name: gpu-operator-validator-image + image: nvcr.io/nvidia/cloud-native/gpu-operator-validator@sha256:0a48b6c65148358ab792b3dc23bce5d3e660e9176670f62864502f68647704f0 + - name: k8s-driver-manager-image + image: nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:740abc3ff657545c10effd5354f09af525200ed9a1b7623f0c2e8c7bd9e4a4e2 + - name: vfio-manager-image + image: nvcr.io/nvidia/cuda@sha256:b0b6c9286f20432ba9becb711aff2d1c1bd56e47b33e6d1cab04aba926c067fe + - name: sandbox-device-plugin-image + image: nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:969147c01d63be5d1fe458f32f1cc0c7408cf3062531db91408e2fc57b4d8a67 + - name: vgpu-device-manager-image + image: nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:ae63fac9a4057a7646f0cf0ee0566e8928529adde05c4c0a017cda0599e381b2 + - name: gdrcopy-image + image: nvcr.io/nvidia/cloud-native/gdrdrv@sha256:33de74eb590f071403c17b6c210c02963245851971168bc0c07c06c100a9f376 
+ customresourcedefinitions: + owned: + - name: nvidiadrivers.nvidia.com + kind: NVIDIADriver + version: v1alpha1 + displayName: NVIDIADriver + description: NVIDIADriver allows you to deploy the NVIDIA driver + resources: + - kind: ServiceAccount + name: '' + version: v1 + - kind: DaemonSet + name: '' + version: apps/v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: clusterpolicies + name: '' + version: v1 + - kind: clusterversions + name: '' + version: v1 + - kind: nodes + name: '' + version: v1 + - kind: status + name: '' + version: v1 + specDescriptors: + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + statusDescriptors: + - description: The current state of the driver. + displayName: State + path: state + x-descriptors: + - 'urn:alm:descriptor:text' + - name: clusterpolicies.nvidia.com + kind: ClusterPolicy + version: v1 + group: nvidia.com + displayName: ClusterPolicy + description: ClusterPolicy allows you to configure the GPU Operator + resources: + - kind: ServiceAccount + name: '' + version: v1 + - kind: Deployment + name: '' + version: apps/v1 + - kind: DaemonSet + name: '' + version: apps/v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: clusterpolicies + name: '' + version: v1 + - kind: clusterversions + name: '' + version: v1 + - kind: nodes + name: '' + version: v1 + - kind: status + name: '' + version: v1 + specDescriptors: + - description: GPU Operator config + displayName: GPU Operator config + path: operator + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: operator.validator.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: Image pull secrets + displayName: Image pull secrets + path: operator.validator.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - description: NVIDIA GPU/vGPU Driver config + displayName: NVIDIA GPU/vGPU Driver config + path: driver + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: driver.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: driver.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 
'Optional: Set tolerations' + displayName: Tolerations + path: driver.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: driver.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: driver.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA DCGM Exporter config + displayName: NVIDIA DCGM Exporter config + path: dcgmExporter + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: dcgmExporter.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: dcgmExporter.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: dcgmExporter.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: dcgmExporter.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: dcgmExporter.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA Device Plugin config + displayName: NVIDIA Device Plugin config + path: devicePlugin + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: devicePlugin.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: devicePlugin.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: devicePlugin.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: devicePlugin.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: devicePlugin.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: GPU Feature Discovery Plugin config + displayName: GPU Feature Discovery Plugin config + path: gfd + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: gfd.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: 
gfd.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: gfd.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: gfd.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: gfd.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA Container Toolkit config + displayName: NVIDIA Container Toolkit config + path: toolkit + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: toolkit.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: toolkit.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: toolkit.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: toolkit.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: toolkit.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - displayName: NVIDIA DCGM config + description: NVIDIA DCGM config + path: dcgm + - displayName: Validator config + description: Validator config + path: validator + - displayName: Node Status Exporter config + description: Node Status Exporter config + path: nodeStatusExporter + - displayName: Daemonsets config + description: Daemonsets config + path: daemonsets + - displayName: MIG config + description: MIG config + path: mig + - displayName: NVIDIA MIG Manager config + description: NVIDIA MIG Manager config + path: migManager + - displayName: PodSecurityPolicy config + description: PodSecurityPolicy config + path: psp + - displayName: NVIDIA GPUDirect Storage config + description: NVIDIA GPUDirect Storage config + path: gds + - displayName: Sandbox Workloads config + description: Sandbox Workloads config + path: sandboxWorkloads + - displayName: NVIDIA vGPU Manager config + description: NVIDIA vGPU Manager config + path: vgpuManager + - displayName: NVIDIA vGPU Device Manager config + description: NVIDIA vGPU Device Manager config + path: vgpuDeviceManager + - displayName: VFIO Manager config + description: VFIO Manager config + path: vfioManager + - displayName: NVIDIA Sandbox Device Plugin config + description: NVIDIA Sandbox Device Plugin config + path: sandboxDevicePlugin + statusDescriptors: + - description: The current state of the operator. 
+ displayName: State + path: state + x-descriptors: + - 'urn:alm:descriptor:text' + displayName: NVIDIA GPU Operator + description: > + Kubernetes provides access to special hardware resources such as NVIDIA + GPUs, NICs, Infiniband adapters and other devices through the [device plugin + framework](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/). + However, configuring and managing nodes with these hardware resources + requires configuration of multiple software components such as drivers, + container runtimes or other libraries which are difficult and prone to + errors. + + The NVIDIA GPU Operator uses the [operator + framework](https://cloud.redhat.com/blog/introducing-the-operator-framework) within + Kubernetes to automate the management of all NVIDIA software components + needed to provision and monitor GPUs. + These components include the NVIDIA drivers (to enable CUDA), Kubernetes + device plugin for GPUs, the NVIDIA Container Runtime, automatic node + labelling and NVIDIA DCGM exporter. + + Visit the official site of the [GPU Operator](https://github.com/NVIDIA/gpu-operator) for more information. + For getting started with using the GPU Operator with OpenShift, see the instructions + [here](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/openshift/contents.html). + icon: + - base64data: iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAB2lBMVEUAAAD///8EBAN3uQACAgIAAAQJDQUCAgB1tgAHCQf+/v5Ufg5Hagxxqwt+xgJ3uAB9wwB4vQBRUVEeLA3e3t5nZ2coKCgODg4FBwZ9wwR6wAJ4vADz8/MbGxt5tw1vpw1/wgoOFwkLDwh9xQH5+fny8vLw8PDFxcWysrKFhYVvb282NjYyMjIqKioXFxdikxRYgxNCYxJQdhFqoQ9xrg16ugxyqgyAxQkEBQj7+/v29vbIyMhjY2NbW1tHR0cvLy8kJCQdHR0ZGRlKbxJ8uhFNcxFVgBAxSBBgkg93tQ50sA4qPg4XIg18vwsbKQsSGgsLCwsMEwqCyQeByQFztADPz8+/v7+6urqWlpZra2tKSkogICASEhJmmRE8XBA5VRA2UBBonA9biA9GaQ4sQg4jMw4mOQ0aJw2GzgsUHgttpAqJ0Ql/wQWG0AJ8vwF0uQCtra2jo6OQkJB9fX1VVVVCQkI9PT0iIiIUFBRSfBNgjhA7WRBGZw+GywmFzgaAyASBxQN2twDb29u2traenp6Kiop+fn53d3dzc3NyqRV4sxM/YBNAXRElNhBjlQ+IzA00TQ16vgxJbgp6vAl4tgJ3vgDs7Ozn5+fa2trS0tJCXRY6VBV6thSL1gf4nFdFAAAD80lEQVRYw+zSOXPaQBgG4He0LJJmbGRGDUIzuvgBQiAEPfcdwC33DTbUtmOwSyc+4iRucvzXRImLFJmRShc8xXbfu+9+szg4OHjjAsH/iFD49q7rqM6xc/wPtWyBhS8sC94ObWRCZDksh1+RzmcEfI0DoPrjylEkSTgViMs9udjYTwMG4Gf51Z1BM81ioRwit+QvgYsdUQZeKFr3ladyKXvVr+pAM5uKcmRLXFzoCIxn+0i/8lSaBMHnfi7qowfQuZnm3PuFPwGs13zD3NlViozY/z4YD6/TCQORbPr2q78GLB0ou5IO40pd5AxQZnJ83m2y9Ju2JYKfgEhWC18aEIfrZLURHwQC0B87ySZwHxX8BNDWB1KfQfyxT2TA24uPQMt8yTWA3obz8wQGlhTN06Z900MkuJLrYu3u5LkK9LTtGRF8NEDLeSnXYLUdHUFVlpPqTa4IamlhJZ464biY1w4CKGrROOW7uwLlV+Q02lanCF6cbSoPVLzUfPwDll5I9T6WyXWhZre1yjiI6VCSzCWY3+FKaAwGHngzpEygx6+V6Uzk6TJR7yhWxJ1bFgTPJ7gMc58aUCq+n+qNT6Pn8y/xOcCiZZVjnJ+AAPhEuj0SKZ9bL9ZpNS9SgM6z9p5w3jt43cMvecfWBhm7dtfEpfhYMDBYpFd7mDZIAxPCFKgBhB0hkWbE2wVMyqycfhOMEiebSzFz5IMTEjw7E87UFj4GVR7GXqaSkoIcISEc/I38/PwhOTUMRBrADgwK09zgYGUBqbwcARiQyp3Eyk6kC4BloqtbJTcaSHIHShALWFmBSRuCWBGC+AtDMAAGIpAAc9mBiB0sCLSXHUSygxSxEIoE7IKEgbhopKgogC96x04QCMMw/H0cG6f0cEmBHaLc7FFQzApoTLwtQgWUWo26glx2mzGkyoHM1PPMO/NrnSH8e2QAiRsZ8S3ZuJoW5Udg5moGoMRLN2gAnkcUctueJ1gADsdtlZ2AgmSYoaDZBXwRctcwy6HN3XX/wfnTnA7Q5x0S0Gku4wHpe7Ql8Mbtu4TqC3qcADGtUl4O3eK0AkZdKH1mU/a6MFQGA7pQGoAVoAuuPYZlLJF2BawVLLjwac6Q8wUax61/CpKQAT6ZX3hFqoqqAFvuf4AzM+NgsoBS/wcSOD7SFzyf6CE9UQK9II1MRvIJm8QSgsLiBZuypsAWKyARElgx5FcLv1N4nFLbB45Sh6+TzsQRtn7bz/B3fS9GQ12bgUE2PKycQbwgXD0SWLwVhpZFq4eHhWloOjLoqGvoRYRGAR2vp2EtpNUaTUpiRAizMAEhKNXpYZNnAUlBCSgFYTIxQTlMMJNGwSgYBdQHAFsKs+/bUkeyAAAAAElFTkSuQmCC + mediatype: image/png + install: + spec: + clusterPermissions: + - serviceAccountName: gpu-operator + rules: 
+ - apiGroups: + - nvidia.com + resources: + - clusterpolicies + - clusterpolicies/finalizers + - clusterpolicies/status + - nvidiadrivers + - nvidiadrivers/finalizers + - nvidiadrivers/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - deletecollection + - apiGroups: + - config.openshift.io + resources: + - clusterversions + - proxies + verbs: + - get + - list + - watch + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use + - create + - get + - list + - watch + - patch + - update + - delete + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use + resourceNames: + - hostmount-anyuid + - apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - get + - list + - watch + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - prometheusrules + verbs: + - get + - list + - create + - watch + - update + - delete + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - pods + - pods/eviction + - services + - services/finalizers + - events + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - create + - watch + - update + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch + - apiGroups: + - node.k8s.io + resources: + - runtimeclasses + verbs: + - get + - list + - create + - update + - watch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + permissions: + - serviceAccountName: gpu-operator + rules: + - apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - secrets + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + deployments: + - name: gpu-operator + spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: gpu-operator + app: gpu-operator + strategy: {} + template: + metadata: + labels: + app.kubernetes.io/component: gpu-operator + app: gpu-operator + nvidia.com/gpu-driver-upgrade-drain.skip: "true" + spec: + priorityClassName: system-node-critical + containers: + - args: + - --leader-elect + - --leader-lease-renew-deadline + - "60s" + image: nvcr.io/nvidia/gpu-operator@sha256:d51c3a34aaa9a5dfbdd3b710ee18d9eaa50aa0fb3518bacd541053d77c5c1098 + command: + - gpu-operator + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: gpu-operator + ports: + - name: metrics + containerPort: 8080 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + 
initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 200m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /host-etc/os-release + name: host-os-release + readOnly: true + env: + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "VALIDATOR_IMAGE" + value: "nvcr.io/nvidia/cloud-native/gpu-operator-validator@sha256:0a48b6c65148358ab792b3dc23bce5d3e660e9176670f62864502f68647704f0" + - name: "GFD_IMAGE" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:7ad2c9f71fe06f9f7745ac8635f46740fbdff4f11edd468addfab81afcdfa534" + - name: "CONTAINER_TOOLKIT_IMAGE" + value: "nvcr.io/nvidia/k8s/container-toolkit@sha256:f95ef6a0c377e011bc0561c7d2c2bf32e45106fb0ba91ae9a10f97236ded0581" + - name: "DCGM_IMAGE" + value: "nvcr.io/nvidia/cloud-native/dcgm@sha256:15dab1273345df4a5844c4c761d064dbc4b592101251dc39174e597137123027" + - name: "DCGM_EXPORTER_IMAGE" + value: "nvcr.io/nvidia/k8s/dcgm-exporter@sha256:21f4c8b88716e8e6f732f9fb4c2efaef937c227491a8631c5e55036f80f39a4d" + - name: "DEVICE_PLUGIN_IMAGE" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:7ad2c9f71fe06f9f7745ac8635f46740fbdff4f11edd468addfab81afcdfa534" + - name: "DRIVER_IMAGE" + value: "nvcr.io/nvidia/driver@sha256:858de27c152669f5a3cf4287406405b16dd5bb70c0373324eb735511997bb415" + - name: "DRIVER_IMAGE-535" + value: "nvcr.io/nvidia/driver@sha256:a6d12fb5753f267dda25dfd38910f972bc632c006a24107fa50e20bba3642d7c" + - name: "DRIVER_IMAGE-470" + value: "nvcr.io/nvidia/driver@sha256:07e11f85d54d49ec9648fb06e148b8d832ee1f9c3549a915eee853c9ef2949c2" + - name: "DRIVER_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:740abc3ff657545c10effd5354f09af525200ed9a1b7623f0c2e8c7bd9e4a4e2" + - name: "MIG_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:781fb47e264d9e0fbc8da5bd046e5e678316c866bc36ddd4b56d4eb0de682d5b" + - name: "CUDA_BASE_IMAGE" + value: "nvcr.io/nvidia/cuda@sha256:b0b6c9286f20432ba9becb711aff2d1c1bd56e47b33e6d1cab04aba926c067fe" + - name: "VFIO_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cuda@sha256:b0b6c9286f20432ba9becb711aff2d1c1bd56e47b33e6d1cab04aba926c067fe" + - name: "SANDBOX_DEVICE_PLUGIN_IMAGE" + value: "nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:969147c01d63be5d1fe458f32f1cc0c7408cf3062531db91408e2fc57b4d8a67" + - name: "VGPU_DEVICE_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:ae63fac9a4057a7646f0cf0ee0566e8928529adde05c4c0a017cda0599e381b2" + - name: "GDRCOPY_IMAGE" + value: "nvcr.io/nvidia/cloud-native/gdrdrv@sha256:33de74eb590f071403c17b6c210c02963245851971168bc0c07c06c100a9f376" + terminationGracePeriodSeconds: 10 + volumes: + - hostPath: + path: /etc/os-release + name: host-os-release + serviceAccountName: gpu-operator + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: false + type: AllNamespaces + keywords: + - gpu + - cuda + - compute + - operator + - deep learning + - monitoring + - tesla + maintainers: + - name: NVIDIA + email: operator_feedback@nvidia.com + maturity: stable + provider: + name: NVIDIA Corporation + version: 24.6.1 + replaces: gpu-operator-certified.v24.6.0 diff --git a/bundle/v24.6.1/manifests/nvidia.com_clusterpolicies.yaml b/bundle/v24.6.1/manifests/nvidia.com_clusterpolicies.yaml new file mode 100644 index 000000000..8e29cabf1 --- /dev/null 
+++ b/bundle/v24.6.1/manifests/nvidia.com_clusterpolicies.yaml @@ -0,0 +1,2404 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: clusterpolicies.nvidia.com +spec: + group: nvidia.com + names: + kind: ClusterPolicy + listKind: ClusterPolicyList + plural: clusterpolicies + singular: clusterpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1 + schema: + openAPIV3Schema: + description: ClusterPolicy is the Schema for the clusterpolicies API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterPolicySpec defines the desired state of ClusterPolicy + properties: + ccManager: + description: CCManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + defaultMode: + description: Default CC mode setting for compatible GPUs on the + node + enum: + - "on" + - "off" + - devtools + type: string + enabled: + description: Enabled indicates if deployment of CC Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: CC Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: CC Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: CC Manager image tag + type: string + type: object + cdi: + description: CDI configures how the Container Device Interface is + used in the cluster + properties: + default: + default: false + description: Default indicates whether to use CDI as the default + mechanism for providing GPU access to containers. + type: boolean + enabled: + default: false + description: Enabled indicates whether CDI can be used to make + GPUs accessible to containers. + type: boolean + type: object + daemonsets: + description: Daemonset defines common configuration for all Daemonsets + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + labels: + additionalProperties: + type: string + description: |- + Optional: Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + type: object + priorityClassName: + type: string + rollingUpdate: + description: 'Optional: Configuration for rolling update of all + DaemonSet pods' + properties: + maxUnavailable: + type: string + type: object + tolerations: + description: 'Optional: Set tolerations' + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string + type: object + type: array + updateStrategy: + default: RollingUpdate + enum: + - RollingUpdate + - OnDelete + type: string + type: object + dcgm: + description: DCGM component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA DCGM Hostengine + as a separate pod is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + hostPort: + description: 'Deprecated: HostPort represents host port that needs + to be bound for DCGM engine (Default: 5555)' + format: int32 + type: integer + image: + description: NVIDIA DCGM image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA DCGM image tag + type: string + type: object + dcgmExporter: + description: DCGMExporter spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom metrics configuration for NVIDIA + DCGM Exporter' + properties: + name: + description: ConfigMap name with file dcgm-metrics.csv for + metrics to be collected by NVIDIA DCGM Exporter + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA DCGM Exporter + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA DCGM Exporter image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM Exporter image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + serviceMonitor: + description: 'Optional: ServiceMonitor configuration for NVIDIA + DCGM Exporter' + properties: + additionalLabels: + additionalProperties: + type: string + description: AdditionalLabels to add to ServiceMonitor instance + for NVIDIA DCGM Exporter + type: object + enabled: + description: Enabled indicates if ServiceMonitor is deployed + for NVIDIA DCGM Exporter + type: boolean + honorLabels: + description: HonorLabels chooses the metric’s labels on collisions + with target labels. + type: boolean + interval: + description: |- + Interval which metrics should be scraped from NVIDIA DCGM Exporter. If not specified Prometheus’ global scrape interval is used. + Supported units: y, w, d, h, m, s, ms + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + relabelings: + description: Relabelings allows to rewrite labels on metric + sets for NVIDIA DCGM Exporter + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + + Only applicable when the action is `HashMod`. 
+ format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + + Regex capture groups are available. + type: string + type: object + type: array + type: object + version: + description: NVIDIA DCGM Exporter image tag + type: string + type: object + devicePlugin: + description: DevicePlugin component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Configuration for the NVIDIA Device Plugin + via the ConfigMap' + properties: + default: + description: Default config name within the ConfigMap for + the NVIDIA Device Plugin config + type: string + name: + description: ConfigMap name for NVIDIA Device Plugin config + including shared config between plugin and GFD + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Device + Plugin through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + mps: + description: 'Optional: MPS related configuration for the NVIDIA + Device Plugin' + properties: + root: + default: /run/nvidia/mps + description: Root defines the MPS root path on the host + type: string + type: object + repository: + description: NVIDIA Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Device Plugin image tag + type: string + type: object + driver: + description: Driver component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + certConfig: + description: 'Optional: Custom certificates configuration for + NVIDIA Driver container' + properties: + name: + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Driver + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + kernelModuleConfig: + description: 'Optional: Kernel module configuration parameters + for the NVIDIA Driver' + properties: + name: + type: string + type: object + licensingConfig: + description: 'Optional: Licensing configuration for NVIDIA vGPU + licensing' + properties: + configMapName: + type: string + nlsEnabled: + description: NLSEnabled indicates if NVIDIA Licensing System + is used for licensing. + type: boolean + type: object + livenessProbe: + description: NVIDIA Driver container liveness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + manager: + description: Manager represents configuration for NVIDIA Driver + Manager initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Manager repository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag (version) + type: string + type: object + rdma: + description: GPUDirectRDMASpec defines the properties for nvidia-peermem + deployment + properties: + enabled: + description: Enabled indicates if GPUDirect RDMA is enabled + through GPU operator + type: boolean + useHostMofed: + description: UseHostMOFED indicates to use MOFED drivers directly + installed on the host to enable GPUDirect RDMA + type: boolean + type: object + readinessProbe: + description: NVIDIA Driver container readiness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + repoConfig: + description: 'Optional: Custom repo configuration for NVIDIA Driver + container' + properties: + configMapName: + type: string + type: object + repository: + description: NVIDIA Driver image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + startupProbe: + description: NVIDIA Driver container startup probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        minimum: 1
+                        type: integer
+                    type: object
+                  upgradePolicy:
+                    description: Driver auto-upgrade settings
+                    properties:
+                      autoUpgrade:
+                        default: false
+                        description: |-
+                          AutoUpgrade is a global switch for automatic upgrade feature
+                          if set to false all other options are ignored
+                        type: boolean
+                      drain:
+                        description: DrainSpec describes configuration for node drain
+                          during automatic upgrade
+                        properties:
+                          deleteEmptyDir:
+                            default: false
+                            description: |-
+                              DeleteEmptyDir indicates if the drain should continue even if there are pods using emptyDir
+                              (local data that will be deleted when the node is drained)
+                            type: boolean
+                          enable:
+                            default: false
+                            description: Enable indicates if node draining is allowed
+                              during upgrade
+                            type: boolean
+                          force:
+                            default: false
+                            description: Force indicates if force draining is allowed
+                            type: boolean
+                          podSelector:
+                            description: |-
+                              PodSelector specifies a label selector to filter pods on the node that need to be drained
+                              For more details on label selectors, see:
+                              https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+                            type: string
+                          timeoutSeconds:
+                            default: 300
+                            description: TimeoutSeconds specifies the length of time
+                              in seconds to wait before giving up drain, zero means
+                              infinite
+                            minimum: 0
+                            type: integer
+                        type: object
+                      maxParallelUpgrades:
+                        default: 1
+                        description: |-
+                          MaxParallelUpgrades indicates how many nodes can be upgraded in parallel
+                          0 means no limit, all nodes will be upgraded in parallel
+                        minimum: 0
+                        type: integer
+                      maxUnavailable:
+                        anyOf:
+                        - type: integer
+                        - type: string
+                        default: 25%
+                        description: |-
+                          MaxUnavailable is the maximum number of nodes with the driver installed, that can be unavailable during the upgrade.
+                          Value can be an absolute number (ex: 5) or a percentage of total nodes at the start of upgrade (ex: 10%).
+                          Absolute number is calculated from percentage by rounding up.
+                          By default, a fixed value of 25% is used.
+                        x-kubernetes-int-or-string: true
+                      podDeletion:
+                        description: PodDeletionSpec describes configuration for deletion
+                          of pods using special resources during automatic upgrade
+                        properties:
+                          deleteEmptyDir:
+                            default: false
+                            description: |-
+                              DeleteEmptyDir indicates if the deletion should continue even if there are pods using emptyDir
+                              (local data that will be deleted when the pod is deleted)
+                            type: boolean
+                          force:
+                            default: false
+                            description: Force indicates if force deletion is allowed
+                            type: boolean
+                          timeoutSeconds:
+                            default: 300
+                            description: |-
+                              TimeoutSeconds specifies the length of time in seconds to wait before giving up on pod termination, zero means
+                              infinite
+                            minimum: 0
+                            type: integer
+                        type: object
+                      waitForCompletion:
+                        description: WaitForCompletionSpec describes the configuration
+                          for waiting on job completions
+                        properties:
+                          podSelector:
+                            description: |-
+                              PodSelector specifies a label selector for the pods to wait for completion
+                              For more details on label selectors, see:
+                              https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+                            type: string
+                          timeoutSeconds:
+                            default: 0
+                            description: |-
+                              TimeoutSeconds specifies the length of time in seconds to wait before giving up on pod termination, zero means
+                              infinite
+                            minimum: 0
+                            type: integer
+                        type: object
+                    type: object
+                  useNvidiaDriverCRD:
+                    description: UseNvidiaDriverCRD indicates if the deployment of
+                      NVIDIA Driver is managed by the NVIDIADriver CRD type
+                    type: boolean
+                  useOpenKernelModules:
+                    description: UseOpenKernelModules indicates if the open GPU kernel
+                      modules should be used
+                    type: boolean
+                  usePrecompiled:
+                    description: UsePrecompiled indicates if deployment of NVIDIA
+                      Driver using pre-compiled modules is enabled
+                    type: boolean
+                  version:
+                    description: NVIDIA Driver image tag
+                    type: string
+                  virtualTopology:
+                    description: 'Optional: Virtual Topology Daemon configuration
+                      for NVIDIA vGPU drivers'
+                    properties:
+                      config:
+                        description: 'Optional: Config name representing virtual topology
+                          daemon configuration file nvidia-topologyd.conf'
+                        type: string
+                    type: object
+                type: object
+              gdrcopy:
+                description: GDRCopy component spec
+                properties:
+                  args:
+                    description: 'Optional: List of arguments'
+                    items:
+                      type: string
+                    type: array
+                  enabled:
+                    description: Enabled indicates if GDRCopy is enabled through GPU
+                      Operator
+                    type: boolean
+                  env:
+                    description: 'Optional: List of environment variables'
+                    items:
+                      description: EnvVar represents an environment variable present
+                        in a Container.
+                      properties:
+                        name:
+                          description: Name of the environment variable.
+                          type: string
+                        value:
+                          description: Value of the environment variable.
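For reference, the upgradePolicy schema above corresponds to a stanza like the following in a ClusterPolicy resource. This is an illustrative sketch, not part of the generated manifest: the pod selector and timeout values are hypothetical, and a real ClusterPolicy must also populate the other required component sections (daemonsets, dcgm, dcgmExporter, devicePlugin, gfd, nodeStatusExporter, operator, toolkit).

```
apiVersion: nvidia.com/v1
kind: ClusterPolicy
metadata:
  name: cluster-policy
spec:
  driver:
    upgradePolicy:
      autoUpgrade: true            # global switch; all other fields are ignored when false
      maxParallelUpgrades: 2       # 0 would mean upgrade all nodes in parallel
      maxUnavailable: 25%          # absolute number or percentage of driver nodes
      drain:
        enable: true
        force: false
        deleteEmptyDir: true       # proceed even when pods use emptyDir volumes
        timeoutSeconds: 300        # 0 means wait indefinitely
      podDeletion:
        force: false
        deleteEmptyDir: true
        timeoutSeconds: 300
      waitForCompletion:
        podSelector: app=training-job   # hypothetical label selector
        timeoutSeconds: 0               # 0 means wait indefinitely
```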
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA GDRCopy driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA GDRCopy driver image repository + type: string + version: + description: NVIDIA GDRCopy driver image tag + type: string + type: object + gds: + description: GPUDirectStorage defines the spec for GDS components(Experimental) + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GPUDirect Storage is enabled + through GPU operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA GPUDirect Storage Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA GPUDirect Storage Driver image repository + type: string + version: + description: NVIDIA GPUDirect Storage Driver image tag + type: string + type: object + gfd: + description: GPUFeatureDiscovery spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of GPU Feature Discovery + Plugin is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: GFD image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: GFD image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: GFD image tag + type: string + type: object + hostPaths: + description: HostPaths defines various paths on the host needed by + GPU Operator components + properties: + driverInstallDir: + description: |- + DriverInstallDir represents the root at which driver files including libraries, + config files, and executables can be found. + type: string + rootFS: + description: |- + RootFS represents the path to the root filesystem of the host. + This is used by components that need to interact with the host filesystem + and as such this must be a chroot-able filesystem. + Examples include the MIG Manager and Toolkit Container which may need to + stop, start, or restart systemd services. + type: string + type: object + kataManager: + description: KataManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: Kata Manager config + properties: + artifactsDir: + default: /opt/nvidia-gpu-operator/artifacts/runtimeclasses + description: |- + ArtifactsDir is the directory where kata artifacts (e.g. kernel / guest images, configuration, etc.) + are placed on the local filesystem. + type: string + runtimeClasses: + description: RuntimeClasses is a list of kata runtime classes + to configure. + items: + description: RuntimeClass defines the configuration for + a kata RuntimeClass + properties: + artifacts: + description: Artifacts are the kata artifacts associated + with the runtime class. + properties: + pullSecret: + description: PullSecret is the secret used to pull + the OCI artifact. + type: string + url: + description: |- + URL is the path to the OCI artifact (payload) containing all artifacts + associated with a kata runtime class. + type: string + required: + - url + type: object + name: + description: Name is the name of the kata runtime class. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector specifies the nodeSelector for the RuntimeClass object. + This ensures pods running with the RuntimeClass only get scheduled + onto nodes which support it. + type: object + required: + - artifacts + - name + type: object + type: array + type: object + enabled: + description: Enabled indicates if deployment of Kata Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: Kata Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Kata Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: Kata Manager image tag + type: string + type: object + mig: + description: MIG spec + properties: + strategy: + description: 'Optional: MIGStrategy to apply for GFD and NVIDIA + Device Plugin' + enum: + - none + - single + - mixed + type: string + type: object + migManager: + description: MIGManager for configuration to deploy MIG Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom mig-parted configuration for NVIDIA + MIG Manager container' + properties: + default: + default: all-disabled + description: Default MIG config to be applied on the node, + when there is no config specified with the node label nvidia.com/mig.config + enum: + - all-disabled + - "" + type: string + name: + default: default-mig-parted-config + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA MIG Manager + is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  gpuClientsConfig:
+                    description: 'Optional: Custom gpu-clients configuration for NVIDIA
+                      MIG Manager container'
+                    properties:
+                      name:
+                        description: ConfigMap name
+                        type: string
+                    type: object
+                  image:
+                    description: NVIDIA MIG Manager image name
+                    pattern: '[a-zA-Z0-9\-]+'
+                    type: string
+                  imagePullPolicy:
+                    description: Image pull policy
+                    type: string
+                  imagePullSecrets:
+                    description: Image pull secrets
+                    items:
+                      type: string
+                    type: array
+                  repository:
+                    description: NVIDIA MIG Manager image repository
+                    type: string
+                  resources:
+                    description: 'Optional: Define resources requests and limits for
+                      each pod'
+                    properties:
+                      limits:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: |-
+                          Limits describes the maximum amount of compute resources allowed.
+                          More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                        type: object
+                      requests:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: |-
+                          Requests describes the minimum amount of compute resources required.
+                          If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+                          otherwise to an implementation-defined value. Requests cannot exceed Limits.
+                          More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                        type: object
+                    type: object
+                  version:
+                    description: NVIDIA MIG Manager image tag
+                    type: string
+                type: object
+              nodeStatusExporter:
+                description: NodeStatusExporter spec
+                properties:
+                  args:
+                    description: 'Optional: List of arguments'
+                    items:
+                      type: string
+                    type: array
+                  enabled:
+                    description: Enabled indicates if deployment of Node Status Exporter
+                      is enabled.
+                    type: boolean
+                  env:
+                    description: 'Optional: List of environment variables'
+                    items:
+                      description: EnvVar represents an environment variable present
+                        in a Container.
+                      properties:
+                        name:
+                          description: Name of the environment variable.
+                          type: string
+                        value:
+                          description: Value of the environment variable.
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  image:
+                    description: Node Status Exporter image name
+                    pattern: '[a-zA-Z0-9\-]+'
+                    type: string
+                  imagePullPolicy:
+                    description: Image pull policy
+                    type: string
+                  imagePullSecrets:
+                    description: Image pull secrets
+                    items:
+                      type: string
+                    type: array
+                  repository:
+                    description: Node Status Exporter image repository
+                    type: string
+                  resources:
+                    description: 'Optional: Define resources requests and limits for
+                      each pod'
+                    properties:
+                      limits:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: |-
+                          Limits describes the maximum amount of compute resources allowed.
+                          More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                        type: object
+                      requests:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: |-
+                          Requests describes the minimum amount of compute resources required.
+                          If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+                          otherwise to an implementation-defined value. Requests cannot exceed Limits.
+                          More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                        type: object
+                    type: object
+                  version:
+                    description: Node Status Exporter image tag
+                    type: string
+                type: object
+              operator:
+                description: Operator component spec
+                properties:
+                  annotations:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Optional: Annotations is an unstructured key value map stored with a resource that may be
+                      set by external tools to store and retrieve arbitrary metadata. They are not
+                      queryable and should be preserved when modifying objects.
+                    type: object
+                  defaultRuntime:
+                    default: docker
+                    description: Runtime defines container runtime type
+                    enum:
+                    - docker
+                    - crio
+                    - containerd
+                    type: string
+                  initContainer:
+                    description: InitContainerSpec describes configuration for initContainer
+                      image used with all components
+                    properties:
+                      image:
+                        description: Image represents image name
+                        pattern: '[a-zA-Z0-9\-]+'
+                        type: string
+                      imagePullPolicy:
+                        description: Image pull policy
+                        type: string
+                      imagePullSecrets:
+                        description: Image pull secrets
+                        items:
+                          type: string
+                        type: array
+                      repository:
+                        description: Repository represents image repository path
+                        type: string
+                      version:
+                        description: Version represents image tag(version)
+                        type: string
+                    type: object
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Optional: Map of string keys and values that can be used to organize and categorize
+                      (scope and select) objects. May match selectors of replication controllers
+                      and services.
+                    type: object
+                  runtimeClass:
+                    default: nvidia
+                    type: string
+                  use_ocp_driver_toolkit:
+                    description: UseOpenShiftDriverToolkit indicates if DriverToolkit
+                      image should be used on OpenShift to build and install driver
+                      modules
+                    type: boolean
+                required:
+                - defaultRuntime
+                type: object
+              psa:
+                description: PSA defines spec for PodSecurityAdmission configuration
+                properties:
+                  enabled:
+                    description: Enabled indicates if PodSecurityAdmission configuration
+                      needs to be enabled for all Pods
+                    type: boolean
+                type: object
+              psp:
+                description: |-
+                  Deprecated: Pod Security Policies are no longer supported. Please use PodSecurityAdmission instead
+                  PSP defines spec for handling PodSecurityPolicies
+                properties:
+                  enabled:
+                    description: Enabled indicates if PodSecurityPolicies needs to
+                      be enabled for all Pods
+                    type: boolean
+                type: object
+              sandboxDevicePlugin:
+                description: SandboxDevicePlugin component spec
+                properties:
+                  args:
+                    description: 'Optional: List of arguments'
+                    items:
+                      type: string
+                    type: array
+                  enabled:
+                    description: Enabled indicates if deployment of NVIDIA Sandbox
+                      Device Plugin through operator is enabled
+                    type: boolean
+                  env:
+                    description: 'Optional: List of environment variables'
+                    items:
+                      description: EnvVar represents an environment variable present
+                        in a Container.
+ properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Sandbox Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA Sandbox Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Sandbox Device Plugin image tag + type: string + type: object + sandboxWorkloads: + description: SandboxWorkloads defines the spec for handling sandbox + workloads (i.e. Virtual Machines) + properties: + defaultWorkload: + default: container + description: |- + DefaultWorkload indicates the default GPU workload type to configure + worker nodes in the cluster for + enum: + - container + - vm-passthrough + - vm-vgpu + type: string + enabled: + description: |- + Enabled indicates if the GPU Operator should manage additional operands required + for sandbox workloads (i.e. VFIO Manager, vGPU Manager, and additional device plugins) + type: boolean + type: object + toolkit: + description: Toolkit component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA Container + Toolkit through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
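To illustrate the sandboxWorkloads schema defined above, the snippet below (a sketch, not part of the diff) opts the cluster into sandbox workload management; per-node overrides remain possible via the nvidia.com/gpu.workload.config node label.

```
spec:
  sandboxWorkloads:
    enabled: true                    # also deploys VFIO Manager, vGPU Manager, and the sandbox device plugin
    defaultWorkload: vm-passthrough  # one of: container, vm-passthrough, vm-vgpu
```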
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Container Toolkit image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + installDir: + default: /usr/local/nvidia + description: Toolkit install directory on the host + type: string + repository: + description: NVIDIA Container Toolkit image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Container Toolkit image tag + type: string + type: object + validator: + description: Validator defines the spec for operator-validator daemonset + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + cuda: + description: CUDA validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + driver: + description: Toolkit validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: Validator image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + plugin: + description: Plugin validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + repository: + description: Validator image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + toolkit: + description: Toolkit validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + version: + description: Validator image tag + type: string + vfioPCI: + description: VfioPCI validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + vgpuDevices: + description: VGPUDevices validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + type: object + vgpuManager: + description: VGPUManager validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + type: object + vfioManager: + description: VFIOManager for configuration to deploy VFIO-PCI Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Managerrepository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag(version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of VFIO Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: VFIO Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: VFIO Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: VFIO Manager image tag + type: string + type: object + vgpuDeviceManager: + description: VGPUDeviceManager spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: NVIDIA vGPU devices configuration for NVIDIA vGPU + Device Manager container + properties: + default: + default: default + description: Default config name within the ConfigMap + type: string + name: + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Device + Manager is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Device Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Device Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Device Manager image tag + type: string + type: object + vgpuManager: + description: VGPUManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Managerrepository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag(version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Manager + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Manager image tag + type: string + type: object + required: + - daemonsets + - dcgm + - dcgmExporter + - devicePlugin + - driver + - gfd + - nodeStatusExporter + - operator + - toolkit + type: object + status: + description: ClusterPolicyStatus defines the observed state of ClusterPolicy + properties: + conditions: + description: Conditions is a list of conditions representing the ClusterPolicy's + current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. 
For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace indicates a namespace in which the operator + is installed + type: string + state: + description: State indicates status of ClusterPolicy + enum: + - ignored + - ready + - notReady + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/bundle/v24.6.1/manifests/nvidia.com_nvidiadrivers.yaml b/bundle/v24.6.1/manifests/nvidia.com_nvidiadrivers.yaml new file mode 100644 index 000000000..665088edd --- /dev/null +++ b/bundle/v24.6.1/manifests/nvidia.com_nvidiadrivers.yaml @@ -0,0 +1,810 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: nvidiadrivers.nvidia.com +spec: + group: nvidia.com + names: + kind: NVIDIADriver + listKind: NVIDIADriverList + plural: nvidiadrivers + shortNames: + - nvd + - nvdriver + - nvdrivers + singular: nvidiadriver + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: NVIDIADriver is the Schema for the nvidiadrivers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NVIDIADriverSpec defines the desired state of NVIDIADriver + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + certConfig: + description: 'Optional: Custom certificates configuration for NVIDIA + Driver container' + properties: + name: + type: string + type: object + driverType: + default: gpu + description: DriverType defines NVIDIA driver type + enum: + - gpu + - vgpu + - vgpu-host-manager + type: string + x-kubernetes-validations: + - message: driverType is an immutable field. Please create a new NvidiaDriver + resource instead when you want to change this setting. + rule: self == oldSelf + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present in + a Container. 
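As a usage sketch (not part of the generated CRD), a minimal custom resource against this v1alpha1 schema might look as follows. The resource name and node selector are placeholders, and driverType is immutable once set, per the validation rule above.

```
apiVersion: nvidia.com/v1alpha1
kind: NVIDIADriver
metadata:
  name: default-gpu-driver        # cluster-scoped; short names nvd, nvdriver, nvdrivers
spec:
  driverType: gpu                 # immutable: gpu, vgpu, or vgpu-host-manager
  image: nvcr.io/nvidia/driver    # matches the schema default
  imagePullPolicy: IfNotPresent
  nodeSelector:
    nvidia.com/gpu.present: "true"   # hypothetical selector targeting GPU nodes
```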
+                  properties:
+                    name:
+                      description: Name of the environment variable.
+                      type: string
+                    value:
+                      description: Value of the environment variable.
+                      type: string
+                  required:
+                  - name
+                  type: object
+                type: array
+              gdrcopy:
+                description: GDRCopy defines the spec for GDRCopy driver
+                properties:
+                  args:
+                    description: 'Optional: List of arguments'
+                    items:
+                      type: string
+                    type: array
+                  enabled:
+                    description: Enabled indicates if GDRCopy is enabled through GPU
+                      operator
+                    type: boolean
+                  env:
+                    description: 'Optional: List of environment variables'
+                    items:
+                      description: EnvVar represents an environment variable present
+                        in a Container.
+                      properties:
+                        name:
+                          description: Name of the environment variable.
+                          type: string
+                        value:
+                          description: Value of the environment variable.
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  image:
+                    description: GDRCopy driver image name
+                    pattern: '[a-zA-Z0-9\-]+'
+                    type: string
+                  imagePullPolicy:
+                    description: Image pull policy
+                    type: string
+                  imagePullSecrets:
+                    description: Image pull secrets
+                    items:
+                      type: string
+                    type: array
+                  repository:
+                    description: GDRCopy driver image repository
+                    type: string
+                  version:
+                    description: GDRCopy driver image tag
+                    type: string
+                type: object
+              gds:
+                description: GPUDirectStorage defines the spec for GDS driver
+                properties:
+                  args:
+                    description: 'Optional: List of arguments'
+                    items:
+                      type: string
+                    type: array
+                  enabled:
+                    description: Enabled indicates if GPUDirect Storage is enabled
+                      through GPU operator
+                    type: boolean
+                  env:
+                    description: 'Optional: List of environment variables'
+                    items:
+                      description: EnvVar represents an environment variable present
+                        in a Container.
+                      properties:
+                        name:
+                          description: Name of the environment variable.
+                          type: string
+                        value:
+                          description: Value of the environment variable.
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  image:
+                    description: NVIDIA GPUDirect Storage Driver image name
+                    pattern: '[a-zA-Z0-9\-]+'
+                    type: string
+                  imagePullPolicy:
+                    description: Image pull policy
+                    type: string
+                  imagePullSecrets:
+                    description: Image pull secrets
+                    items:
+                      type: string
+                    type: array
+                  repository:
+                    description: NVIDIA GPUDirect Storage Driver image repository
+                    type: string
+                  version:
+                    description: NVIDIA GPUDirect Storage Driver image tag
+                    type: string
+                type: object
+              image:
+                default: nvcr.io/nvidia/driver
+                description: NVIDIA Driver container image name
+                type: string
+              imagePullPolicy:
+                description: Image pull policy
+                type: string
+              imagePullSecrets:
+                description: Image pull secrets
+                items:
+                  type: string
+                type: array
+              kernelModuleConfig:
+                description: 'Optional: Kernel module configuration parameters for
+                  the NVIDIA Driver'
+                properties:
+                  name:
+                    type: string
+                type: object
+              labels:
+                additionalProperties:
+                  type: string
+                description: |-
+                  Optional: Map of string keys and values that can be used to organize and categorize
+                  (scope and select) objects. May match selectors of replication controllers
+                  and services.
+                type: object
+              licensingConfig:
+                description: 'Optional: Licensing configuration for NVIDIA vGPU licensing'
+                properties:
+                  name:
+                    type: string
+                  nlsEnabled:
+                    description: NLSEnabled indicates if NVIDIA Licensing System is
+                      used for licensing.
+                    type: boolean
+                type: object
+              livenessProbe:
+                description: NVIDIA Driver container liveness probe settings
+                properties:
+                  failureThreshold:
+                    description: |-
+                      Minimum consecutive failures for the probe to be considered failed after having succeeded.
+                      Defaults to 3. Minimum value is 1.
+                    format: int32
+                    minimum: 1
+                    type: integer
+                  initialDelaySeconds:
+                    description: |-
+                      Number of seconds after the container has started before liveness probes are initiated.
+                      More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                    format: int32
+                    type: integer
+                  periodSeconds:
+                    description: |-
+                      How often (in seconds) to perform the probe.
+                      Default to 10 seconds. Minimum value is 1.
+                    format: int32
+                    minimum: 1
+                    type: integer
+                  successThreshold:
+                    description: |-
+                      Minimum consecutive successes for the probe to be considered successful after having failed.
+                      Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+                    format: int32
+                    minimum: 1
+                    type: integer
+                  timeoutSeconds:
+                    description: |-
+                      Number of seconds after which the probe times out.
+                      Defaults to 1 second. Minimum value is 1.
+                      More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                    format: int32
+                    minimum: 1
+                    type: integer
+                type: object
+              manager:
+                description: Manager represents configuration for NVIDIA Driver Manager
+                  initContainer
+                properties:
+                  env:
+                    description: 'Optional: List of environment variables'
+                    items:
+                      description: EnvVar represents an environment variable present
+                        in a Container.
+                      properties:
+                        name:
+                          description: Name of the environment variable.
+                          type: string
+                        value:
+                          description: Value of the environment variable.
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  image:
+                    description: Image represents NVIDIA Driver Manager image name
+                    pattern: '[a-zA-Z0-9\-]+'
+                    type: string
+                  imagePullPolicy:
+                    description: Image pull policy
+                    type: string
+                  imagePullSecrets:
+                    description: Image pull secrets
+                    items:
+                      type: string
+                    type: array
+                  repository:
+                    description: Repository represents Driver Manager repository path
+                    type: string
+                  version:
+                    description: Version represents NVIDIA Driver Manager image tag(version)
+                    type: string
+                type: object
+              nodeAffinity:
+                description: Affinity specifies node affinity rules for driver pods
+                properties:
+                  preferredDuringSchedulingIgnoredDuringExecution:
+                    description: |-
+                      The scheduler will prefer to schedule pods to nodes that satisfy
+                      the affinity expressions specified by this field, but it may choose
+                      a node that violates one or more of the expressions. The node that is
+                      most preferred is the one with the greatest sum of weights, i.e.
+                      for each node that meets all of the scheduling requirements (resource
+                      request, requiredDuringScheduling affinity expressions, etc.),
+                      compute a sum by iterating through the elements of this field and adding
+                      "weight" to the sum if the node matches the corresponding matchExpressions; the
+                      node(s) with the highest sum are the most preferred.
+                    items:
+                      description: |-
+                        An empty preferred scheduling term matches all objects with implicit weight 0
+                        (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+                      properties:
+                        preference:
+                          description: A node selector term, associated with the corresponding
+                            weight.
+                          properties:
+                            matchExpressions:
+                              description: A list of node selector requirements by
+                                node's labels.
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by + node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies a selector for installation of + NVIDIA driver + type: object + priorityClassName: + description: 'Optional: Set priorityClassName' + type: string + rdma: + description: GPUDirectRDMA defines the spec for NVIDIA Peer Memory + driver + properties: + enabled: + description: Enabled indicates if GPUDirect RDMA is enabled through + GPU operator + type: boolean + useHostMofed: + description: UseHostMOFED indicates to use MOFED drivers directly + installed on the host to enable GPUDirect RDMA + type: boolean + type: object + readinessProbe: + description: NVIDIA Driver container readiness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + repoConfig: + description: 'Optional: Custom repo configuration for NVIDIA Driver + container' + properties: + name: + type: string + type: object + repository: + description: NVIDIA Driver repository + type: string + resources: + description: 'Optional: Define resources requests and limits for each + pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + startupProbe: + description: NVIDIA Driver container startup probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + tolerations: + description: 'Optional: Set tolerations' + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. 
+ Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + useOpenKernelModules: + description: UseOpenKernelModules indicates if the open GPU kernel + modules should be used + type: boolean + usePrecompiled: + description: UsePrecompiled indicates if deployment of NVIDIA Driver + using pre-compiled modules is enabled + type: boolean + x-kubernetes-validations: + - message: usePrecompiled is an immutable field. Please create a new + NvidiaDriver resource instead when you want to change this setting. + rule: self == oldSelf + version: + description: NVIDIA Driver version (or just branch for precompiled + drivers) + type: string + virtualTopologyConfig: + description: 'Optional: Virtual Topology Daemon configuration for + NVIDIA vGPU drivers' + properties: + name: + description: 'Optional: Config name representing virtual topology + daemon configuration file nvidia-topologyd.conf' + type: string + type: object + required: + - driverType + - image + type: object + status: + description: NVIDIADriverStatus defines the observed state of NVIDIADriver + properties: + conditions: + description: Conditions is a list of conditions representing the NVIDIADriver's + current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace indicates a namespace in which the operator + and driver are installed + type: string + state: + description: |- + INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + Important: Run "make" to regenerate code after modifying this file + State indicates status of NVIDIADriver instance + enum: + - ignored + - ready + - notReady + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/bundle/v24.6.1/metadata/annotations.yaml b/bundle/v24.6.1/metadata/annotations.yaml new file mode 100644 index 000000000..f7383d5c4 --- /dev/null +++ b/bundle/v24.6.1/metadata/annotations.yaml @@ -0,0 +1,17 @@ +annotations: + operators.operatorframework.io.bundle.channels.v1: stable,v24.6 + operators.operatorframework.io.bundle.channel.default.v1: v24.6 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: gpu-operator-certified + operators.operatorframework.io.metrics.builder: operator-sdk-v1.4.0 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 + operators.operatorframework.io.test.config.v1: tests/scorecard/ + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operatorframework.io/cluster-monitoring: "true" + operatorframework.io/suggested-namespace: nvidia-gpu-operator + + # Annotations to specify OCP versions compatibility. 
+ com.redhat.openshift.versions: v4.12-v4.16 diff --git a/bundle/v24.6.2/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/v24.6.2/manifests/gpu-operator-certified.clusterserviceversion.yaml new file mode 100644 index 000000000..7d1d1dda5 --- /dev/null +++ b/bundle/v24.6.2/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -0,0 +1,921 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + labels: + operatorframework.io/arch.arm64: supported + operatorframework.io/arch.amd64: supported + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/warn: privileged + annotations: + features.operators.openshift.io/disconnected: "true" + features.operators.openshift.io/fips-compliant: "false" + features.operators.openshift.io/proxy-aware: "true" + features.operators.openshift.io/tls-profiles: "false" + features.operators.openshift.io/token-auth-aws: "false" + features.operators.openshift.io/token-auth-azure: "false" + features.operators.openshift.io/token-auth-gcp: "false" + features.operators.openshift.io/cnf: "false" + features.operators.openshift.io/cni: "false" + features.operators.openshift.io/csi: "false" + olm.skipRange: '>=1.9.0 <24.6.2' + alm-examples: |- + [ + { + "apiVersion": "nvidia.com/v1", + "kind": "ClusterPolicy", + "metadata": { + "name": "gpu-cluster-policy" + }, + "spec": { + "operator": { + "defaultRuntime": "crio", + "use_ocp_driver_toolkit": true, + "initContainer": { + } + }, + "sandboxWorkloads": { + "enabled": false, + "defaultWorkload": "container" + }, + "driver": { + "enabled": true, + "useNvidiaDriverCRD": false, + "useOpenKernelModules": false, + "upgradePolicy": { + "autoUpgrade": true, + "drain": { + "deleteEmptyDir": false, + "enable": false, + "force": false, + "timeoutSeconds": 300 + }, + "maxParallelUpgrades": 1, + "maxUnavailable": "25%", + "podDeletion": { + "deleteEmptyDir": false, + "force": false, + "timeoutSeconds": 300 + }, + "waitForCompletion": { + "timeoutSeconds": 0 + } + }, + "repoConfig": { + "configMapName": "" + }, + "certConfig": { + "name": "" + }, + "licensingConfig": { + "nlsEnabled": true, + "configMapName": "" + }, + "virtualTopology": { + "config": "" + }, + "kernelModuleConfig": { + "name": "" + } + }, + "dcgmExporter": { + "enabled": true, + "config": { + "name": "" + }, + "serviceMonitor": { + "enabled": true + } + }, + "dcgm": { + "enabled": true + }, + "daemonsets": { + "updateStrategy": "RollingUpdate", + "rollingUpdate": { + "maxUnavailable": "1" + } + }, + "devicePlugin": { + "enabled": true, + "config": { + "name": "", + "default": "" + }, + "mps": { + "root": "/run/nvidia/mps" + } + }, + "gfd": { + "enabled": true + }, + "migManager": { + "enabled": true + }, + "nodeStatusExporter": { + "enabled": true + }, + "mig": { + "strategy": "single" + }, + "toolkit": { + "enabled": true + }, + "validator": { + "plugin": { + "env": [ + { + "name": "WITH_WORKLOAD", + "value": "false" + } + ] + } + }, + "vgpuManager": { + "enabled": false + }, + "vgpuDeviceManager": { + "enabled": true + }, + "sandboxDevicePlugin": { + "enabled": true + }, + "vfioManager": { + "enabled": true + }, + "gds": { + "enabled": false + }, + "gdrcopy": { + "enabled": false + } + } + }, + { + "apiVersion": "nvidia.com/v1alpha1", + "kind": "NVIDIADriver", + "metadata": { + "name": "gpu-driver" + }, + "spec": { + "driverType": "gpu", + "repository": "nvcr.io/nvidia", + "image": "driver", + "version": 
"sha256:858de27c152669f5a3cf4287406405b16dd5bb70c0373324eb735511997bb415", + "nodeSelector": {}, + "manager": {}, + "repoConfig": { + "name": "" + }, + "certConfig": { + "name": "" + }, + "licensingConfig": { + "nlsEnabled": true, + "name": "" + }, + "virtualTopologyConfig": { + "name": "" + }, + "kernelModuleConfig": { + "name": "" + } + } + } + ] + operators.operatorframework.io/builder: operator-sdk-v1.4.0 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 + operatorframework.io/suggested-namespace: nvidia-gpu-operator + capabilities: Deep Insights + categories: AI/Machine Learning, OpenShift Optional + certified: "true" + containerImage: nvcr.io/nvidia/gpu-operator@sha256:8e0969cffc030a89c4acd68e64d41dd54e3bce8a794106b178d4dbd636a07f1c + createdAt: "Wed Sep 25 08:25:16 PDT 2024" + description: Automate the management and monitoring of NVIDIA GPUs. + provider: NVIDIA + repository: http://github.com/NVIDIA/gpu-operator + support: NVIDIA + name: gpu-operator-certified.v24.6.2 + namespace: placeholder +spec: + apiservicedefinitions: {} + relatedImages: + - name: gpu-operator-image + image: nvcr.io/nvidia/gpu-operator@sha256:8e0969cffc030a89c4acd68e64d41dd54e3bce8a794106b178d4dbd636a07f1c + - name: dcgm-exporter-image + image: nvcr.io/nvidia/k8s/dcgm-exporter@sha256:21f4c8b88716e8e6f732f9fb4c2efaef937c227491a8631c5e55036f80f39a4d + - name: dcgm-image + image: nvcr.io/nvidia/cloud-native/dcgm@sha256:15dab1273345df4a5844c4c761d064dbc4b592101251dc39174e597137123027 + - name: container-toolkit-image + image: nvcr.io/nvidia/k8s/container-toolkit@sha256:7bcc188703f2fac630f7a4ff8960e6733ac3a29adf6a946533b796d9a27b8acf + - name: driver-image + image: nvcr.io/nvidia/driver@sha256:858de27c152669f5a3cf4287406405b16dd5bb70c0373324eb735511997bb415 + - name: driver-image-535 + image: nvcr.io/nvidia/driver@sha256:a6d12fb5753f267dda25dfd38910f972bc632c006a24107fa50e20bba3642d7c + - name: driver-image-470 + image: nvcr.io/nvidia/driver@sha256:07e11f85d54d49ec9648fb06e148b8d832ee1f9c3549a915eee853c9ef2949c2 + - name: device-plugin-image + image: nvcr.io/nvidia/k8s-device-plugin@sha256:7ad2c9f71fe06f9f7745ac8635f46740fbdff4f11edd468addfab81afcdfa534 + - name: gpu-feature-discovery-image + image: nvcr.io/nvidia/k8s-device-plugin@sha256:7ad2c9f71fe06f9f7745ac8635f46740fbdff4f11edd468addfab81afcdfa534 + - name: mig-manager-image + image: nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:781fb47e264d9e0fbc8da5bd046e5e678316c866bc36ddd4b56d4eb0de682d5b + - name: init-container-image + image: nvcr.io/nvidia/cuda@sha256:de5b598bca89850567c4c104411d66bb52f47c9179199e6a3be6829b7ac586a2 + - name: gpu-operator-validator-image + image: nvcr.io/nvidia/cloud-native/gpu-operator-validator@sha256:96380b95396b7f29d2ed2ec8ef1ad5a8bcf9f55051db723295dc0a20db845331 + - name: k8s-driver-manager-image + image: nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:740abc3ff657545c10effd5354f09af525200ed9a1b7623f0c2e8c7bd9e4a4e2 + - name: vfio-manager-image + image: nvcr.io/nvidia/cuda@sha256:de5b598bca89850567c4c104411d66bb52f47c9179199e6a3be6829b7ac586a2 + - name: sandbox-device-plugin-image + image: nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:969147c01d63be5d1fe458f32f1cc0c7408cf3062531db91408e2fc57b4d8a67 + - name: vgpu-device-manager-image + image: nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:ae63fac9a4057a7646f0cf0ee0566e8928529adde05c4c0a017cda0599e381b2 + - name: gdrcopy-image + image: nvcr.io/nvidia/cloud-native/gdrdrv@sha256:33de74eb590f071403c17b6c210c02963245851971168bc0c07c06c100a9f376 
+ customresourcedefinitions: + owned: + - name: nvidiadrivers.nvidia.com + kind: NVIDIADriver + version: v1alpha1 + displayName: NVIDIADriver + description: NVIDIADriver allows you to deploy the NVIDIA driver + resources: + - kind: ServiceAccount + name: '' + version: v1 + - kind: DaemonSet + name: '' + version: apps/v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: clusterpolicies + name: '' + version: v1 + - kind: clusterversions + name: '' + version: v1 + - kind: nodes + name: '' + version: v1 + - kind: status + name: '' + version: v1 + specDescriptors: + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + statusDescriptors: + - description: The current state of the driver. + displayName: State + path: state + x-descriptors: + - 'urn:alm:descriptor:text' + - name: clusterpolicies.nvidia.com + kind: ClusterPolicy + version: v1 + group: nvidia.com + displayName: ClusterPolicy + description: ClusterPolicy allows you to configure the GPU Operator + resources: + - kind: ServiceAccount + name: '' + version: v1 + - kind: Deployment + name: '' + version: apps/v1 + - kind: DaemonSet + name: '' + version: apps/v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: clusterpolicies + name: '' + version: v1 + - kind: clusterversions + name: '' + version: v1 + - kind: nodes + name: '' + version: v1 + - kind: status + name: '' + version: v1 + specDescriptors: + - description: GPU Operator config + displayName: GPU Operator config + path: operator + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: operator.validator.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: Image pull secrets + displayName: Image pull secrets + path: operator.validator.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - description: NVIDIA GPU/vGPU Driver config + displayName: NVIDIA GPU/vGPU Driver config + path: driver + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: driver.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: driver.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 
'Optional: Set tolerations' + displayName: Tolerations + path: driver.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: driver.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: driver.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA DCGM Exporter config + displayName: NVIDIA DCGM Exporter config + path: dcgmExporter + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: dcgmExporter.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: dcgmExporter.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: dcgmExporter.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: dcgmExporter.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: dcgmExporter.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA Device Plugin config + displayName: NVIDIA Device Plugin config + path: devicePlugin + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: devicePlugin.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: devicePlugin.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: devicePlugin.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: devicePlugin.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: devicePlugin.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: GPU Feature Discovery Plugin config + displayName: GPU Feature Discovery Plugin config + path: gfd + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: gfd.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: 
gfd.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: gfd.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: gfd.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: gfd.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA Container Toolkit config + displayName: NVIDIA Container Toolkit config + path: toolkit + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: toolkit.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: toolkit.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: toolkit.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: toolkit.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: toolkit.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - displayName: NVIDIA DCGM config + description: NVIDIA DCGM config + path: dcgm + - displayName: Validator config + description: Validator config + path: validator + - displayName: Node Status Exporter config + description: Node Status Exporter config + path: nodeStatusExporter + - displayName: Daemonsets config + description: Daemonsets config + path: daemonsets + - displayName: MIG config + description: MIG config + path: mig + - displayName: NVIDIA MIG Manager config + description: NVIDIA MIG Manager config + path: migManager + - displayName: PodSecurityPolicy config + description: PodSecurityPolicy config + path: psp + - displayName: NVIDIA GPUDirect Storage config + description: NVIDIA GPUDirect Storage config + path: gds + - displayName: Sandbox Workloads config + description: Sandbox Workloads config + path: sandboxWorkloads + - displayName: NVIDIA vGPU Manager config + description: NVIDIA vGPU Manager config + path: vgpuManager + - displayName: NVIDIA vGPU Device Manager config + description: NVIDIA vGPU Device Manager config + path: vgpuDeviceManager + - displayName: VFIO Manager config + description: VFIO Manager config + path: vfioManager + - displayName: NVIDIA Sandbox Device Plugin config + description: NVIDIA Sandbox Device Plugin config + path: sandboxDevicePlugin + statusDescriptors: + - description: The current state of the operator. 
+ displayName: State + path: state + x-descriptors: + - 'urn:alm:descriptor:text' + displayName: NVIDIA GPU Operator + description: > + Kubernetes provides access to special hardware resources such as NVIDIA + GPUs, NICs, Infiniband adapters and other devices through the [device plugin + framework](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/). + However, configuring and managing nodes with these hardware resources + requires configuration of multiple software components such as drivers, + container runtimes or other libraries which are difficult and prone to + errors. + + The NVIDIA GPU Operator uses the [operator + framework](https://cloud.redhat.com/blog/introducing-the-operator-framework) within + Kubernetes to automate the management of all NVIDIA software components + needed to provision and monitor GPUs. + These components include the NVIDIA drivers (to enable CUDA), Kubernetes + device plugin for GPUs, the NVIDIA Container Runtime, automatic node + labelling and NVIDIA DCGM exporter. + + Visit the official site of the [GPU Operator](https://github.com/NVIDIA/gpu-operator) for more information. + For getting started with using the GPU Operator with OpenShift, see the instructions + [here](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/openshift/contents.html). + icon: + - base64data: iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAB2lBMVEUAAAD///8EBAN3uQACAgIAAAQJDQUCAgB1tgAHCQf+/v5Ufg5Hagxxqwt+xgJ3uAB9wwB4vQBRUVEeLA3e3t5nZ2coKCgODg4FBwZ9wwR6wAJ4vADz8/MbGxt5tw1vpw1/wgoOFwkLDwh9xQH5+fny8vLw8PDFxcWysrKFhYVvb282NjYyMjIqKioXFxdikxRYgxNCYxJQdhFqoQ9xrg16ugxyqgyAxQkEBQj7+/v29vbIyMhjY2NbW1tHR0cvLy8kJCQdHR0ZGRlKbxJ8uhFNcxFVgBAxSBBgkg93tQ50sA4qPg4XIg18vwsbKQsSGgsLCwsMEwqCyQeByQFztADPz8+/v7+6urqWlpZra2tKSkogICASEhJmmRE8XBA5VRA2UBBonA9biA9GaQ4sQg4jMw4mOQ0aJw2GzgsUHgttpAqJ0Ql/wQWG0AJ8vwF0uQCtra2jo6OQkJB9fX1VVVVCQkI9PT0iIiIUFBRSfBNgjhA7WRBGZw+GywmFzgaAyASBxQN2twDb29u2traenp6Kiop+fn53d3dzc3NyqRV4sxM/YBNAXRElNhBjlQ+IzA00TQ16vgxJbgp6vAl4tgJ3vgDs7Ozn5+fa2trS0tJCXRY6VBV6thSL1gf4nFdFAAAD80lEQVRYw+zSOXPaQBgG4He0LJJmbGRGDUIzuvgBQiAEPfcdwC33DTbUtmOwSyc+4iRucvzXRImLFJmRShc8xXbfu+9+szg4OHjjAsH/iFD49q7rqM6xc/wPtWyBhS8sC94ObWRCZDksh1+RzmcEfI0DoPrjylEkSTgViMs9udjYTwMG4Gf51Z1BM81ioRwit+QvgYsdUQZeKFr3ladyKXvVr+pAM5uKcmRLXFzoCIxn+0i/8lSaBMHnfi7qowfQuZnm3PuFPwGs13zD3NlViozY/z4YD6/TCQORbPr2q78GLB0ou5IO40pd5AxQZnJ83m2y9Ju2JYKfgEhWC18aEIfrZLURHwQC0B87ySZwHxX8BNDWB1KfQfyxT2TA24uPQMt8yTWA3obz8wQGlhTN06Z900MkuJLrYu3u5LkK9LTtGRF8NEDLeSnXYLUdHUFVlpPqTa4IamlhJZ464biY1w4CKGrROOW7uwLlV+Q02lanCF6cbSoPVLzUfPwDll5I9T6WyXWhZre1yjiI6VCSzCWY3+FKaAwGHngzpEygx6+V6Uzk6TJR7yhWxJ1bFgTPJ7gMc58aUCq+n+qNT6Pn8y/xOcCiZZVjnJ+AAPhEuj0SKZ9bL9ZpNS9SgM6z9p5w3jt43cMvecfWBhm7dtfEpfhYMDBYpFd7mDZIAxPCFKgBhB0hkWbE2wVMyqycfhOMEiebSzFz5IMTEjw7E87UFj4GVR7GXqaSkoIcISEc/I38/PwhOTUMRBrADgwK09zgYGUBqbwcARiQyp3Eyk6kC4BloqtbJTcaSHIHShALWFmBSRuCWBGC+AtDMAAGIpAAc9mBiB0sCLSXHUSygxSxEIoE7IKEgbhopKgogC96x04QCMMw/H0cG6f0cEmBHaLc7FFQzApoTLwtQgWUWo26glx2mzGkyoHM1PPMO/NrnSH8e2QAiRsZ8S3ZuJoW5Udg5moGoMRLN2gAnkcUctueJ1gADsdtlZ2AgmSYoaDZBXwRctcwy6HN3XX/wfnTnA7Q5x0S0Gku4wHpe7Ql8Mbtu4TqC3qcADGtUl4O3eK0AkZdKH1mU/a6MFQGA7pQGoAVoAuuPYZlLJF2BawVLLjwac6Q8wUax61/CpKQAT6ZX3hFqoqqAFvuf4AzM+NgsoBS/wcSOD7SFzyf6CE9UQK9II1MRvIJm8QSgsLiBZuypsAWKyARElgx5FcLv1N4nFLbB45Sh6+TzsQRtn7bz/B3fS9GQ12bgUE2PKycQbwgXD0SWLwVhpZFq4eHhWloOjLoqGvoRYRGAR2vp2EtpNUaTUpiRAizMAEhKNXpYZNnAUlBCSgFYTIxQTlMMJNGwSgYBdQHAFsKs+/bUkeyAAAAAElFTkSuQmCC + mediatype: image/png + install: + spec: + clusterPermissions: + - serviceAccountName: gpu-operator + rules: 
+ - apiGroups: + - nvidia.com + resources: + - clusterpolicies + - clusterpolicies/finalizers + - clusterpolicies/status + - nvidiadrivers + - nvidiadrivers/finalizers + - nvidiadrivers/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - deletecollection + - apiGroups: + - config.openshift.io + resources: + - clusterversions + - proxies + verbs: + - get + - list + - watch + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use + - create + - get + - list + - watch + - patch + - update + - delete + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use + resourceNames: + - hostmount-anyuid + - apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - get + - list + - watch + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - prometheusrules + verbs: + - get + - list + - create + - watch + - update + - delete + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - pods + - pods/eviction + - services + - services/finalizers + - events + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - create + - watch + - update + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch + - apiGroups: + - node.k8s.io + resources: + - runtimeclasses + verbs: + - get + - list + - create + - update + - watch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + permissions: + - serviceAccountName: gpu-operator + rules: + - apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - secrets + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + deployments: + - name: gpu-operator + spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: gpu-operator + app: gpu-operator + strategy: {} + template: + metadata: + labels: + app.kubernetes.io/component: gpu-operator + app: gpu-operator + nvidia.com/gpu-driver-upgrade-drain.skip: "true" + spec: + priorityClassName: system-node-critical + containers: + - args: + - --leader-elect + - --leader-lease-renew-deadline + - "60s" + image: nvcr.io/nvidia/gpu-operator@sha256:8e0969cffc030a89c4acd68e64d41dd54e3bce8a794106b178d4dbd636a07f1c + command: + - gpu-operator + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: gpu-operator + ports: + - name: metrics + containerPort: 8080 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + 
initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 200m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /host-etc/os-release + name: host-os-release + readOnly: true + env: + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "VALIDATOR_IMAGE" + value: "nvcr.io/nvidia/cloud-native/gpu-operator-validator@sha256:96380b95396b7f29d2ed2ec8ef1ad5a8bcf9f55051db723295dc0a20db845331" + - name: "GFD_IMAGE" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:7ad2c9f71fe06f9f7745ac8635f46740fbdff4f11edd468addfab81afcdfa534" + - name: "CONTAINER_TOOLKIT_IMAGE" + value: "nvcr.io/nvidia/k8s/container-toolkit@sha256:7bcc188703f2fac630f7a4ff8960e6733ac3a29adf6a946533b796d9a27b8acf" + - name: "DCGM_IMAGE" + value: "nvcr.io/nvidia/cloud-native/dcgm@sha256:15dab1273345df4a5844c4c761d064dbc4b592101251dc39174e597137123027" + - name: "DCGM_EXPORTER_IMAGE" + value: "nvcr.io/nvidia/k8s/dcgm-exporter@sha256:21f4c8b88716e8e6f732f9fb4c2efaef937c227491a8631c5e55036f80f39a4d" + - name: "DEVICE_PLUGIN_IMAGE" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:7ad2c9f71fe06f9f7745ac8635f46740fbdff4f11edd468addfab81afcdfa534" + - name: "DRIVER_IMAGE" + value: "nvcr.io/nvidia/driver@sha256:858de27c152669f5a3cf4287406405b16dd5bb70c0373324eb735511997bb415" + - name: "DRIVER_IMAGE-535" + value: "nvcr.io/nvidia/driver@sha256:a6d12fb5753f267dda25dfd38910f972bc632c006a24107fa50e20bba3642d7c" + - name: "DRIVER_IMAGE-470" + value: "nvcr.io/nvidia/driver@sha256:07e11f85d54d49ec9648fb06e148b8d832ee1f9c3549a915eee853c9ef2949c2" + - name: "DRIVER_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:740abc3ff657545c10effd5354f09af525200ed9a1b7623f0c2e8c7bd9e4a4e2" + - name: "MIG_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:781fb47e264d9e0fbc8da5bd046e5e678316c866bc36ddd4b56d4eb0de682d5b" + - name: "CUDA_BASE_IMAGE" + value: "nvcr.io/nvidia/cuda@sha256:de5b598bca89850567c4c104411d66bb52f47c9179199e6a3be6829b7ac586a2" + - name: "VFIO_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cuda@sha256:de5b598bca89850567c4c104411d66bb52f47c9179199e6a3be6829b7ac586a2" + - name: "SANDBOX_DEVICE_PLUGIN_IMAGE" + value: "nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:969147c01d63be5d1fe458f32f1cc0c7408cf3062531db91408e2fc57b4d8a67" + - name: "VGPU_DEVICE_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:ae63fac9a4057a7646f0cf0ee0566e8928529adde05c4c0a017cda0599e381b2" + - name: "GDRCOPY_IMAGE" + value: "nvcr.io/nvidia/cloud-native/gdrdrv@sha256:33de74eb590f071403c17b6c210c02963245851971168bc0c07c06c100a9f376" + terminationGracePeriodSeconds: 10 + volumes: + - hostPath: + path: /etc/os-release + name: host-os-release + serviceAccountName: gpu-operator + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: false + type: AllNamespaces + keywords: + - gpu + - cuda + - compute + - operator + - deep learning + - monitoring + - tesla + maintainers: + - name: NVIDIA + email: operator_feedback@nvidia.com + maturity: stable + provider: + name: NVIDIA Corporation + version: 24.6.2 + replaces: gpu-operator-certified.v24.6.1 diff --git a/bundle/v24.6.2/manifests/nvidia.com_clusterpolicies.yaml b/bundle/v24.6.2/manifests/nvidia.com_clusterpolicies.yaml new file mode 100644 index 000000000..8e29cabf1 --- /dev/null 
+++ b/bundle/v24.6.2/manifests/nvidia.com_clusterpolicies.yaml @@ -0,0 +1,2404 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: clusterpolicies.nvidia.com +spec: + group: nvidia.com + names: + kind: ClusterPolicy + listKind: ClusterPolicyList + plural: clusterpolicies + singular: clusterpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1 + schema: + openAPIV3Schema: + description: ClusterPolicy is the Schema for the clusterpolicies API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterPolicySpec defines the desired state of ClusterPolicy + properties: + ccManager: + description: CCManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + defaultMode: + description: Default CC mode setting for compatible GPUs on the + node + enum: + - "on" + - "off" + - devtools + type: string + enabled: + description: Enabled indicates if deployment of CC Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: CC Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: CC Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: CC Manager image tag + type: string + type: object + cdi: + description: CDI configures how the Container Device Interface is + used in the cluster + properties: + default: + default: false + description: Default indicates whether to use CDI as the default + mechanism for providing GPU access to containers. + type: boolean + enabled: + default: false + description: Enabled indicates whether CDI can be used to make + GPUs accessible to containers. + type: boolean + type: object + daemonsets: + description: Daemonset defines common configuration for all Daemonsets + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + labels: + additionalProperties: + type: string + description: |- + Optional: Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + type: object + priorityClassName: + type: string + rollingUpdate: + description: 'Optional: Configuration for rolling update of all + DaemonSet pods' + properties: + maxUnavailable: + type: string + type: object + tolerations: + description: 'Optional: Set tolerations' + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + updateStrategy: + default: RollingUpdate + enum: + - RollingUpdate + - OnDelete + type: string + type: object + dcgm: + description: DCGM component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA DCGM Hostengine + as a separate pod is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + hostPort: + description: 'Deprecated: HostPort represents host port that needs + to be bound for DCGM engine (Default: 5555)' + format: int32 + type: integer + image: + description: NVIDIA DCGM image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA DCGM image tag + type: string + type: object + dcgmExporter: + description: DCGMExporter spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom metrics configuration for NVIDIA + DCGM Exporter' + properties: + name: + description: ConfigMap name with file dcgm-metrics.csv for + metrics to be collected by NVIDIA DCGM Exporter + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA DCGM Exporter + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA DCGM Exporter image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM Exporter image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + serviceMonitor: + description: 'Optional: ServiceMonitor configuration for NVIDIA + DCGM Exporter' + properties: + additionalLabels: + additionalProperties: + type: string + description: AdditionalLabels to add to ServiceMonitor instance + for NVIDIA DCGM Exporter + type: object + enabled: + description: Enabled indicates if ServiceMonitor is deployed + for NVIDIA DCGM Exporter + type: boolean + honorLabels: + description: HonorLabels chooses the metric’s labels on collisions + with target labels. + type: boolean + interval: + description: |- + Interval which metrics should be scraped from NVIDIA DCGM Exporter. If not specified Prometheus’ global scrape interval is used. + Supported units: y, w, d, h, m, s, ms + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + relabelings: + description: Relabelings allows to rewrite labels on metric + sets for NVIDIA DCGM Exporter + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + + Only applicable when the action is `HashMod`. 
+ format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + + Regex capture groups are available. + type: string + type: object + type: array + type: object + version: + description: NVIDIA DCGM Exporter image tag + type: string + type: object + devicePlugin: + description: DevicePlugin component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Configuration for the NVIDIA Device Plugin + via the ConfigMap' + properties: + default: + description: Default config name within the ConfigMap for + the NVIDIA Device Plugin config + type: string + name: + description: ConfigMap name for NVIDIA Device Plugin config + including shared config between plugin and GFD + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Device + Plugin through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + mps: + description: 'Optional: MPS related configuration for the NVIDIA + Device Plugin' + properties: + root: + default: /run/nvidia/mps + description: Root defines the MPS root path on the host + type: string + type: object + repository: + description: NVIDIA Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Device Plugin image tag + type: string + type: object + driver: + description: Driver component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + certConfig: + description: 'Optional: Custom certificates configuration for + NVIDIA Driver container' + properties: + name: + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Driver + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + kernelModuleConfig: + description: 'Optional: Kernel module configuration parameters + for the NVIDIA Driver' + properties: + name: + type: string + type: object + licensingConfig: + description: 'Optional: Licensing configuration for NVIDIA vGPU + licensing' + properties: + configMapName: + type: string + nlsEnabled: + description: NLSEnabled indicates if NVIDIA Licensing System + is used for licensing. + type: boolean + type: object + livenessProbe: + description: NVIDIA Driver container liveness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + manager: + description: Manager represents configuration for NVIDIA Driver + Manager initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Managerrepository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag(version) + type: string + type: object + rdma: + description: GPUDirectRDMASpec defines the properties for nvidia-peermem + deployment + properties: + enabled: + description: Enabled indicates if GPUDirect RDMA is enabled + through GPU operator + type: boolean + useHostMofed: + description: UseHostMOFED indicates to use MOFED drivers directly + installed on the host to enable GPUDirect RDMA + type: boolean + type: object + readinessProbe: + description: NVIDIA Driver container readiness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + repoConfig: + description: 'Optional: Custom repo configuration for NVIDIA Driver + container' + properties: + configMapName: + type: string + type: object + repository: + description: NVIDIA Driver image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + startupProbe: + description: NVIDIA Driver container startup probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + upgradePolicy: + description: Driver auto-upgrade settings + properties: + autoUpgrade: + default: false + description: |- + AutoUpgrade is a global switch for automatic upgrade feature + if set to false all other options are ignored + type: boolean + drain: + description: DrainSpec describes configuration for node drain + during automatic upgrade + properties: + deleteEmptyDir: + default: false + description: |- + DeleteEmptyDir indicates if should continue even if there are pods using emptyDir + (local data that will be deleted when the node is drained) + type: boolean + enable: + default: false + description: Enable indicates if node draining is allowed + during upgrade + type: boolean + force: + default: false + description: Force indicates if force draining is allowed + type: boolean + podSelector: + description: |- + PodSelector specifies a label selector to filter pods on the node that need to be drained + For more details on label selectors, see: + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + type: string + timeoutSeconds: + default: 300 + description: TimeoutSecond specifies the length of time + in seconds to wait before giving up drain, zero means + infinite + minimum: 0 + type: integer + type: object + maxParallelUpgrades: + default: 1 + description: |- + MaxParallelUpgrades indicates how many nodes can be upgraded in parallel + 0 means no limit, all nodes will be upgraded in parallel + minimum: 0 + type: integer + maxUnavailable: + anyOf: + - type: integer + - type: string + default: 25% + description: |- + MaxUnavailable is the maximum number of nodes with the driver installed, that can be unavailable during the upgrade. + Value can be an absolute number (ex: 5) or a percentage of total nodes at the start of upgrade (ex: 10%). + Absolute number is calculated from percentage by rounding up. + By default, a fixed value of 25% is used. 
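Tying the upgradePolicy fields above together: autoUpgrade is the master switch, maxParallelUpgrades and maxUnavailable bound the rollout, and drain controls whether and how nodes are drained. A sketch with illustrative values; the podSelector label is hypothetical:

```yaml
driver:
  upgradePolicy:
    autoUpgrade: true          # when false, every other option here is ignored
    maxParallelUpgrades: 2     # 0 would upgrade all nodes in parallel
    maxUnavailable: "25%"      # absolute count (e.g. 5) or percentage, rounded up
    drain:
      enable: true
      force: false
      deleteEmptyDir: true     # continue even if pods use emptyDir volumes
      timeoutSeconds: 300      # 0 means wait indefinitely
      podSelector: "app=cuda-workload"   # hypothetical selector for pods to drain
```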
+ x-kubernetes-int-or-string: true + podDeletion: + description: PodDeletionSpec describes configuration for deletion + of pods using special resources during automatic upgrade + properties: + deleteEmptyDir: + default: false + description: |- + DeleteEmptyDir indicates if should continue even if there are pods using emptyDir + (local data that will be deleted when the pod is deleted) + type: boolean + force: + default: false + description: Force indicates if force deletion is allowed + type: boolean + timeoutSeconds: + default: 300 + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite + minimum: 0 + type: integer + type: object + waitForCompletion: + description: WaitForCompletionSpec describes the configuration + for waiting on job completions + properties: + podSelector: + description: |- + PodSelector specifies a label selector for the pods to wait for completion + For more details on label selectors, see: + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + type: string + timeoutSeconds: + default: 0 + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite + minimum: 0 + type: integer + type: object + type: object + useNvidiaDriverCRD: + description: UseNvidiaDriverCRD indicates if the deployment of + NVIDIA Driver is managed by the NVIDIADriver CRD type + type: boolean + useOpenKernelModules: + description: UseOpenKernelModules indicates if the open GPU kernel + modules should be used + type: boolean + usePrecompiled: + description: UsePrecompiled indicates if deployment of NVIDIA + Driver using pre-compiled modules is enabled + type: boolean + version: + description: NVIDIA Driver image tag + type: string + virtualTopology: + description: 'Optional: Virtual Topology Daemon configuration + for NVIDIA vGPU drivers' + properties: + config: + description: 'Optional: Config name representing virtual topology + daemon configuration file nvidia-topologyd.conf' + type: string + type: object + type: object + gdrcopy: + description: GDRCopy component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GDRCopy is enabled through GPU + Operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA GDRCopy driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA GDRCopy driver image repository + type: string + version: + description: NVIDIA GDRCopy driver image tag + type: string + type: object + gds: + description: GPUDirectStorage defines the spec for GDS components(Experimental) + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GPUDirect Storage is enabled + through GPU operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA GPUDirect Storage Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA GPUDirect Storage Driver image repository + type: string + version: + description: NVIDIA GPUDirect Storage Driver image tag + type: string + type: object + gfd: + description: GPUFeatureDiscovery spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of GPU Feature Discovery + Plugin is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: GFD image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: GFD image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: GFD image tag + type: string + type: object + hostPaths: + description: HostPaths defines various paths on the host needed by + GPU Operator components + properties: + driverInstallDir: + description: |- + DriverInstallDir represents the root at which driver files including libraries, + config files, and executables can be found. + type: string + rootFS: + description: |- + RootFS represents the path to the root filesystem of the host. + This is used by components that need to interact with the host filesystem + and as such this must be a chroot-able filesystem. + Examples include the MIG Manager and Toolkit Container which may need to + stop, start, or restart systemd services. + type: string + type: object + kataManager: + description: KataManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: Kata Manager config + properties: + artifactsDir: + default: /opt/nvidia-gpu-operator/artifacts/runtimeclasses + description: |- + ArtifactsDir is the directory where kata artifacts (e.g. kernel / guest images, configuration, etc.) + are placed on the local filesystem. + type: string + runtimeClasses: + description: RuntimeClasses is a list of kata runtime classes + to configure. + items: + description: RuntimeClass defines the configuration for + a kata RuntimeClass + properties: + artifacts: + description: Artifacts are the kata artifacts associated + with the runtime class. + properties: + pullSecret: + description: PullSecret is the secret used to pull + the OCI artifact. + type: string + url: + description: |- + URL is the path to the OCI artifact (payload) containing all artifacts + associated with a kata runtime class. + type: string + required: + - url + type: object + name: + description: Name is the name of the kata runtime class. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector specifies the nodeSelector for the RuntimeClass object. + This ensures pods running with the RuntimeClass only get scheduled + onto nodes which support it. + type: object + required: + - artifacts + - name + type: object + type: array + type: object + enabled: + description: Enabled indicates if deployment of Kata Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
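Read together, the kataManager.config schema above says each runtime class names an OCI artifact payload plus an optional node selector that scopes where the RuntimeClass is schedulable. A sketch under those fields; the runtime class name, artifact URL, pull secret, and node label are all hypothetical:

```yaml
kataManager:
  enabled: true
  config:
    artifactsDir: /opt/nvidia-gpu-operator/artifacts/runtimeclasses   # schema default
    runtimeClasses:
    - name: kata-nvidia-gpu                              # hypothetical class name
      artifacts:
        url: nvcr.io/example/kata-gpu-artifacts:latest   # hypothetical OCI payload
        pullSecret: ngc-pull-secret                      # hypothetical secret name
      nodeSelector:
        example.com/kata-capable: "true"                 # hypothetical node label
```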
+ type: string + required: + - name + type: object + type: array + image: + description: Kata Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Kata Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: Kata Manager image tag + type: string + type: object + mig: + description: MIG spec + properties: + strategy: + description: 'Optional: MIGStrategy to apply for GFD and NVIDIA + Device Plugin' + enum: + - none + - single + - mixed + type: string + type: object + migManager: + description: MIGManager for configuration to deploy MIG Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom mig-parted configuration for NVIDIA + MIG Manager container' + properties: + default: + default: all-disabled + description: Default MIG config to be applied on the node, + when there is no config specified with the node label nvidia.com/mig.config + enum: + - all-disabled + - "" + type: string + name: + default: default-mig-parted-config + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA MIG Manager + is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
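The mig and migManager blocks above cooperate: mig.strategy controls how GFD and the device plugin advertise MIG devices, while the mig-parted ConfigMap (schema defaults: name default-mig-parted-config, profile all-disabled) supplies the profile applied whenever a node carries no nvidia.com/mig.config label. A sketch using the schema defaults:

```yaml
mig:
  strategy: single                      # one of none, single, mixed
migManager:
  enabled: true
  config:
    name: default-mig-parted-config     # schema default ConfigMap name
    default: all-disabled               # applied when nvidia.com/mig.config is unset
```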
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  gpuClientsConfig:
+                    description: 'Optional: Custom gpu-clients configuration for NVIDIA
+                      MIG Manager container'
+                    properties:
+                      name:
+                        description: ConfigMap name
+                        type: string
+                    type: object
+                  image:
+                    description: NVIDIA MIG Manager image name
+                    pattern: '[a-zA-Z0-9\-]+'
+                    type: string
+                  imagePullPolicy:
+                    description: Image pull policy
+                    type: string
+                  imagePullSecrets:
+                    description: Image pull secrets
+                    items:
+                      type: string
+                    type: array
+                  repository:
+                    description: NVIDIA MIG Manager image repository
+                    type: string
+                  resources:
+                    description: 'Optional: Define resources requests and limits for
+                      each pod'
+                    properties:
+                      limits:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: |-
+                          Limits describes the maximum amount of compute resources allowed.
+                          More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                        type: object
+                      requests:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: |-
+                          Requests describes the minimum amount of compute resources required.
+                          If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+                          otherwise to an implementation-defined value. Requests cannot exceed Limits.
+                          More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                        type: object
+                    type: object
+                  version:
+                    description: NVIDIA MIG Manager image tag
+                    type: string
+                type: object
+              nodeStatusExporter:
+                description: NodeStatusExporter spec
+                properties:
+                  args:
+                    description: 'Optional: List of arguments'
+                    items:
+                      type: string
+                    type: array
+                  enabled:
+                    description: Enabled indicates if deployment of Node Status Exporter
+                      is enabled.
+                    type: boolean
+                  env:
+                    description: 'Optional: List of environment variables'
+                    items:
+                      description: EnvVar represents an environment variable present
+                        in a Container.
+                      properties:
+                        name:
+                          description: Name of the environment variable.
+                          type: string
+                        value:
+                          description: Value of the environment variable.
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  image:
+                    description: Node Status Exporter image name
+                    pattern: '[a-zA-Z0-9\-]+'
+                    type: string
+                  imagePullPolicy:
+                    description: Image pull policy
+                    type: string
+                  imagePullSecrets:
+                    description: Image pull secrets
+                    items:
+                      type: string
+                    type: array
+                  repository:
+                    description: Node Status Exporter image repository
+                    type: string
+                  resources:
+                    description: 'Optional: Define resources requests and limits for
+                      each pod'
+                    properties:
+                      limits:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: |-
+                          Limits describes the maximum amount of compute resources allowed.
+                          More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                        type: object
+                      requests:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: |-
+                          Requests describes the minimum amount of compute resources required.
+                          If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+                          otherwise to an implementation-defined value. Requests cannot exceed Limits.
+                          More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                        type: object
+                    type: object
+                  version:
+                    description: Node Status Exporter image tag
+                    type: string
+                type: object
+              operator:
+                description: Operator component spec
+                properties:
+                  annotations:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Optional: Annotations is an unstructured key value map stored with a resource that may be
+                      set by external tools to store and retrieve arbitrary metadata. They are not
+                      queryable and should be preserved when modifying objects.
+                    type: object
+                  defaultRuntime:
+                    default: docker
+                    description: Runtime defines container runtime type
+                    enum:
+                    - docker
+                    - crio
+                    - containerd
+                    type: string
+                  initContainer:
+                    description: InitContainerSpec describes configuration for initContainer
+                      image used with all components
+                    properties:
+                      image:
+                        description: Image represents image name
+                        pattern: '[a-zA-Z0-9\-]+'
+                        type: string
+                      imagePullPolicy:
+                        description: Image pull policy
+                        type: string
+                      imagePullSecrets:
+                        description: Image pull secrets
+                        items:
+                          type: string
+                        type: array
+                      repository:
+                        description: Repository represents image repository path
+                        type: string
+                      version:
+                        description: Version represents image tag (version)
+                        type: string
+                    type: object
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Optional: Map of string keys and values that can be used to organize and categorize
+                      (scope and select) objects. May match selectors of replication controllers
+                      and services.
+                    type: object
+                  runtimeClass:
+                    default: nvidia
+                    type: string
+                  use_ocp_driver_toolkit:
+                    description: UseOpenShiftDriverToolkit indicates if DriverToolkit
+                      image should be used on OpenShift to build and install driver
+                      modules
+                    type: boolean
+                required:
+                - defaultRuntime
+                type: object
+              psa:
+                description: PSA defines spec for PodSecurityAdmission configuration
+                properties:
+                  enabled:
+                    description: Enabled indicates if PodSecurityAdmission configuration
+                      needs to be enabled for all Pods
+                    type: boolean
+                type: object
+              psp:
+                description: |-
+                  Deprecated: Pod Security Policies are no longer supported. Please use PodSecurityAdmission instead
+                  PSP defines spec for handling PodSecurityPolicies
+                properties:
+                  enabled:
+                    description: Enabled indicates if PodSecurityPolicies needs to
+                      be enabled for all Pods
+                    type: boolean
+                type: object
+              sandboxDevicePlugin:
+                description: SandboxDevicePlugin component spec
+                properties:
+                  args:
+                    description: 'Optional: List of arguments'
+                    items:
+                      type: string
+                    type: array
+                  enabled:
+                    description: Enabled indicates if deployment of NVIDIA Sandbox
+                      Device Plugin through operator is enabled
+                    type: boolean
+                  env:
+                    description: 'Optional: List of environment variables'
+                    items:
+                      description: EnvVar represents an environment variable present
+                        in a Container.
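For the operator block above, defaultRuntime is the only required field and runtimeClass defaults to nvidia. A sketch; the initContainer repository, image, and tag are illustrative placeholders, not documented defaults:

```yaml
operator:
  defaultRuntime: containerd        # one of docker, crio, containerd
  runtimeClass: nvidia              # schema default
  initContainer:
    repository: nvcr.io/example     # hypothetical repository
    image: init-base                # hypothetical image name
    version: "1.0.0"                # hypothetical tag
```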
+ properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Sandbox Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA Sandbox Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Sandbox Device Plugin image tag + type: string + type: object + sandboxWorkloads: + description: SandboxWorkloads defines the spec for handling sandbox + workloads (i.e. Virtual Machines) + properties: + defaultWorkload: + default: container + description: |- + DefaultWorkload indicates the default GPU workload type to configure + worker nodes in the cluster for + enum: + - container + - vm-passthrough + - vm-vgpu + type: string + enabled: + description: |- + Enabled indicates if the GPU Operator should manage additional operands required + for sandbox workloads (i.e. VFIO Manager, vGPU Manager, and additional device plugins) + type: boolean + type: object + toolkit: + description: Toolkit component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA Container + Toolkit through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
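sandboxWorkloads, as defined above, switches the operator into managing the additional VM-oriented operands and picks the default per-node workload type. A minimal sketch:

```yaml
sandboxWorkloads:
  enabled: true                     # also deploy VFIO Manager, vGPU Manager, etc.
  defaultWorkload: vm-passthrough   # one of container, vm-passthrough, vm-vgpu
```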
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Container Toolkit image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + installDir: + default: /usr/local/nvidia + description: Toolkit install directory on the host + type: string + repository: + description: NVIDIA Container Toolkit image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Container Toolkit image tag + type: string + type: object + validator: + description: Validator defines the spec for operator-validator daemonset + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + cuda: + description: CUDA validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + driver: + description: Toolkit validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: Validator image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + plugin: + description: Plugin validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + repository: + description: Validator image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + toolkit: + description: Toolkit validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + version: + description: Validator image tag + type: string + vfioPCI: + description: VfioPCI validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + vgpuDevices: + description: VGPUDevices validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + type: object + vgpuManager: + description: VGPUManager validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + type: object + vfioManager: + description: VFIOManager for configuration to deploy VFIO-PCI Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Managerrepository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag(version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of VFIO Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: VFIO Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: VFIO Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: VFIO Manager image tag + type: string + type: object + vgpuDeviceManager: + description: VGPUDeviceManager spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: NVIDIA vGPU devices configuration for NVIDIA vGPU + Device Manager container + properties: + default: + default: default + description: Default config name within the ConfigMap + type: string + name: + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Device + Manager is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Device Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Device Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Device Manager image tag + type: string + type: object + vgpuManager: + description: VGPUManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
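Every component spec in this CRD shares the same resources block, and the long pattern above is the standard Kubernetes quantity grammar, so plain integers, decimal SI suffixes, and binary suffixes are all accepted. A sketch with arbitrary values:

```yaml
# Illustrative values only; any quantity matching the pattern above is valid.
resources:
  requests:
    cpu: "200m"        # decimal SI suffix: 0.2 CPU
    memory: "128Mi"    # binary suffix
  limits:
    cpu: 1             # bare integer also matches (x-kubernetes-int-or-string)
    memory: "256Mi"    # requests cannot exceed limits
```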
+ type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Managerrepository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag(version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Manager + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Manager image tag + type: string + type: object + required: + - daemonsets + - dcgm + - dcgmExporter + - devicePlugin + - driver + - gfd + - nodeStatusExporter + - operator + - toolkit + type: object + status: + description: ClusterPolicyStatus defines the observed state of ClusterPolicy + properties: + conditions: + description: Conditions is a list of conditions representing the ClusterPolicy's + current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. 
For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace indicates a namespace in which the operator + is installed + type: string + state: + description: State indicates status of ClusterPolicy + enum: + - ignored + - ready + - notReady + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/bundle/v24.6.2/manifests/nvidia.com_nvidiadrivers.yaml b/bundle/v24.6.2/manifests/nvidia.com_nvidiadrivers.yaml new file mode 100644 index 000000000..665088edd --- /dev/null +++ b/bundle/v24.6.2/manifests/nvidia.com_nvidiadrivers.yaml @@ -0,0 +1,810 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: nvidiadrivers.nvidia.com +spec: + group: nvidia.com + names: + kind: NVIDIADriver + listKind: NVIDIADriverList + plural: nvidiadrivers + shortNames: + - nvd + - nvdriver + - nvdrivers + singular: nvidiadriver + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: NVIDIADriver is the Schema for the nvidiadrivers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NVIDIADriverSpec defines the desired state of NVIDIADriver + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + certConfig: + description: 'Optional: Custom certificates configuration for NVIDIA + Driver container' + properties: + name: + type: string + type: object + driverType: + default: gpu + description: DriverType defines NVIDIA driver type + enum: + - gpu + - vgpu + - vgpu-host-manager + type: string + x-kubernetes-validations: + - message: driverType is an immutable field. Please create a new NvidiaDriver + resource instead when you want to change this setting. + rule: self == oldSelf + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present in + a Container. 
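Given the new cluster-scoped NVIDIADriver CRD above (group nvidia.com, version v1alpha1, short names nvd/nvdriver), a minimal custom resource might look like the sketch below. The metadata name and node label are illustrative, and note that driverType is validated as immutable:

```yaml
apiVersion: nvidia.com/v1alpha1
kind: NVIDIADriver
metadata:
  name: gpu-driver-default        # illustrative; the resource is cluster-scoped
spec:
  driverType: gpu                 # immutable: gpu, vgpu, or vgpu-host-manager
  image: nvcr.io/nvidia/driver    # schema default
  nodeSelector:
    example.com/gpu-pool: "a100"  # hypothetical node label
```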
+                  properties:
+                    name:
+                      description: Name of the environment variable.
+                      type: string
+                    value:
+                      description: Value of the environment variable.
+                      type: string
+                  required:
+                  - name
+                  type: object
+                type: array
+              gdrcopy:
+                description: GDRCopy defines the spec for GDRCopy driver
+                properties:
+                  args:
+                    description: 'Optional: List of arguments'
+                    items:
+                      type: string
+                    type: array
+                  enabled:
+                    description: Enabled indicates if GDRCopy is enabled through GPU
+                      operator
+                    type: boolean
+                  env:
+                    description: 'Optional: List of environment variables'
+                    items:
+                      description: EnvVar represents an environment variable present
+                        in a Container.
+                      properties:
+                        name:
+                          description: Name of the environment variable.
+                          type: string
+                        value:
+                          description: Value of the environment variable.
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  image:
+                    description: GDRCopy driver image name
+                    pattern: '[a-zA-Z0-9\-]+'
+                    type: string
+                  imagePullPolicy:
+                    description: Image pull policy
+                    type: string
+                  imagePullSecrets:
+                    description: Image pull secrets
+                    items:
+                      type: string
+                    type: array
+                  repository:
+                    description: GDRCopy driver image repository
+                    type: string
+                  version:
+                    description: GDRCopy driver image tag
+                    type: string
+                type: object
+              gds:
+                description: GPUDirectStorage defines the spec for GDS driver
+                properties:
+                  args:
+                    description: 'Optional: List of arguments'
+                    items:
+                      type: string
+                    type: array
+                  enabled:
+                    description: Enabled indicates if GPUDirect Storage is enabled
+                      through GPU operator
+                    type: boolean
+                  env:
+                    description: 'Optional: List of environment variables'
+                    items:
+                      description: EnvVar represents an environment variable present
+                        in a Container.
+                      properties:
+                        name:
+                          description: Name of the environment variable.
+                          type: string
+                        value:
+                          description: Value of the environment variable.
+                          type: string
+                      required:
+                      - name
+                      type: object
+                    type: array
+                  image:
+                    description: NVIDIA GPUDirect Storage Driver image name
+                    pattern: '[a-zA-Z0-9\-]+'
+                    type: string
+                  imagePullPolicy:
+                    description: Image pull policy
+                    type: string
+                  imagePullSecrets:
+                    description: Image pull secrets
+                    items:
+                      type: string
+                    type: array
+                  repository:
+                    description: NVIDIA GPUDirect Storage Driver image repository
+                    type: string
+                  version:
+                    description: NVIDIA GPUDirect Storage Driver image tag
+                    type: string
+                type: object
+              image:
+                default: nvcr.io/nvidia/driver
+                description: NVIDIA Driver container image name
+                type: string
+              imagePullPolicy:
+                description: Image pull policy
+                type: string
+              imagePullSecrets:
+                description: Image pull secrets
+                items:
+                  type: string
+                type: array
+              kernelModuleConfig:
+                description: 'Optional: Kernel module configuration parameters for
+                  the NVIDIA Driver'
+                properties:
+                  name:
+                    type: string
+                type: object
+              labels:
+                additionalProperties:
+                  type: string
+                description: |-
+                  Optional: Map of string keys and values that can be used to organize and categorize
+                  (scope and select) objects. May match selectors of replication controllers
+                  and services.
+                type: object
+              licensingConfig:
+                description: 'Optional: Licensing configuration for NVIDIA vGPU licensing'
+                properties:
+                  name:
+                    type: string
+                  nlsEnabled:
+                    description: NLSEnabled indicates if NVIDIA Licensing System is
+                      used for licensing.
+ type: boolean + type: object + livenessProbe: + description: NVIDIA Driver container liveness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + manager: + description: Manager represents configuration for NVIDIA Driver Manager + initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Managerrepository path + type: string + version: + description: Version represents NVIDIA Driver Manager image tag(version) + type: string + type: object + nodeAffinity: + description: Affinity specifies node affinity rules for driver pods + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding + weight. + properties: + matchExpressions: + description: A list of node selector requirements by + node's labels. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by + node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies a selector for installation of + NVIDIA driver + type: object + priorityClassName: + description: 'Optional: Set priorityClassName' + type: string + rdma: + description: GPUDirectRDMA defines the spec for NVIDIA Peer Memory + driver + properties: + enabled: + description: Enabled indicates if GPUDirect RDMA is enabled through + GPU operator + type: boolean + useHostMofed: + description: UseHostMOFED indicates to use MOFED drivers directly + installed on the host to enable GPUDirect RDMA + type: boolean + type: object + readinessProbe: + description: NVIDIA Driver container readiness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1.
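The affinity schema above is verbose, so a concrete fragment helps: the sketch below pins driver pods to nodes carrying a given label via `requiredDuringSchedulingIgnoredDuringExecution`. The label key is a hypothetical example, not one defined by this schema.

```yaml
# Illustrative nodeAffinity fragment for an NVIDIADriver spec.
nodeAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
      - matchExpressions:
          - key: example.com/gpu-node   # hypothetical label key
            operator: In
            values:
              - "true"
```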
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + repoConfig: + description: 'Optional: Custom repo configuration for NVIDIA Driver + container' + properties: + name: + type: string + type: object + repository: + description: NVIDIA Driver repository + type: string + resources: + description: 'Optional: Define resources requests and limits for each + pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + startupProbe: + description: NVIDIA Driver container startup probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + tolerations: + description: 'Optional: Set tolerations' + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + useOpenKernelModules: + description: UseOpenKernelModules indicates if the open GPU kernel + modules should be used + type: boolean + usePrecompiled: + description: UsePrecompiled indicates if deployment of NVIDIA Driver + using pre-compiled modules is enabled + type: boolean + x-kubernetes-validations: + - message: usePrecompiled is an immutable field. Please create a new + NvidiaDriver resource instead when you want to change this setting. + rule: self == oldSelf + version: + description: NVIDIA Driver version (or just branch for precompiled + drivers) + type: string + virtualTopologyConfig: + description: 'Optional: Virtual Topology Daemon configuration for + NVIDIA vGPU drivers' + properties: + name: + description: 'Optional: Config name representing virtual topology + daemon configuration file nvidia-topologyd.conf' + type: string + type: object + required: + - driverType + - image + type: object + status: + description: NVIDIADriverStatus defines the observed state of NVIDIADriver + properties: + conditions: + description: Conditions is a list of conditions representing the NVIDIADriver's + current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
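A typical use of the `tolerations` field documented above is letting driver pods schedule onto nodes that have been tainted for GPU workloads. A minimal sketch, assuming those nodes carry an `nvidia.com/gpu` taint:

```yaml
# Illustrative tolerations entry; the taint key is an assumption.
tolerations:
  - key: nvidia.com/gpu
    operator: Exists
    effect: NoSchedule
```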
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace indicates a namespace in which the operator + and driver are installed + type: string + state: + description: |- + INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + Important: Run "make" to regenerate code after modifying this file + State indicates status of NVIDIADriver instance + enum: + - ignored + - ready + - notReady + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/bundle/v24.6.2/metadata/annotations.yaml b/bundle/v24.6.2/metadata/annotations.yaml new file mode 100644 index 000000000..f7383d5c4 --- /dev/null +++ b/bundle/v24.6.2/metadata/annotations.yaml @@ -0,0 +1,17 @@ +annotations: + operators.operatorframework.io.bundle.channels.v1: stable,v24.6 + operators.operatorframework.io.bundle.channel.default.v1: v24.6 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: gpu-operator-certified + operators.operatorframework.io.metrics.builder: operator-sdk-v1.4.0 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 + operators.operatorframework.io.test.config.v1: tests/scorecard/ + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operatorframework.io/cluster-monitoring: "true" + operatorframework.io/suggested-namespace: nvidia-gpu-operator + + # Annotations to specify OCP versions compatibility. 
+ com.redhat.openshift.versions: v4.12-v4.16 diff --git a/bundle/v24.9.0/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/v24.9.0/manifests/gpu-operator-certified.clusterserviceversion.yaml new file mode 100644 index 000000000..ba850b0cc --- /dev/null +++ b/bundle/v24.9.0/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -0,0 +1,925 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + labels: + operatorframework.io/arch.arm64: supported + operatorframework.io/arch.amd64: supported + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/warn: privileged + annotations: + features.operators.openshift.io/disconnected: "true" + features.operators.openshift.io/fips-compliant: "false" + features.operators.openshift.io/proxy-aware: "true" + features.operators.openshift.io/tls-profiles: "false" + features.operators.openshift.io/token-auth-aws: "false" + features.operators.openshift.io/token-auth-azure: "false" + features.operators.openshift.io/token-auth-gcp: "false" + features.operators.openshift.io/cnf: "false" + features.operators.openshift.io/cni: "false" + features.operators.openshift.io/csi: "false" + olm.skipRange: '>=1.9.0 <24.9.0' + alm-examples: |- + [ + { + "apiVersion": "nvidia.com/v1", + "kind": "ClusterPolicy", + "metadata": { + "name": "gpu-cluster-policy" + }, + "spec": { + "operator": { + "defaultRuntime": "crio", + "use_ocp_driver_toolkit": true, + "initContainer": { + } + }, + "sandboxWorkloads": { + "enabled": false, + "defaultWorkload": "container" + }, + "driver": { + "enabled": true, + "useNvidiaDriverCRD": false, + "useOpenKernelModules": false, + "upgradePolicy": { + "autoUpgrade": true, + "drain": { + "deleteEmptyDir": false, + "enable": false, + "force": false, + "timeoutSeconds": 300 + }, + "maxParallelUpgrades": 1, + "maxUnavailable": "25%", + "podDeletion": { + "deleteEmptyDir": false, + "force": false, + "timeoutSeconds": 300 + }, + "waitForCompletion": { + "timeoutSeconds": 0 + } + }, + "repoConfig": { + "configMapName": "" + }, + "certConfig": { + "name": "" + }, + "licensingConfig": { + "nlsEnabled": true, + "configMapName": "" + }, + "virtualTopology": { + "config": "" + }, + "kernelModuleConfig": { + "name": "" + } + }, + "dcgmExporter": { + "enabled": true, + "config": { + "name": "" + }, + "serviceMonitor": { + "enabled": true + } + }, + "dcgm": { + "enabled": true + }, + "daemonsets": { + "updateStrategy": "RollingUpdate", + "rollingUpdate": { + "maxUnavailable": "1" + } + }, + "devicePlugin": { + "enabled": true, + "config": { + "name": "", + "default": "" + }, + "mps": { + "root": "/run/nvidia/mps" + } + }, + "gfd": { + "enabled": true + }, + "migManager": { + "enabled": true + }, + "nodeStatusExporter": { + "enabled": true + }, + "mig": { + "strategy": "single" + }, + "toolkit": { + "enabled": true + }, + "validator": { + "plugin": { + "env": [ + { + "name": "WITH_WORKLOAD", + "value": "false" + } + ] + } + }, + "vgpuManager": { + "enabled": false + }, + "vgpuDeviceManager": { + "enabled": true + }, + "sandboxDevicePlugin": { + "enabled": true + }, + "vfioManager": { + "enabled": true + }, + "gds": { + "enabled": false + }, + "gdrcopy": { + "enabled": false + } + } + }, + { + "apiVersion": "nvidia.com/v1alpha1", + "kind": "NVIDIADriver", + "metadata": { + "name": "gpu-driver" + }, + "spec": { + "driverType": "gpu", + "repository": "nvcr.io/nvidia", + "image": "driver", + "version": 
"sha256:8a9a9e9470f64d340a7f3106a03e9622fa98e25368fbfb7ce9c416ad98f6d951", + "nodeSelector": {}, + "manager": {}, + "repoConfig": { + "name": "" + }, + "certConfig": { + "name": "" + }, + "licensingConfig": { + "nlsEnabled": true, + "name": "" + }, + "virtualTopologyConfig": { + "name": "" + }, + "kernelModuleConfig": { + "name": "" + } + } + } + ] + operators.operatorframework.io/builder: operator-sdk-v1.4.0 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 + operatorframework.io/suggested-namespace: nvidia-gpu-operator + capabilities: Deep Insights + categories: AI/Machine Learning, OpenShift Optional + certified: "true" + containerImage: nvcr.io/nvidia/gpu-operator@sha256:3d741e8399519227cba0391b471fab2161501b0983e66789fabead4062d801c6 + createdAt: "Thu Oct 31 09:45:53 PDT 2024" + description: Automate the management and monitoring of NVIDIA GPUs. + provider: NVIDIA + repository: http://github.com/NVIDIA/gpu-operator + support: NVIDIA + name: gpu-operator-certified.v24.9.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + relatedImages: + - name: gpu-operator-image + image: nvcr.io/nvidia/gpu-operator@sha256:3d741e8399519227cba0391b471fab2161501b0983e66789fabead4062d801c6 + - name: dcgm-exporter-image + image: nvcr.io/nvidia/k8s/dcgm-exporter@sha256:10ff95e83bc137796f5be76278a6b38fd31c7360e62c7e72011b428f8848c791 + - name: dcgm-image + image: nvcr.io/nvidia/cloud-native/dcgm@sha256:7b0ebd6c40a11b6484dc4385605372511e4e93132a44d2a3d6ec2e36c24e6783 + - name: container-toolkit-image + image: nvcr.io/nvidia/k8s/container-toolkit@sha256:bb6b55a5bd6419df3ca2d8ec0738b87491fc45e15587e613663890dc3a8e6e13 + - name: driver-image + image: nvcr.io/nvidia/driver@sha256:8a9a9e9470f64d340a7f3106a03e9622fa98e25368fbfb7ce9c416ad98f6d951 + - name: driver-image-535 + image: nvcr.io/nvidia/driver@sha256:6b75c7534efa6ec480e8eeea625949cd74330aad287239e1abf160622c4814f3 + - name: driver-image-560 + image: nvcr.io/nvidia/driver@sha256:38b66a8d44cab9e2c62da9e101f32cd9dbcb5e02d8e57b47671284d374ca3695 + - name: driver-image-565 + image: nvcr.io/nvidia/driver@sha256:d55b57938866e538acc3a71ca32f8cf87e71c591abd4a34695ee428e7ec2fa73 + - name: device-plugin-image + image: nvcr.io/nvidia/k8s-device-plugin@sha256:7089559ce6153018806857f5049085bae15b3bf6f1c8bd19d8b12f707d087dea + - name: gpu-feature-discovery-image + image: nvcr.io/nvidia/k8s-device-plugin@sha256:7089559ce6153018806857f5049085bae15b3bf6f1c8bd19d8b12f707d087dea + - name: mig-manager-image + image: nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:40830d3561c14743f484d45b498141f9e86b1308e16fae3978110783927264ab + - name: init-container-image + image: nvcr.io/nvidia/cuda@sha256:748a2c5178e5c5811b66183bd0ce87d9fdccf992c0ad9b1a5076841e45533190 + - name: gpu-operator-validator-image + image: nvcr.io/nvidia/cloud-native/gpu-operator-validator@sha256:70a0bd29259820d6257b04b0cdb6a175f9783d4dd19ccc4ec6599d407c359ba5 + - name: k8s-driver-manager-image + image: nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:b072c5793be65eee556eaff1b9cbbd115a1ef29982be95b2959adfcb4bc72382 + - name: vfio-manager-image + image: nvcr.io/nvidia/cuda@sha256:748a2c5178e5c5811b66183bd0ce87d9fdccf992c0ad9b1a5076841e45533190 + - name: sandbox-device-plugin-image + image: nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:4ffa1cd2a6497eb647a89ed259dcfb007554737b9d80f69bc173a2c3cd72a1da + - name: vgpu-device-manager-image + image: nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:7edd7a0413dcb39b6e3bcefaf06812f3293c8e480ca10783e821a561ed686200 + - name: 
gdrcopy-image + image: nvcr.io/nvidia/cloud-native/gdrdrv@sha256:cf39d78ced7fb5727a9668ee2cd44b14bb7a23a95b83d5464b7d755740e02121 + customresourcedefinitions: + owned: + - name: nvidiadrivers.nvidia.com + kind: NVIDIADriver + version: v1alpha1 + displayName: NVIDIADriver + description: NVIDIADriver allows you to deploy the NVIDIA driver + resources: + - kind: ServiceAccount + name: '' + version: v1 + - kind: DaemonSet + name: '' + version: apps/v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: clusterpolicies + name: '' + version: v1 + - kind: clusterversions + name: '' + version: v1 + - kind: nodes + name: '' + version: v1 + - kind: status + name: '' + version: v1 + specDescriptors: + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + statusDescriptors: + - description: The current state of the driver. 
+ displayName: State + path: state + x-descriptors: + - 'urn:alm:descriptor:text' + - name: clusterpolicies.nvidia.com + kind: ClusterPolicy + version: v1 + group: nvidia.com + displayName: ClusterPolicy + description: ClusterPolicy allows you to configure the GPU Operator + resources: + - kind: ServiceAccount + name: '' + version: v1 + - kind: Deployment + name: '' + version: apps/v1 + - kind: DaemonSet + name: '' + version: apps/v1 + - kind: ConfigMap + name: '' + version: v1 + - kind: Pod + name: '' + version: v1 + - kind: clusterpolicies + name: '' + version: v1 + - kind: clusterversions + name: '' + version: v1 + - kind: nodes + name: '' + version: v1 + - kind: status + name: '' + version: v1 + specDescriptors: + - description: GPU Operator config + displayName: GPU Operator config + path: operator + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: operator.validator.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: Image pull secrets + displayName: Image pull secrets + path: operator.validator.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - description: NVIDIA GPU/vGPU Driver config + displayName: NVIDIA GPU/vGPU Driver config + path: driver + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: driver.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: driver.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: driver.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: driver.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: driver.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA DCGM Exporter config + displayName: NVIDIA DCGM Exporter config + path: dcgmExporter + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: dcgmExporter.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: dcgmExporter.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: dcgmExporter.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: dcgmExporter.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: 
dcgmExporter.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA Device Plugin config + displayName: NVIDIA Device Plugin config + path: devicePlugin + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: devicePlugin.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: devicePlugin.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: devicePlugin.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: devicePlugin.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: devicePlugin.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: GPU Feature Discovery Plugin config + displayName: GPU Feature Discovery Plugin config + path: gfd + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: gfd.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: gfd.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: gfd.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: gfd.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: gfd.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - description: NVIDIA Container Toolkit config + displayName: NVIDIA Container Toolkit config + path: toolkit + - description: 'Optional: Set Node affinity' + displayName: Node affinity + path: toolkit.affinity + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:com.tectonic.ui:nodeAffinity' + - description: Node selector to control the selection of nodes (optional) + displayName: Node Selector + path: toolkit.nodeSelector + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:selector:Node' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - description: 'Optional: Set tolerations' + displayName: Tolerations + path: toolkit.tolerations + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - 'urn:alm:descriptor:io.kubernetes:Tolerations' + - description: Image pull secrets + displayName: Image pull secrets + path: toolkit.imagePullSecrets + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 
+ 'urn:alm:descriptor:com.tectonic.ui:advanced' + - displayName: ImagePullPolicy + description: 'Image pull policy (default: IfNotPresent)' + path: toolkit.imagePullPolicy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:imagePullPolicy' + - displayName: NVIDIA DCGM config + description: NVIDIA DCGM config + path: dcgm + - displayName: Validator config + description: Validator config + path: validator + - displayName: Node Status Exporter config + description: Node Status Exporter config + path: nodeStatusExporter + - displayName: Daemonsets config + description: Daemonsets config + path: daemonsets + - displayName: MIG config + description: MIG config + path: mig + - displayName: NVIDIA MIG Manager config + description: NVIDIA MIG Manager config + path: migManager + - displayName: PodSecurityPolicy config + description: PodSecurityPolicy config + path: psp + - displayName: NVIDIA GPUDirect Storage config + description: NVIDIA GPUDirect Storage config + path: gds + - displayName: Sandbox Workloads config + description: Sandbox Workloads config + path: sandboxWorkloads + - displayName: NVIDIA vGPU Manager config + description: NVIDIA vGPU Manager config + path: vgpuManager + - displayName: NVIDIA vGPU Device Manager config + description: NVIDIA vGPU Device Manager config + path: vgpuDeviceManager + - displayName: VFIO Manager config + description: VFIO Manager config + path: vfioManager + - displayName: NVIDIA Sandbox Device Plugin config + description: NVIDIA Sandbox Device Plugin config + path: sandboxDevicePlugin + statusDescriptors: + - description: The current state of the operator. + displayName: State + path: state + x-descriptors: + - 'urn:alm:descriptor:text' + displayName: NVIDIA GPU Operator + description: > + Kubernetes provides access to special hardware resources such as NVIDIA + GPUs, NICs, Infiniband adapters and other devices through the [device plugin + framework](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/). + However, configuring and managing nodes with these hardware resources + requires configuration of multiple software components such as drivers, + container runtimes or other libraries, which is difficult and error-prone. + + The NVIDIA GPU Operator uses the [operator + framework](https://cloud.redhat.com/blog/introducing-the-operator-framework) within + Kubernetes to automate the management of all NVIDIA software components + needed to provision and monitor GPUs. + These components include the NVIDIA drivers (to enable CUDA), Kubernetes + device plugin for GPUs, the NVIDIA Container Runtime, automatic node + labelling and NVIDIA DCGM exporter. + + Visit the official site of the [GPU Operator](https://github.com/NVIDIA/gpu-operator) for more information. + For getting started with using the GPU Operator with OpenShift, see the instructions + [here](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/openshift/contents.html).
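A CSV like this one is normally installed through an OLM Subscription rather than applied directly. A minimal sketch follows; the package name and suggested namespace come from the bundle metadata above, while the channel, catalog source, and source namespace are assumptions.

```yaml
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: gpu-operator-certified
  namespace: nvidia-gpu-operator          # suggested-namespace from the annotations
spec:
  channel: stable                         # assumed channel
  name: gpu-operator-certified
  source: certified-operators             # assumed catalog source
  sourceNamespace: openshift-marketplace  # assumed catalog namespace
```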
+ icon: + - base64data: iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAB2lBMVEUAAAD///8EBAN3uQACAgIAAAQJDQUCAgB1tgAHCQf+/v5Ufg5Hagxxqwt+xgJ3uAB9wwB4vQBRUVEeLA3e3t5nZ2coKCgODg4FBwZ9wwR6wAJ4vADz8/MbGxt5tw1vpw1/wgoOFwkLDwh9xQH5+fny8vLw8PDFxcWysrKFhYVvb282NjYyMjIqKioXFxdikxRYgxNCYxJQdhFqoQ9xrg16ugxyqgyAxQkEBQj7+/v29vbIyMhjY2NbW1tHR0cvLy8kJCQdHR0ZGRlKbxJ8uhFNcxFVgBAxSBBgkg93tQ50sA4qPg4XIg18vwsbKQsSGgsLCwsMEwqCyQeByQFztADPz8+/v7+6urqWlpZra2tKSkogICASEhJmmRE8XBA5VRA2UBBonA9biA9GaQ4sQg4jMw4mOQ0aJw2GzgsUHgttpAqJ0Ql/wQWG0AJ8vwF0uQCtra2jo6OQkJB9fX1VVVVCQkI9PT0iIiIUFBRSfBNgjhA7WRBGZw+GywmFzgaAyASBxQN2twDb29u2traenp6Kiop+fn53d3dzc3NyqRV4sxM/YBNAXRElNhBjlQ+IzA00TQ16vgxJbgp6vAl4tgJ3vgDs7Ozn5+fa2trS0tJCXRY6VBV6thSL1gf4nFdFAAAD80lEQVRYw+zSOXPaQBgG4He0LJJmbGRGDUIzuvgBQiAEPfcdwC33DTbUtmOwSyc+4iRucvzXRImLFJmRShc8xXbfu+9+szg4OHjjAsH/iFD49q7rqM6xc/wPtWyBhS8sC94ObWRCZDksh1+RzmcEfI0DoPrjylEkSTgViMs9udjYTwMG4Gf51Z1BM81ioRwit+QvgYsdUQZeKFr3ladyKXvVr+pAM5uKcmRLXFzoCIxn+0i/8lSaBMHnfi7qowfQuZnm3PuFPwGs13zD3NlViozY/z4YD6/TCQORbPr2q78GLB0ou5IO40pd5AxQZnJ83m2y9Ju2JYKfgEhWC18aEIfrZLURHwQC0B87ySZwHxX8BNDWB1KfQfyxT2TA24uPQMt8yTWA3obz8wQGlhTN06Z900MkuJLrYu3u5LkK9LTtGRF8NEDLeSnXYLUdHUFVlpPqTa4IamlhJZ464biY1w4CKGrROOW7uwLlV+Q02lanCF6cbSoPVLzUfPwDll5I9T6WyXWhZre1yjiI6VCSzCWY3+FKaAwGHngzpEygx6+V6Uzk6TJR7yhWxJ1bFgTPJ7gMc58aUCq+n+qNT6Pn8y/xOcCiZZVjnJ+AAPhEuj0SKZ9bL9ZpNS9SgM6z9p5w3jt43cMvecfWBhm7dtfEpfhYMDBYpFd7mDZIAxPCFKgBhB0hkWbE2wVMyqycfhOMEiebSzFz5IMTEjw7E87UFj4GVR7GXqaSkoIcISEc/I38/PwhOTUMRBrADgwK09zgYGUBqbwcARiQyp3Eyk6kC4BloqtbJTcaSHIHShALWFmBSRuCWBGC+AtDMAAGIpAAc9mBiB0sCLSXHUSygxSxEIoE7IKEgbhopKgogC96x04QCMMw/H0cG6f0cEmBHaLc7FFQzApoTLwtQgWUWo26glx2mzGkyoHM1PPMO/NrnSH8e2QAiRsZ8S3ZuJoW5Udg5moGoMRLN2gAnkcUctueJ1gADsdtlZ2AgmSYoaDZBXwRctcwy6HN3XX/wfnTnA7Q5x0S0Gku4wHpe7Ql8Mbtu4TqC3qcADGtUl4O3eK0AkZdKH1mU/a6MFQGA7pQGoAVoAuuPYZlLJF2BawVLLjwac6Q8wUax61/CpKQAT6ZX3hFqoqqAFvuf4AzM+NgsoBS/wcSOD7SFzyf6CE9UQK9II1MRvIJm8QSgsLiBZuypsAWKyARElgx5FcLv1N4nFLbB45Sh6+TzsQRtn7bz/B3fS9GQ12bgUE2PKycQbwgXD0SWLwVhpZFq4eHhWloOjLoqGvoRYRGAR2vp2EtpNUaTUpiRAizMAEhKNXpYZNnAUlBCSgFYTIxQTlMMJNGwSgYBdQHAFsKs+/bUkeyAAAAAElFTkSuQmCC + mediatype: image/png + install: + spec: + clusterPermissions: + - serviceAccountName: gpu-operator + rules: + - apiGroups: + - nvidia.com + resources: + - clusterpolicies + - clusterpolicies/finalizers + - clusterpolicies/status + - nvidiadrivers + - nvidiadrivers/finalizers + - nvidiadrivers/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - deletecollection + - apiGroups: + - config.openshift.io + resources: + - clusterversions + - proxies + verbs: + - get + - list + - watch + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use + - create + - get + - list + - watch + - patch + - update + - delete + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use + resourceNames: + - hostmount-anyuid + - apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - get + - list + - watch + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - create + - watch + - update + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - "" + resources: + - pods + - pods/eviction + verbs: + - create + - get + - list + - watch + - update 
+ - patch + - delete + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - list + - watch + - apiGroups: + - node.k8s.io + resources: + - runtimeclasses + verbs: + - get + - list + - create + - update + - watch + - delete + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + permissions: + - serviceAccountName: gpu-operator + rules: + - apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - secrets + - services + - services/finalizers + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - prometheusrules + verbs: + - get + - list + - create + - watch + - update + - delete + deployments: + - name: gpu-operator + spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: gpu-operator + app: gpu-operator + strategy: {} + template: + metadata: + labels: + app.kubernetes.io/component: gpu-operator + app: gpu-operator + nvidia.com/gpu-driver-upgrade-drain.skip: "true" + spec: + priorityClassName: system-node-critical + containers: + - args: + - --leader-elect + - --leader-lease-renew-deadline + - "60s" + image: nvcr.io/nvidia/gpu-operator@sha256:3d741e8399519227cba0391b471fab2161501b0983e66789fabead4062d801c6 + command: + - gpu-operator + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: gpu-operator + ports: + - name: metrics + containerPort: 8080 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 200m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /host-etc/os-release + name: host-os-release + readOnly: true + env: + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "VALIDATOR_IMAGE" + value: "nvcr.io/nvidia/cloud-native/gpu-operator-validator@sha256:70a0bd29259820d6257b04b0cdb6a175f9783d4dd19ccc4ec6599d407c359ba5" + - name: "GFD_IMAGE" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:7089559ce6153018806857f5049085bae15b3bf6f1c8bd19d8b12f707d087dea" + - name: "CONTAINER_TOOLKIT_IMAGE" + value: "nvcr.io/nvidia/k8s/container-toolkit@sha256:bb6b55a5bd6419df3ca2d8ec0738b87491fc45e15587e613663890dc3a8e6e13" + - name: "DCGM_IMAGE" + value: "nvcr.io/nvidia/cloud-native/dcgm@sha256:7b0ebd6c40a11b6484dc4385605372511e4e93132a44d2a3d6ec2e36c24e6783" + - name: "DCGM_EXPORTER_IMAGE" + value: "nvcr.io/nvidia/k8s/dcgm-exporter@sha256:10ff95e83bc137796f5be76278a6b38fd31c7360e62c7e72011b428f8848c791" + - name: "DEVICE_PLUGIN_IMAGE" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:7089559ce6153018806857f5049085bae15b3bf6f1c8bd19d8b12f707d087dea" + - name: "DRIVER_IMAGE" + value: 
"nvcr.io/nvidia/driver@sha256:8a9a9e9470f64d340a7f3106a03e9622fa98e25368fbfb7ce9c416ad98f6d951" + - name: "DRIVER_IMAGE-535" + value: "nvcr.io/nvidia/driver@sha256:6b75c7534efa6ec480e8eeea625949cd74330aad287239e1abf160622c4814f3" + - name: "DRIVER_IMAGE-560" + value: "nvcr.io/nvidia/driver@sha256:38b66a8d44cab9e2c62da9e101f32cd9dbcb5e02d8e57b47671284d374ca3695" + - name: "DRIVER_IMAGE-565" + value: "nvcr.io/nvidia/driver@sha256:d55b57938866e538acc3a71ca32f8cf87e71c591abd4a34695ee428e7ec2fa73" + - name: "DRIVER_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:b072c5793be65eee556eaff1b9cbbd115a1ef29982be95b2959adfcb4bc72382" + - name: "MIG_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:40830d3561c14743f484d45b498141f9e86b1308e16fae3978110783927264ab" + - name: "CUDA_BASE_IMAGE" + value: "nvcr.io/nvidia/cuda@sha256:748a2c5178e5c5811b66183bd0ce87d9fdccf992c0ad9b1a5076841e45533190" + - name: "VFIO_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cuda@sha256:748a2c5178e5c5811b66183bd0ce87d9fdccf992c0ad9b1a5076841e45533190" + - name: "SANDBOX_DEVICE_PLUGIN_IMAGE" + value: "nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:4ffa1cd2a6497eb647a89ed259dcfb007554737b9d80f69bc173a2c3cd72a1da" + - name: "VGPU_DEVICE_MANAGER_IMAGE" + value: "nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:7edd7a0413dcb39b6e3bcefaf06812f3293c8e480ca10783e821a561ed686200" + - name: "GDRCOPY_IMAGE" + value: "nvcr.io/nvidia/cloud-native/gdrdrv@sha256:cf39d78ced7fb5727a9668ee2cd44b14bb7a23a95b83d5464b7d755740e02121" + terminationGracePeriodSeconds: 10 + volumes: + - hostPath: + path: /etc/os-release + name: host-os-release + serviceAccountName: gpu-operator + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: false + type: AllNamespaces + keywords: + - gpu + - cuda + - compute + - operator + - deep learning + - monitoring + - tesla + maintainers: + - name: NVIDIA + email: operator_feedback@nvidia.com + maturity: stable + provider: + name: NVIDIA Corporation + version: 24.9.0 + replaces: gpu-operator-certified.v24.6.2 diff --git a/bundle/v24.9.0/manifests/nvidia.com_clusterpolicies.yaml b/bundle/v24.9.0/manifests/nvidia.com_clusterpolicies.yaml new file mode 100644 index 000000000..8ee8e9a8a --- /dev/null +++ b/bundle/v24.9.0/manifests/nvidia.com_clusterpolicies.yaml @@ -0,0 +1,2384 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: clusterpolicies.nvidia.com +spec: + group: nvidia.com + names: + kind: ClusterPolicy + listKind: ClusterPolicyList + plural: clusterpolicies + singular: clusterpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1 + schema: + openAPIV3Schema: + description: ClusterPolicy is the Schema for the clusterpolicies API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterPolicySpec defines the desired state of ClusterPolicy + properties: + ccManager: + description: CCManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + defaultMode: + description: Default CC mode setting for compatible GPUs on the + node + enum: + - "on" + - "off" + - devtools + type: string + enabled: + description: Enabled indicates if deployment of CC Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: CC Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: CC Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: CC Manager image tag + type: string + type: object + cdi: + description: CDI configures how the Container Device Interface is + used in the cluster + properties: + default: + default: false + description: Default indicates whether to use CDI as the default + mechanism for providing GPU access to containers. + type: boolean + enabled: + default: false + description: Enabled indicates whether CDI can be used to make + GPUs accessible to containers. 
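The `cdi` block above gates how GPUs are exposed to containers. A minimal ClusterPolicy fragment that allows CDI while keeping the legacy injection path as the default (both booleans default to `false` per the schema):

```yaml
# Illustrative ClusterPolicy fragment.
spec:
  cdi:
    enabled: true    # CDI may be used to make GPUs accessible
    default: false   # but it is not the default mechanism
```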
+ type: boolean + type: object + daemonsets: + description: Daemonset defines common configuration for all Daemonsets + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + labels: + additionalProperties: + type: string + description: |- + Optional: Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + type: object + priorityClassName: + type: string + rollingUpdate: + description: 'Optional: Configuration for rolling update of all + DaemonSet pods' + properties: + maxUnavailable: + type: string + type: object + tolerations: + description: 'Optional: Set tolerations' + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + updateStrategy: + default: RollingUpdate + enum: + - RollingUpdate + - OnDelete + type: string + type: object + dcgm: + description: DCGM component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA DCGM Hostengine + as a separate pod is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable.
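The `daemonsets` block documented above applies cluster-wide defaults to every operand DaemonSet. A small sketch matching the values used in the alm-examples earlier in this bundle; the priorityClassName is an assumption, and any existing PriorityClass would work:

```yaml
# Illustrative daemonsets fragment for a ClusterPolicy spec.
daemonsets:
  updateStrategy: RollingUpdate
  rollingUpdate:
    maxUnavailable: "1"
  priorityClassName: system-node-critical   # assumed
```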
+ type: string + required: + - name + type: object + type: array + hostPort: + description: 'Deprecated: HostPort represents host port that needs + to be bound for DCGM engine (Default: 5555)' + format: int32 + type: integer + image: + description: NVIDIA DCGM image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA DCGM image tag + type: string + type: object + dcgmExporter: + description: DCGMExporter spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom metrics configuration for NVIDIA + DCGM Exporter' + properties: + name: + description: ConfigMap name with file dcgm-metrics.csv for + metrics to be collected by NVIDIA DCGM Exporter + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA DCGM Exporter + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA DCGM Exporter image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM Exporter image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + serviceMonitor: + description: 'Optional: ServiceMonitor configuration for NVIDIA + DCGM Exporter' + properties: + additionalLabels: + additionalProperties: + type: string + description: AdditionalLabels to add to ServiceMonitor instance + for NVIDIA DCGM Exporter + type: object + enabled: + description: Enabled indicates if ServiceMonitor is deployed + for NVIDIA DCGM Exporter + type: boolean + honorLabels: + description: HonorLabels chooses the metric’s labels on collisions + with target labels. + type: boolean + interval: + description: |- + Interval which metrics should be scraped from NVIDIA DCGM Exporter. If not specified Prometheus’ global scrape interval is used. + Supported units: y, w, d, h, m, s, ms + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + relabelings: + description: Relabelings allows to rewrite labels on metric + sets for NVIDIA DCGM Exporter + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + version: + description: NVIDIA DCGM Exporter image tag + type: string + type: object + devicePlugin: + description: DevicePlugin component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Configuration for the NVIDIA Device Plugin + via the ConfigMap' + properties: + default: + description: Default config name within the ConfigMap for + the NVIDIA Device Plugin config + type: string + name: + description: ConfigMap name for NVIDIA Device Plugin config + including shared config between plugin and GFD + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Device + Plugin through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + mps: + description: 'Optional: MPS related configuration for the NVIDIA + Device Plugin' + properties: + root: + default: /run/nvidia/mps + description: Root defines the MPS root path on the host + type: string + type: object + repository: + description: NVIDIA Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Device Plugin image tag + type: string + type: object + driver: + description: Driver component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + certConfig: + description: 'Optional: Custom certificates configuration for + NVIDIA Driver container' + properties: + name: + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Driver + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + kernelModuleConfig: + description: 'Optional: Kernel module configuration parameters + for the NVIDIA Driver' + properties: + name: + type: string + type: object + licensingConfig: + description: 'Optional: Licensing configuration for NVIDIA vGPU + licensing' + properties: + configMapName: + type: string + nlsEnabled: + description: NLSEnabled indicates if NVIDIA Licensing System + is used for licensing. + type: boolean + type: object + livenessProbe: + description: NVIDIA Driver container liveness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + manager: + description: Manager represents configuration for NVIDIA Driver + Manager initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
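+ # Illustrative sketch (not part of the generated schema): the driver probe
+ # fields defined above can be tuned from a ClusterPolicy instance, e.g. for
+ # slow driver builds. Values are examples only.
+ #   driver:
+ #     livenessProbe:
+ #       initialDelaySeconds: 60
+ #       periodSeconds: 10
+ #       failureThreshold: 3
+ #       timeoutSeconds: 10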
+ type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Manager repository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag (version) + type: string + type: object + rdma: + description: GPUDirectRDMASpec defines the properties for nvidia-peermem + deployment + properties: + enabled: + description: Enabled indicates if GPUDirect RDMA is enabled + through GPU operator + type: boolean + useHostMofed: + description: UseHostMOFED indicates to use MOFED drivers directly + installed on the host to enable GPUDirect RDMA + type: boolean + type: object + readinessProbe: + description: NVIDIA Driver container readiness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + repoConfig: + description: 'Optional: Custom repo configuration for NVIDIA Driver + container' + properties: + configMapName: + type: string + type: object + repository: + description: NVIDIA Driver image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + startupProbe: + description: NVIDIA Driver container startup probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + upgradePolicy: + description: Driver auto-upgrade settings + properties: + autoUpgrade: + default: false + description: |- + AutoUpgrade is a global switch for automatic upgrade feature + if set to false all other options are ignored + type: boolean + drain: + description: DrainSpec describes configuration for node drain + during automatic upgrade + properties: + deleteEmptyDir: + default: false + description: |- + DeleteEmptyDir indicates if should continue even if there are pods using emptyDir + (local data that will be deleted when the node is drained) + type: boolean + enable: + default: false + description: Enable indicates if node draining is allowed + during upgrade + type: boolean + force: + default: false + description: Force indicates if force draining is allowed + type: boolean + podSelector: + description: |- + PodSelector specifies a label selector to filter pods on the node that need to be drained + For more details on label selectors, see: + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + type: string + timeoutSeconds: + default: 300 + description: TimeoutSecond specifies the length of time + in seconds to wait before giving up drain, zero means + infinite + minimum: 0 + type: integer + type: object + maxParallelUpgrades: + default: 1 + description: |- + MaxParallelUpgrades indicates how many nodes can be upgraded in parallel + 0 means no limit, all nodes will be upgraded in parallel + minimum: 0 + type: integer + maxUnavailable: + anyOf: + - type: integer + - type: string + default: 25% + description: |- + MaxUnavailable is the maximum number of nodes with the driver installed, that can be unavailable during the upgrade. + Value can be an absolute number (ex: 5) or a percentage of total nodes at the start of upgrade (ex: 10%). + Absolute number is calculated from percentage by rounding up. + By default, a fixed value of 25% is used. 
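+ # Illustrative sketch: a ClusterPolicy driver upgrade policy using the fields
+ # above; maxUnavailable accepts an absolute count or a percentage. Example
+ # values only.
+ #   driver:
+ #     upgradePolicy:
+ #       autoUpgrade: true
+ #       maxParallelUpgrades: 2
+ #       maxUnavailable: 25%
+ #       drain:
+ #         enable: true
+ #         timeoutSeconds: 300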
+ x-kubernetes-int-or-string: true + podDeletion: + description: PodDeletionSpec describes configuration for deletion + of pods using special resources during automatic upgrade + properties: + deleteEmptyDir: + default: false + description: |- + DeleteEmptyDir indicates if should continue even if there are pods using emptyDir + (local data that will be deleted when the pod is deleted) + type: boolean + force: + default: false + description: Force indicates if force deletion is allowed + type: boolean + timeoutSeconds: + default: 300 + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite + minimum: 0 + type: integer + type: object + waitForCompletion: + description: WaitForCompletionSpec describes the configuration + for waiting on job completions + properties: + podSelector: + description: |- + PodSelector specifies a label selector for the pods to wait for completion + For more details on label selectors, see: + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + type: string + timeoutSeconds: + default: 0 + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite + minimum: 0 + type: integer + type: object + type: object + useNvidiaDriverCRD: + description: UseNvidiaDriverCRD indicates if the deployment of + NVIDIA Driver is managed by the NVIDIADriver CRD type + type: boolean + useOpenKernelModules: + description: UseOpenKernelModules indicates if the open GPU kernel + modules should be used + type: boolean + usePrecompiled: + description: UsePrecompiled indicates if deployment of NVIDIA + Driver using pre-compiled modules is enabled + type: boolean + version: + description: NVIDIA Driver image tag + type: string + virtualTopology: + description: 'Optional: Virtual Topology Daemon configuration + for NVIDIA vGPU drivers' + properties: + config: + description: 'Optional: Config name representing virtual topology + daemon configuration file nvidia-topologyd.conf' + type: string + type: object + type: object + gdrcopy: + description: GDRCopy component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GDRCopy is enabled through GPU + Operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
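+ # Illustrative sketch: the driver module flags and GDRCopy toggle defined
+ # above, as they would appear in a ClusterPolicy spec. Example values only.
+ #   driver:
+ #     useOpenKernelModules: true
+ #     usePrecompiled: false
+ #   gdrcopy:
+ #     enabled: true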
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA GDRCopy driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA GDRCopy driver image repository + type: string + version: + description: NVIDIA GDRCopy driver image tag + type: string + type: object + gds: + description: GPUDirectStorage defines the spec for GDS components(Experimental) + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GPUDirect Storage is enabled + through GPU operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA GPUDirect Storage Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA GPUDirect Storage Driver image repository + type: string + version: + description: NVIDIA GPUDirect Storage Driver image tag + type: string + type: object + gfd: + description: GPUFeatureDiscovery spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of GPU Feature Discovery + Plugin is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: GFD image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: GFD image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: GFD image tag + type: string + type: object + hostPaths: + description: HostPaths defines various paths on the host needed by + GPU Operator components + properties: + driverInstallDir: + description: |- + DriverInstallDir represents the root at which driver files including libraries, + config files, and executables can be found. + type: string + rootFS: + description: |- + RootFS represents the path to the root filesystem of the host. + This is used by components that need to interact with the host filesystem + and as such this must be a chroot-able filesystem. + Examples include the MIG Manager and Toolkit Container which may need to + stop, start, or restart systemd services. + type: string + type: object + kataManager: + description: KataManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: Kata Manager config + properties: + artifactsDir: + default: /opt/nvidia-gpu-operator/artifacts/runtimeclasses + description: |- + ArtifactsDir is the directory where kata artifacts (e.g. kernel / guest images, configuration, etc.) + are placed on the local filesystem. + type: string + runtimeClasses: + description: RuntimeClasses is a list of kata runtime classes + to configure. + items: + description: RuntimeClass defines the configuration for + a kata RuntimeClass + properties: + artifacts: + description: Artifacts are the kata artifacts associated + with the runtime class. + properties: + pullSecret: + description: PullSecret is the secret used to pull + the OCI artifact. + type: string + url: + description: |- + URL is the path to the OCI artifact (payload) containing all artifacts + associated with a kata runtime class. + type: string + required: + - url + type: object + name: + description: Name is the name of the kata runtime class. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector specifies the nodeSelector for the RuntimeClass object. + This ensures pods running with the RuntimeClass only get scheduled + onto nodes which support it. + type: object + required: + - artifacts + - name + type: object + type: array + type: object + enabled: + description: Enabled indicates if deployment of Kata Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
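+ # Illustrative sketch: one Kata runtime class entry following the config
+ # schema above. The class name and artifact URL are placeholders.
+ #   kataManager:
+ #     enabled: true
+ #     config:
+ #       artifactsDir: /opt/nvidia-gpu-operator/artifacts/runtimeclasses
+ #       runtimeClasses:
+ #       - name: kata-qemu-nvidia-gpu
+ #         artifacts:
+ #           url: registry.example.com/kata-gpu-artifacts:placeholder-tag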
+ type: string + required: + - name + type: object + type: array + image: + description: Kata Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Kata Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: Kata Manager image tag + type: string + type: object + mig: + description: MIG spec + properties: + strategy: + description: 'Optional: MIGStrategy to apply for GFD and NVIDIA + Device Plugin' + enum: + - none + - single + - mixed + type: string + type: object + migManager: + description: MIGManager for configuration to deploy MIG Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom mig-parted configuration for NVIDIA + MIG Manager container' + properties: + default: + default: all-disabled + description: Default MIG config to be applied on the node, + when there is no config specified with the node label nvidia.com/mig.config + enum: + - all-disabled + - "" + type: string + name: + default: default-mig-parted-config + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA MIG Manager + is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
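+ # Illustrative sketch: selecting a MIG strategy and the default mig-parted
+ # profile via the fields above. Example values only.
+ #   mig:
+ #     strategy: single
+ #   migManager:
+ #     enabled: true
+ #     config:
+ #       name: default-mig-parted-config
+ #       default: all-disabled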
+ type: string + required: + - name + type: object + type: array + gpuClientsConfig: + description: 'Optional: Custom gpu-clients configuration for NVIDIA + MIG Manager container' + properties: + name: + description: ConfigMap name + type: string + type: object + image: + description: NVIDIA MIG Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA MIG Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA MIG Manager image tag + type: string + type: object + nodeStatusExporter: + description: NodeStatusExporter spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of Node Status Exporter + is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Node Status Exporter image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Node Status Exporter image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: Node Status Exporter image tag + type: string + type: object + operator: + description: Operator component spec + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + defaultRuntime: + default: docker + description: Runtime defines container runtime type + enum: + - docker + - crio + - containerd + type: string + initContainer: + description: InitContainerSpec describes configuration for initContainer + image used with all components + properties: + image: + description: Image represents image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents image repository path + type: string + version: + description: Version represents image tag (version) + type: string + type: object + labels: + additionalProperties: + type: string + description: |- + Optional: Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + type: object + runtimeClass: + default: nvidia + type: string + use_ocp_driver_toolkit: + description: UseOpenShiftDriverToolkit indicates if DriverToolkit + image should be used on OpenShift to build and install driver + modules + type: boolean + required: + - defaultRuntime + type: object + psa: + description: PSA defines spec for PodSecurityAdmission configuration + properties: + enabled: + description: Enabled indicates if PodSecurityAdmission configuration + needs to be enabled for all Pods + type: boolean + type: object + psp: + description: |- + Deprecated: Pod Security Policies are no longer supported. Please use PodSecurityAdmission instead + PSP defines spec for handling PodSecurityPolicies + properties: + enabled: + description: Enabled indicates if PodSecurityPolicies needs to + be enabled for all Pods + type: boolean + type: object + sandboxDevicePlugin: + description: SandboxDevicePlugin component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA Sandbox + Device Plugin through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container.
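+ # Illustrative sketch: operator-level runtime settings from the schema above
+ # (defaultRuntime defaults to docker, runtimeClass to nvidia). Example values
+ # only.
+ #   operator:
+ #     defaultRuntime: containerd
+ #     runtimeClass: nvidia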
+ properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Sandbox Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA Sandbox Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Sandbox Device Plugin image tag + type: string + type: object + sandboxWorkloads: + description: SandboxWorkloads defines the spec for handling sandbox + workloads (i.e. Virtual Machines) + properties: + defaultWorkload: + default: container + description: |- + DefaultWorkload indicates the default GPU workload type to configure + worker nodes in the cluster for + enum: + - container + - vm-passthrough + - vm-vgpu + type: string + enabled: + description: |- + Enabled indicates if the GPU Operator should manage additional operands required + for sandbox workloads (i.e. VFIO Manager, vGPU Manager, and additional device plugins) + type: boolean + type: object + toolkit: + description: Toolkit component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA Container + Toolkit through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
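+ # Illustrative sketch: opting worker nodes into sandbox (VM) workloads via
+ # the fields above. Example values only.
+ #   sandboxWorkloads:
+ #     enabled: true
+ #     defaultWorkload: vm-passthrough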
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Container Toolkit image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + installDir: + default: /usr/local/nvidia + description: Toolkit install directory on the host + type: string + repository: + description: NVIDIA Container Toolkit image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Container Toolkit image tag + type: string + type: object + validator: + description: Validator defines the spec for operator-validator daemonset + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + cuda: + description: CUDA validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + driver: + description: Driver validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable.
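+ # Illustrative sketch: overriding the toolkit install directory defined
+ # above (defaults to /usr/local/nvidia). Example path only.
+ #   toolkit:
+ #     enabled: true
+ #     installDir: /opt/nvidia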
+ type: string + required: + - name + type: object + type: array + image: + description: Validator image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + plugin: + description: Plugin validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + repository: + description: Validator image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + toolkit: + description: Toolkit validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + version: + description: Validator image tag + type: string + vfioPCI: + description: VfioPCI validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + vgpuDevices: + description: VGPUDevices validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + type: object + vgpuManager: + description: VGPUManager validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + type: object + vfioManager: + description: VFIOManager for configuration to deploy VFIO-PCI Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Manager repository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag (version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of VFIO Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: VFIO Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: VFIO Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: VFIO Manager image tag + type: string + type: object + vgpuDeviceManager: + description: VGPUDeviceManager spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: NVIDIA vGPU devices configuration for NVIDIA vGPU + Device Manager container + properties: + default: + default: default + description: Default config name within the ConfigMap + type: string + name: + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Device + Manager is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Device Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Device Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Device Manager image tag + type: string + type: object + vgpuManager: + description: VGPUManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
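+ # Illustrative sketch: pointing the vGPU Device Manager at a devices
+ # ConfigMap using the fields above. The ConfigMap name is a placeholder.
+ #   vgpuDeviceManager:
+ #     enabled: true
+ #     config:
+ #       name: vgpu-devices-config
+ #       default: default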
+ type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Manager repository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag (version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Manager + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Manager image tag + type: string + type: object + required: + - daemonsets + - dcgm + - dcgmExporter + - devicePlugin + - driver + - gfd + - nodeStatusExporter + - operator + - toolkit + type: object + status: + description: ClusterPolicyStatus defines the observed state of ClusterPolicy + properties: + conditions: + description: Conditions is a list of conditions representing the ClusterPolicy's + current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition.
+ This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace indicates a namespace in which the operator + is installed + type: string + state: + description: State indicates status of ClusterPolicy + enum: + - ignored + - ready + - notReady + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/bundle/v24.9.0/manifests/nvidia.com_nvidiadrivers.yaml b/bundle/v24.9.0/manifests/nvidia.com_nvidiadrivers.yaml new file mode 100644 index 000000000..072155768 --- /dev/null +++ b/bundle/v24.9.0/manifests/nvidia.com_nvidiadrivers.yaml @@ -0,0 +1,797 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: nvidiadrivers.nvidia.com +spec: + group: nvidia.com + names: + kind: NVIDIADriver + listKind: NVIDIADriverList + plural: nvidiadrivers + shortNames: + - nvd + - nvdriver + - nvdrivers + singular: nvidiadriver + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: NVIDIADriver is the Schema for the nvidiadrivers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NVIDIADriverSpec defines the desired state of NVIDIADriver + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + certConfig: + description: 'Optional: Custom certificates configuration for NVIDIA + Driver container' + properties: + name: + type: string + type: object + driverType: + default: gpu + description: DriverType defines NVIDIA driver type + enum: + - gpu + - vgpu + - vgpu-host-manager + type: string + x-kubernetes-validations: + - message: driverType is an immutable field. Please create a new NvidiaDriver + resource instead when you want to change this setting. + rule: self == oldSelf + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + gdrcopy: + description: GDRCopy defines the spec for GDRCopy driver + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GDRCopy is enabled through GPU + operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: GDRCopy driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: GDRCopy driver image repository + type: string + version: + description: GDRCopy driver image tag + type: string + type: object + gds: + description: GPUDirectStorage defines the spec for GDS driver + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GPUDirect Storage is enabled + through GPU operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable.
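+ # Illustrative sketch: a minimal NVIDIADriver resource against this schema.
+ # driverType is validated as immutable (rule above), so changing it means
+ # creating a new resource. The metadata name is a placeholder.
+ #   apiVersion: nvidia.com/v1alpha1
+ #   kind: NVIDIADriver
+ #   metadata:
+ #     name: gpu-driver-example
+ #   spec:
+ #     driverType: gpu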
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA GPUDirect Storage Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA GPUDirect Storage Driver image repository + type: string + version: + description: NVIDIA GPUDirect Storage Driver image tag + type: string + type: object + image: + default: nvcr.io/nvidia/driver + description: NVIDIA Driver container image name + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + kernelModuleConfig: + description: 'Optional: Kernel module configuration parameters for + the NVIDIA Driver' + properties: + name: + type: string + type: object + labels: + additionalProperties: + type: string + description: |- + Optional: Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + type: object + licensingConfig: + description: 'Optional: Licensing configuration for NVIDIA vGPU licensing' + properties: + name: + type: string + nlsEnabled: + description: NLSEnabled indicates if NVIDIA Licensing System is + used for licensing. + type: boolean + type: object + livenessProbe: + description: NVIDIA Driver container liveness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + manager: + description: Manager represents configuration for NVIDIA Driver Manager + initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
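Each component block above (gdrcopy, gds, the driver image itself, and the driver manager below) splits its image reference into the same repository/image/version triplet rather than one opaque string. A minimal sketch of how such a triplet is conventionally joined into a pullable reference; `imageSpec` and `fullImage` are illustrative names, not the operator's actual helpers:

```go
package main

import (
	"fmt"
	"strings"
)

// imageSpec mirrors the repository/image/version triplet that recurs in the
// spec above (driver, manager, gds, and gdrcopy all follow the same shape).
type imageSpec struct {
	Repository string // e.g. "nvcr.io/nvidia"
	Image      string // e.g. "driver"
	Version    string // e.g. "550.127.08" or a sha256 digest
}

// fullImage assembles a pullable reference; digest versions join with "@"
// rather than ":".
func fullImage(s imageSpec) string {
	sep := ":"
	if strings.HasPrefix(s.Version, "sha256:") {
		sep = "@"
	}
	return fmt.Sprintf("%s/%s%s%s", s.Repository, s.Image, sep, s.Version)
}

func main() {
	fmt.Println(fullImage(imageSpec{"nvcr.io/nvidia", "driver", "550.127.08"}))
	// Output: nvcr.io/nvidia/driver:550.127.08
}
```

Splitting the reference this way lets a default such as `nvcr.io/nvidia/driver` be overridden field by field, which is how the sample manifest later in this patch specifies its driver image.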
+ type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Manager repository path + type: string + version: + description: Version represents NVIDIA Driver Manager image tag (version) + type: string + type: object + nodeAffinity: + description: Affinity specifies node affinity rules for driver pods + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding + weight. + properties: + matchExpressions: + description: A list of node selector requirements by + node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by + node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
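The affinity stanzas above are a verbatim embedding of `corev1.NodeAffinity`: terms under `requiredDuringSchedulingIgnoredDuringExecution` are ORed while the expressions inside a single term are ANDed, and `preferred...` terms only bias scheduler scoring. A hedged sketch of a matching Go value, using the `nvidia.com/gpu.present` label that the controllers elsewhere in this patch filter on:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Terms in NodeSelectorTerms are ORed; the requirements inside a single
	// term are ANDed. This hypothetical required rule keeps driver pods on
	// nodes that advertise an NVIDIA GPU.
	affinity := &corev1.NodeAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
			NodeSelectorTerms: []corev1.NodeSelectorTerm{{
				MatchExpressions: []corev1.NodeSelectorRequirement{{
					Key:      "nvidia.com/gpu.present",
					Operator: corev1.NodeSelectorOpIn,
					Values:   []string{"true"},
				}},
			}},
		},
	}
	fmt.Printf("%+v\n", affinity)
}
```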
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies a selector for installation of + NVIDIA driver + type: object + priorityClassName: + description: 'Optional: Set priorityClassName' + type: string + rdma: + description: GPUDirectRDMA defines the spec for NVIDIA Peer Memory + driver + properties: + enabled: + description: Enabled indicates if GPUDirect RDMA is enabled through + GPU operator + type: boolean + useHostMofed: + description: UseHostMOFED indicates to use MOFED drivers directly + installed on the host to enable GPUDirect RDMA + type: boolean + type: object + readinessProbe: + description: NVIDIA Driver container readiness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + repoConfig: + description: 'Optional: Custom repo configuration for NVIDIA Driver + container' + properties: + name: + type: string + type: object + repository: + description: NVIDIA Driver repository + type: string + resources: + description: 'Optional: Define resources requests and limits for each + pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + startupProbe: + description: NVIDIA Driver container startup probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + tolerations: + description: 'Optional: Set tolerations' + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + useOpenKernelModules: + description: UseOpenKernelModules indicates if the open GPU kernel + modules should be used + type: boolean + usePrecompiled: + description: UsePrecompiled indicates if deployment of NVIDIA Driver + using pre-compiled modules is enabled + type: boolean + x-kubernetes-validations: + - message: usePrecompiled is an immutable field. Please create a new + NvidiaDriver resource instead when you want to change this setting.
+ rule: self == oldSelf + version: + description: NVIDIA Driver version (or just branch for precompiled + drivers) + type: string + virtualTopologyConfig: + description: 'Optional: Virtual Topology Daemon configuration for + NVIDIA vGPU drivers' + properties: + name: + description: 'Optional: Config name representing virtual topology + daemon configuration file nvidia-topologyd.conf' + type: string + type: object + required: + - driverType + - image + type: object + status: + description: NVIDIADriverStatus defines the observed state of NVIDIADriver + properties: + conditions: + description: Conditions is a list of conditions representing the NVIDIADriver's + current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
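The conditions array in the status schema above follows the standard `metav1.Condition` shape, so a controller can maintain it with the stock apimachinery helpers instead of hand-rolled transition bookkeeping. A short sketch; the `Ready` condition type and reason string are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var conditions []metav1.Condition

	// SetStatusCondition updates LastTransitionTime only when Status actually
	// changes, which is exactly the bookkeeping the schema above describes.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:               "Ready", // illustrative condition type
		Status:             metav1.ConditionTrue,
		Reason:             "Reconciled", // must match the CamelCase pattern above
		Message:            "all operands are ready",
		ObservedGeneration: 12,
	})

	fmt.Println(meta.IsStatusConditionTrue(conditions, "Ready"))
}
```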
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace indicates a namespace in which the operator + and driver are installed + type: string + state: + description: |- + INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + Important: Run "make" to regenerate code after modifying this file + State indicates status of NVIDIADriver instance + enum: + - ignored + - ready + - notReady + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/bundle/v24.9.0/metadata/annotations.yaml b/bundle/v24.9.0/metadata/annotations.yaml new file mode 100644 index 000000000..50d49a56b --- /dev/null +++ b/bundle/v24.9.0/metadata/annotations.yaml @@ -0,0 +1,17 @@ +annotations: + operators.operatorframework.io.bundle.channels.v1: stable,v24.9 + operators.operatorframework.io.bundle.channel.default.v1: v24.9 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: gpu-operator-certified + operators.operatorframework.io.metrics.builder: operator-sdk-v1.4.0 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 + operators.operatorframework.io.test.config.v1: tests/scorecard/ + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operatorframework.io/cluster-monitoring: "true" + operatorframework.io/suggested-namespace: nvidia-gpu-operator + + # Annotations to specify OCP versions compatibility. + com.redhat.openshift.versions: v4.12-v4.17 diff --git a/cmd/gpu-operator/main.go b/cmd/gpu-operator/main.go index 4f55e46ec..076362c12 100644 --- a/cmd/gpu-operator/main.go +++ b/cmd/gpu-operator/main.go @@ -27,7 +27,12 @@ import ( // to ensure that exec-entrypoint and run can make use of them. 
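The `cmd/gpu-operator/main.go` hunk below narrows the manager's informer cache from cluster-wide to just the operator's namespace plus `openshift` (where driver-toolkit ImageStreams live). A standalone sketch of the same `cache.Options` pattern, assuming controller-runtime v0.15 or newer; cluster-scoped objects such as Nodes are still cached globally, while reads of namespaced objects outside the listed namespaces will miss the cache:

```go
package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func main() {
	// Namespaced objects are cached (and watchable) only in these namespaces;
	// "openshift" is included so DTK ImageStreams resolve on OpenShift.
	opts := ctrl.Options{
		Cache: cache.Options{
			DefaultNamespaces: map[string]cache.Config{
				os.Getenv("OPERATOR_NAMESPACE"): {},
				"openshift":                     {},
			},
		},
	}

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), opts)
	if err != nil {
		panic(err)
	}
	_ = mgr // wire controllers here, then mgr.Start(ctrl.SetupSignalHandler())
}
```

This trades memory footprint for scope: the operator no longer lists and caches every Secret or ConfigMap in the cluster, which matters on large clusters.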
"go.uber.org/zap/zapcore" _ "k8s.io/client-go/plugin/pkg/client/auth" + "sigs.k8s.io/controller-runtime/pkg/cache" + apiconfigv1 "github.com/openshift/api/config/v1" + apiimagev1 "github.com/openshift/api/image/v1" + secv1 "github.com/openshift/api/security/v1" + promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" @@ -41,10 +46,11 @@ import ( "github.com/NVIDIA/k8s-operator-libs/pkg/upgrade" - clusterpolicyv1 "github.com/NVIDIA/gpu-operator/api/v1" - nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/v1alpha1" + clusterpolicyv1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" "github.com/NVIDIA/gpu-operator/controllers" "github.com/NVIDIA/gpu-operator/controllers/clusterinfo" + "github.com/NVIDIA/gpu-operator/internal/consts" "github.com/NVIDIA/gpu-operator/internal/info" // +kubebuilder:scaffold:imports ) @@ -56,11 +62,13 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(clusterpolicyv1.AddToScheme(scheme)) utilruntime.Must(apiextensionsv1.AddToScheme(scheme)) utilruntime.Must(nvidiav1alpha1.AddToScheme(scheme)) - // +kubebuilder:scaffold:scheme + utilruntime.Must(promv1.AddToScheme(scheme)) + utilruntime.Must(secv1.Install(scheme)) + utilruntime.Must(apiconfigv1.Install(scheme)) + utilruntime.Must(apiimagev1.Install(scheme)) } func main() { @@ -98,6 +106,16 @@ func main() { Port: 9443, }) + operatorNamespace := os.Getenv("OPERATOR_NAMESPACE") + openshiftNamespace := consts.OpenshiftNamespace + cacheOptions := cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + operatorNamespace: {}, + // Also cache resources in the openshift namespace to retrieve ImageStreams when on an openshift cluster + openshiftNamespace: {}, + }, + } + options := ctrl.Options{ Scheme: scheme, Metrics: metricsOptions, @@ -105,6 +123,7 @@ func main() { LeaderElection: enableLeaderElection, LeaderElectionID: "53822513.nvidia.com", WebhookServer: webhookServer, + Cache: cacheOptions, } if enableLeaderElection && int(renewDeadline) != 0 { diff --git a/cmd/gpuop-cfg/validate/clusterpolicy/clusterpolicy.go b/cmd/gpuop-cfg/validate/clusterpolicy/clusterpolicy.go index c82e9794c..75c44f7bd 100644 --- a/cmd/gpuop-cfg/validate/clusterpolicy/clusterpolicy.go +++ b/cmd/gpuop-cfg/validate/clusterpolicy/clusterpolicy.go @@ -25,7 +25,7 @@ import ( "github.com/urfave/cli/v2" "sigs.k8s.io/yaml" - v1 "github.com/NVIDIA/gpu-operator/api/v1" + v1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" ) type command struct { diff --git a/cmd/gpuop-cfg/validate/clusterpolicy/images.go b/cmd/gpuop-cfg/validate/clusterpolicy/images.go index 4f315b95c..1383a6c50 100644 --- a/cmd/gpuop-cfg/validate/clusterpolicy/images.go +++ b/cmd/gpuop-cfg/validate/clusterpolicy/images.go @@ -23,7 +23,7 @@ import ( "github.com/regclient/regclient" "github.com/regclient/regclient/types/ref" - v1 "github.com/NVIDIA/gpu-operator/api/v1" + v1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" ) var client = regclient.New() diff --git a/cmd/gpuop-cfg/validate/csv/alm-examples.go b/cmd/gpuop-cfg/validate/csv/alm-examples.go index feec0bfda..d853a470f 100644 --- a/cmd/gpuop-cfg/validate/csv/alm-examples.go +++ b/cmd/gpuop-cfg/validate/csv/alm-examples.go @@ -20,15 +20,15 @@ import ( "fmt" "github.com/operator-framework/api/pkg/operators/v1alpha1" - "sigs.k8s.io/json" + 
"k8s.io/apimachinery/pkg/util/json" - v1 "github.com/NVIDIA/gpu-operator/api/v1" + v1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" ) func validateALMExample(csv *v1alpha1.ClusterServiceVersion) error { cpList := []v1.ClusterPolicy{} example := csv.Annotations["alm-examples"] - err := json.UnmarshalCaseSensitivePreserveInts([]byte(example), &cpList) + err := json.Unmarshal([]byte(example), &cpList) if err != nil { return err } diff --git a/config/crd/bases/nvidia.com_clusterpolicies.yaml b/config/crd/bases/nvidia.com_clusterpolicies.yaml index 16e35bf4b..54e4a652b 100644 --- a/config/crd/bases/nvidia.com_clusterpolicies.yaml +++ b/config/crd/bases/nvidia.com_clusterpolicies.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: clusterpolicies.nvidia.com spec: group: nvidia.com @@ -248,8 +248,8 @@ spec: type: object type: array hostPort: - description: 'HostPort represents host port that needs to be bound - for DCGM engine (Default: 5555)' + description: 'Deprecated: HostPort represents host port that needs + to be bound for DCGM engine (Default: 5555)' format: int32 type: integer image: @@ -410,15 +410,20 @@ spec: sets for NVIDIA DCGM Exporter items: description: |- - RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. - It defines ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config properties: action: default: replace description: |- - Action to perform based on regex matching. Default is 'replace'. - uppercase and lowercase actions require Prometheus >= 2.36. + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" enum: - replace - Replace @@ -444,39 +449,47 @@ spec: - DropEqual type: string modulus: - description: Modulus to take of the hash of the source - label values. + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. format: int64 type: integer regex: description: Regular expression against which the extracted - value is matched. Default is '(.*)' + value is matched. type: string replacement: description: |- - Replacement value against which a regex replace is performed if the - regular expression matches. Regex capture groups are available. Default is '$1' + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. type: string separator: - description: Separator placed between concatenated source - label values. default is ';'. + description: Separator is the string between concatenated + SourceLabels. type: string sourceLabels: description: |- - The source labels select values from existing labels. Their content is concatenated - using the configured separator and matched against the configured regular expression - for the replace, keep, and drop actions. + The source labels select values from existing labels. 
Their content is + concatenated using the configured Separator and matched against the + configured regular expression. items: - description: LabelName is a valid Prometheus label - name which may only contain ASCII letters, numbers, - as well as underscores. + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ type: string type: array targetLabel: description: |- - Label to which the resulting value is written in a replace action. - It is mandatory for replace actions. Regex capture groups are available. + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. type: string type: object type: array @@ -538,6 +551,15 @@ spec: items: type: string type: array + mps: + description: 'Optional: MPS related configuration for the NVIDIA + Device Plugin' + properties: + root: + default: /run/nvidia/mps + description: Root defines the MPS root path on the host + type: string + type: object repository: description: NVIDIA Device Plugin image repository type: string @@ -926,9 +948,9 @@ spec: type: boolean timeoutSeconds: default: 300 - description: TimeoutSecond specifies the length of time - in seconds to wait before giving up on pod termination, - zero means infinite + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite minimum: 0 type: integer type: object @@ -944,9 +966,9 @@ spec: type: string timeoutSeconds: default: 0 - description: TimeoutSecond specifies the length of time - in seconds to wait before giving up on pod termination, - zero means infinite + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite minimum: 0 type: integer type: object @@ -1146,6 +1168,24 @@ spec: description: GFD image tag type: string type: object + hostPaths: + description: HostPaths defines various paths on the host needed by + GPU Operator components + properties: + driverInstallDir: + description: |- + DriverInstallDir represents the root at which driver files including libraries, + config files, and executables can be found. + type: string + rootFS: + description: |- + RootFS represents the path to the root filesystem of the host. + This is used by components that need to interact with the host filesystem + and as such this must be a chroot-able filesystem. + Examples include the MIG Manager and Toolkit Container which may need to + stop, start, or restart systemd services. + type: string + type: object kataManager: description: KataManager component spec properties: @@ -2269,16 +2309,8 @@ spec: description: Conditions is a list of conditions representing the ClusterPolicy's current state. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -2319,12 +2351,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/nvidia.com_nvidiadrivers.yaml b/config/crd/bases/nvidia.com_nvidiadrivers.yaml index 317972fd2..c49059a38 100644 --- a/config/crd/bases/nvidia.com_nvidiadrivers.yaml +++ b/config/crd/bases/nvidia.com_nvidiadrivers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: nvidiadrivers.nvidia.com spec: group: nvidia.com @@ -357,11 +357,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -389,11 +391,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -406,6 +410,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -450,11 +455,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -482,14 +489,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -709,16 +719,8 @@ spec: description: Conditions is a list of conditions representing the NVIDIADriver's current state. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -759,12 +761,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index a21a8607d..7a631b9e3 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -8,25 +8,14 @@ rules: - "" resources: - configmaps + - endpoints - events + - namespaces - nodes - persistentvolumeclaims - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - endpoints - - namespaces - pods - pods/eviction + - secrets - serviceaccounts - services - services/finalizers @@ -38,22 +27,6 @@ rules: - patch - update - watch -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - list - apiGroups: - apiextensions.k8s.io resources: @@ -159,17 +132,6 @@ rules: - nvidia.com resources: - '*' - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - nvidia.com - resources: - nvidiadrivers verbs: - create diff --git a/config/samples/nvidia_v1alpha1_nvidiadriver.yaml b/config/samples/nvidia_v1alpha1_nvidiadriver.yaml index c49b2156c..a200a0f61 100644 --- a/config/samples/nvidia_v1alpha1_nvidiadriver.yaml +++ b/config/samples/nvidia_v1alpha1_nvidiadriver.yaml @@ -8,7 +8,7 @@ spec: driverType: gpu repository: nvcr.io/nvidia image: driver - version: "535.154.05" + version: "550.127.08" imagePullPolicy: IfNotPresent imagePullSecrets: [] nodeSelector: {} diff --git a/controllers/clusterinfo/clusterinfo.go b/controllers/clusterinfo/clusterinfo.go index 8f0cec78c..b1de7fe43 100644 --- a/controllers/clusterinfo/clusterinfo.go +++ b/controllers/clusterinfo/clusterinfo.go @@ -341,7 +341,7 @@ func getOpenshiftDTKImages(ctx context.Context, c *rest.Config) map[string]strin logger := log.FromContext(ctx) name := "driver-toolkit" - namespace := "openshift" + namespace := consts.OpenshiftNamespace ocpImageClient, err := imagesv1.NewForConfig(c) if err != nil { diff --git a/controllers/clusterpolicy_controller.go b/controllers/clusterpolicy_controller.go index 30b7dec0c..11064efa2 100644 --- a/controllers/clusterpolicy_controller.go +++ b/controllers/clusterpolicy_controller.go @@ -43,7 +43,7 @@ import ( "github.com/NVIDIA/k8s-operator-libs/pkg/consts" - gpuv1 
"github.com/NVIDIA/gpu-operator/api/v1" + gpuv1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" "github.com/NVIDIA/gpu-operator/internal/conditions" ) @@ -253,10 +253,10 @@ func updateCRState(ctx context.Context, r *ClusterPolicyReconciler, namespacedNa } } -func addWatchNewGPUNode(ctx context.Context, r *ClusterPolicyReconciler, c controller.Controller, mgr ctrl.Manager) error { +func addWatchNewGPUNode(r *ClusterPolicyReconciler, c controller.Controller, mgr ctrl.Manager) error { // Define a mapping from the Node object in the event to one or more // ClusterPolicy objects to Reconcile - mapFn := func(ctx context.Context, a client.Object) []reconcile.Request { + mapFn := func(ctx context.Context, n *corev1.Node) []reconcile.Request { // find all the ClusterPolicy to trigger their reconciliation opts := []client.ListOption{} // Namespace = "" to list across all namespaces. list := &gpuv1.ClusterPolicyList{} @@ -280,13 +280,13 @@ func addWatchNewGPUNode(ctx context.Context, r *ClusterPolicyReconciler, c contr return cpToRec } - p := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { + p := predicate.TypedFuncs[*corev1.Node]{ + CreateFunc: func(e event.TypedCreateEvent[*corev1.Node]) bool { labels := e.Object.GetLabels() return hasGPULabels(labels) }, - UpdateFunc: func(e event.UpdateEvent) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*corev1.Node]) bool { newLabels := e.ObjectNew.GetLabels() oldLabels := e.ObjectOld.GetLabels() nodeName := e.ObjectNew.GetName() @@ -324,7 +324,7 @@ func addWatchNewGPUNode(ctx context.Context, r *ClusterPolicyReconciler, c contr } return needsUpdate }, - DeleteFunc: func(e event.DeleteEvent) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*corev1.Node]) bool { // if an RHCOS GPU node is deleted, trigger a // reconciliation to ensure that there is no dangling // OpenShift Driver-Toolkit (RHCOS version-specific) @@ -341,9 +341,12 @@ func addWatchNewGPUNode(ctx context.Context, r *ClusterPolicyReconciler, c contr } err := c.Watch( - source.Kind(mgr.GetCache(), &corev1.Node{}), - handler.EnqueueRequestsFromMapFunc(mapFn), - p) + source.Kind(mgr.GetCache(), + &corev1.Node{}, + handler.TypedEnqueueRequestsFromMapFunc[*corev1.Node](mapFn), + p, + ), + ) return err } @@ -351,7 +354,8 @@ func addWatchNewGPUNode(ctx context.Context, r *ClusterPolicyReconciler, c contr // SetupWithManager sets up the controller with the Manager. 
func (r *ClusterPolicyReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { // Create a new controller - c, err := controller.New("clusterpolicy-controller", mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: 1, RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(minDelayCR, maxDelayCR)}) + c, err := controller.New("clusterpolicy-controller", mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: 1, + RateLimiter: workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](minDelayCR, maxDelayCR)}) if err != nil { return err } @@ -360,20 +364,32 @@ func (r *ClusterPolicyReconciler) SetupWithManager(ctx context.Context, mgr ctrl r.conditionUpdater = conditions.NewClusterPolicyUpdater(mgr.GetClient()) // Watch for changes to primary resource ClusterPolicy - err = c.Watch(source.Kind(mgr.GetCache(), &gpuv1.ClusterPolicy{}), &handler.EnqueueRequestForObject{}, predicate.GenerationChangedPredicate{}) + err = c.Watch(source.Kind( + mgr.GetCache(), + &gpuv1.ClusterPolicy{}, + &handler.TypedEnqueueRequestForObject[*gpuv1.ClusterPolicy]{}, + predicate.TypedGenerationChangedPredicate[*gpuv1.ClusterPolicy]{}, + ), + ) if err != nil { return err } // Watch for changes to Node labels and requeue the owner ClusterPolicy - err = addWatchNewGPUNode(ctx, r, c, mgr) + err = addWatchNewGPUNode(r, c, mgr) if err != nil { return err } // TODO(user): Modify this to be the types you create that are owned by the primary resource // Watch for changes to secondary resource Daemonsets and requeue the owner ClusterPolicy - err = c.Watch(source.Kind(mgr.GetCache(), &appsv1.DaemonSet{}), handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &gpuv1.ClusterPolicy{}, handler.OnlyControllerOwner())) + err = c.Watch( + source.Kind(mgr.GetCache(), + &appsv1.DaemonSet{}, + handler.TypedEnqueueRequestForOwner[*appsv1.DaemonSet](mgr.GetScheme(), mgr.GetRESTMapper(), &gpuv1.ClusterPolicy{}, + handler.OnlyControllerOwner()), + ), + ) if err != nil { return err } @@ -395,7 +411,7 @@ func (r *ClusterPolicyReconciler) SetupWithManager(ctx context.Context, mgr ctrl if owner == nil { return nil } - if owner.APIVersion != gpuv1.GroupVersion.String() || owner.Kind != "ClusterPolicy" { + if owner.APIVersion != gpuv1.SchemeGroupVersion.String() || owner.Kind != "ClusterPolicy" { return nil } return []string{owner.Name} diff --git a/controllers/nvidiadriver_controller.go b/controllers/nvidiadriver_controller.go index 41d5f5930..90734b143 100644 --- a/controllers/nvidiadriver_controller.go +++ b/controllers/nvidiadriver_controller.go @@ -39,8 +39,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - gpuv1 "github.com/NVIDIA/gpu-operator/api/v1" - nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/v1alpha1" + gpuv1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" "github.com/NVIDIA/gpu-operator/controllers/clusterinfo" "github.com/NVIDIA/gpu-operator/internal/conditions" "github.com/NVIDIA/gpu-operator/internal/consts" @@ -256,21 +256,54 @@ func (r *NVIDIADriverReconciler) SetupWithManager(ctx context.Context, mgr ctrl. 
c, err := controller.New("nvidia-driver-controller", mgr, controller.Options{ Reconciler: r, MaxConcurrentReconciles: 1, - RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(minDelayCR, maxDelayCR), + RateLimiter: workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](minDelayCR, maxDelayCR), }) if err != nil { return err } // Watch for changes to the primary resource NVIDIADriver - err = c.Watch(source.Kind(mgr.GetCache(), &nvidiav1alpha1.NVIDIADriver{}), &handler.EnqueueRequestForObject{}, predicate.GenerationChangedPredicate{}) + err = c.Watch(source.Kind( + mgr.GetCache(), + &nvidiav1alpha1.NVIDIADriver{}, + &handler.TypedEnqueueRequestForObject[*nvidiav1alpha1.NVIDIADriver]{}, + predicate.TypedGenerationChangedPredicate[*nvidiav1alpha1.NVIDIADriver]{}, + ), + ) if err != nil { return err } // Watch for changes to ClusterPolicy. Whenever an event is generated for ClusterPolicy, enqueue // a reconcile request for all NVIDIADriver instances. - mapFn := func(ctx context.Context, a client.Object) []reconcile.Request { + mapFn := func(ctx context.Context, cp *gpuv1.ClusterPolicy) []reconcile.Request { + logger := log.FromContext(ctx) + opts := []client.ListOption{} + list := &nvidiav1alpha1.NVIDIADriverList{} + + err := mgr.GetClient().List(ctx, list, opts...) + if err != nil { + logger.Error(err, "Unable to list NVIDIADriver resources") + return []reconcile.Request{} + } + + reconcileRequests := []reconcile.Request{} + for _, nvidiaDriver := range list.Items { + reconcileRequests = append(reconcileRequests, + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: nvidiaDriver.ObjectMeta.GetName(), + Namespace: nvidiaDriver.ObjectMeta.GetNamespace(), + }, + }) + } + + return reconcileRequests + } + + // Watch for changes to Nodes. Whenever an event is generated for a Node, enqueue + // a reconcile request for all NVIDIADriver instances. + nodeMapFn := func(ctx context.Context, cp *corev1.Node) []reconcile.Request { logger := log.FromContext(ctx) opts := []client.ListOption{} list := &nvidiav1alpha1.NVIDIADriverList{} @@ -296,20 +329,23 @@ } err = c.Watch( - source.Kind(mgr.GetCache(), &gpuv1.ClusterPolicy{}), - handler.EnqueueRequestsFromMapFunc(mapFn), - predicate.GenerationChangedPredicate{}, + source.Kind( + mgr.GetCache(), + &gpuv1.ClusterPolicy{}, + handler.TypedEnqueueRequestsFromMapFunc[*gpuv1.ClusterPolicy](mapFn), + predicate.TypedGenerationChangedPredicate[*gpuv1.ClusterPolicy]{}, + ), ) if err != nil { return err } - nodePredicate := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { + nodePredicate := predicate.TypedFuncs[*corev1.Node]{ + CreateFunc: func(e event.TypedCreateEvent[*corev1.Node]) bool { labels := e.Object.GetLabels() return hasGPULabels(labels) }, - UpdateFunc: func(e event.UpdateEvent) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*corev1.Node]) bool { logger := log.FromContext(ctx) newLabels := e.ObjectNew.GetLabels() oldLabels := e.ObjectOld.GetLabels() @@ -324,7 +360,7 @@ func (r *NVIDIADriverReconciler) SetupWithManager(ctx context.Context, mgr ctrl. } return needsUpdate }, - DeleteFunc: func(e event.DeleteEvent) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*corev1.Node]) bool { labels := e.Object.GetLabels() return hasGPULabels(labels) },
// Watch for changes to node labels err = c.Watch( - source.Kind(mgr.GetCache(), &corev1.Node{}), - handler.EnqueueRequestsFromMapFunc(mapFn), - nodePredicate, + source.Kind(mgr.GetCache(), + &corev1.Node{}, + handler.TypedEnqueueRequestsFromMapFunc[*corev1.Node](nodeMapFn), + nodePredicate, + ), ) if err != nil { return err @@ -342,20 +380,9 @@ func (r *NVIDIADriverReconciler) SetupWithManager(ctx context.Context, mgr ctrl. // Watch for changes to secondary resources which each state manager manages watchSources := stateManager.GetWatchSources(mgr) - nvDriverPredicate, err := predicate.LabelSelectorPredicate(metav1.LabelSelector{MatchLabels: map[string]string{AppComponentLabelKey: AppComponentLabelValue}}) - if err != nil { - return fmt.Errorf("failed to create labelSelector predicate: %w", err) - } for _, watchSource := range watchSources { err = c.Watch( watchSource, - handler.EnqueueRequestForOwner( - mgr.GetScheme(), - mgr.GetRESTMapper(), - &nvidiav1alpha1.NVIDIADriver{}, - handler.OnlyControllerOwner(), - ), - nvDriverPredicate, ) if err != nil { return fmt.Errorf("error setting up Watch for source type %v: %w", watchSource, err) @@ -369,7 +396,7 @@ func (r *NVIDIADriverReconciler) SetupWithManager(ctx context.Context, mgr ctrl. if owner == nil { return nil } - if owner.APIVersion != nvidiav1alpha1.GroupVersion.String() || owner.Kind != nvidiav1alpha1.NVIDIADriverCRDName { + if owner.APIVersion != nvidiav1alpha1.SchemeGroupVersion.String() || owner.Kind != nvidiav1alpha1.NVIDIADriverCRDName { return nil } return []string{owner.Name} diff --git a/controllers/object_controls.go b/controllers/object_controls.go index b66b0b630..1b1801391 100644 --- a/controllers/object_controls.go +++ b/controllers/object_controls.go @@ -32,7 +32,6 @@ import ( "path/filepath" "github.com/davecgh/go-spew/spew" - "github.com/mitchellh/hashstructure" apiconfigv1 "github.com/openshift/api/config/v1" apiimagev1 "github.com/openshift/api/image/v1" secv1 "github.com/openshift/api/security/v1" @@ -52,7 +51,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/yaml" - gpuv1 "github.com/NVIDIA/gpu-operator/api/v1" + gpuv1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" + "github.com/NVIDIA/gpu-operator/internal/consts" + "github.com/NVIDIA/gpu-operator/internal/utils" ) const ( @@ -112,8 +113,8 @@ const ( MigDefaultGPUClientsConfigMapName = "default-gpu-clients" // DCGMRemoteEngineEnvName indicates env name to specify remote DCGM host engine ip:port DCGMRemoteEngineEnvName = "DCGM_REMOTE_HOSTENGINE_INFO" - // DCGMDefaultHostPort indicates default host port bound to DCGM host engine - DCGMDefaultHostPort = 5555 + // DCGMDefaultPort indicates default port bound to DCGM host engine + DCGMDefaultPort = 5555 // GPUDirectRDMAEnabledEnvName indicates if GPU direct RDMA is enabled through GPU operator GPUDirectRDMAEnabledEnvName = "GPU_DIRECT_RDMA_ENABLED" // UseHostMOFEDEnvName indicates if MOFED driver is pre-installed on the host @@ -146,8 +147,8 @@ const ( NvidiaCtrRuntimeCDIPrefixesEnvName = "NVIDIA_CONTAINER_RUNTIME_MODES_CDI_ANNOTATION_PREFIXES" // CDIEnabledEnvName is the name of the envvar used to enable CDI in the operands CDIEnabledEnvName = "CDI_ENABLED" - // NvidiaCTKPathEnvName is the name of the envvar specifying the path to the 'nvidia-ctk' binary - NvidiaCTKPathEnvName = "NVIDIA_CTK_PATH" + // NvidiaCDIHookPathEnvName is the name of the envvar specifying the path to the 'nvidia-cdi-hook' binary + NvidiaCDIHookPathEnvName = "NVIDIA_CDI_HOOK_PATH" // 
CrioConfigModeEnvName is the name of the envvar controlling how the toolkit container updates the cri-o configuration CrioConfigModeEnvName = "CRIO_CONFIG_MODE" // DeviceListStrategyEnvName is the name of the envvar for configuring the device-list-strategy in the device-plugin @@ -164,6 +165,19 @@ const ( DefaultCCModeEnvName = "DEFAULT_CC_MODE" // OpenKernelModulesEnabledEnvName is the name of the driver-container envvar for enabling open GPU kernel module support OpenKernelModulesEnabledEnvName = "OPEN_KERNEL_MODULES_ENABLED" + // MPSRootEnvName is the name of the envvar for configuring the MPS root + MPSRootEnvName = "MPS_ROOT" + // DefaultMPSRoot is the default MPS root path on the host + DefaultMPSRoot = "/run/nvidia/mps" + // HostRootEnvName is the name of the envvar representing the root path of the underlying host + HostRootEnvName = "HOST_ROOT" + // DefaultDriverInstallDir represents the default path of a driver container installation + DefaultDriverInstallDir = "/run/nvidia/driver" + // DriverInstallDirEnvName is the name of the envvar used by the driver-validator to represent the driver install dir + DriverInstallDirEnvName = "DRIVER_INSTALL_DIR" + // DriverInstallDirCtrPathEnvName is the name of the envvar used by the driver-validator to represent the path + // of the driver install dir mounted in the container + DriverInstallDirCtrPathEnvName = "DRIVER_INSTALL_DIR_CTR_PATH" ) // ContainerProbe defines container probe types @@ -276,11 +290,11 @@ func ServiceAccount(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].ServiceAccount.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("ServiceAccount", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("ServiceAccount", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -288,11 +302,11 @@ func ServiceAccount(n ClusterPolicyController) (gpuv1.State, error) { return gpuv1.Disabled, nil } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, skipping update") return gpuv1.Ready, nil @@ -311,11 +325,11 @@ func Role(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].Role.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("Role", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("Role", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -323,14 +337,14 @@ func Role(n ClusterPolicyController) (gpuv1.State, error) { return gpuv1.Disabled, nil } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, 
obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -352,11 +366,11 @@ func RoleBinding(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].RoleBinding.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("RoleBinding", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("RoleBinding", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -374,14 +388,14 @@ func RoleBinding(n ClusterPolicyController) (gpuv1.State, error) { obj.Subjects[idx].Namespace = n.operatorNamespace } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -403,11 +417,11 @@ func ClusterRole(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].ClusterRole.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("ClusterRole", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("ClusterRole", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -415,14 +429,14 @@ func ClusterRole(n ClusterPolicyController) (gpuv1.State, error) { return gpuv1.Disabled, nil } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -444,11 +458,11 @@ func ClusterRoleBinding(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].ClusterRoleBinding.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("ClusterRoleBinding", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("ClusterRoleBinding", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { 
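Each object control above repeats the same reconcile shape after the receiver refactor from `n.rec.Client` to `n.client`: attempt a `Create`, and when the API server answers `AlreadyExists`, push the desired state with an `Update`. A sketch of that idiom factored into a helper; `createOrUpdate` is a hypothetical name, since the operator inlines the pattern per resource kind:

```go
package controls

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createOrUpdate captures the idiom used by the object controls above:
// attempt a Create, and if the resource already exists, push the desired
// state with an Update instead.
func createOrUpdate(ctx context.Context, c client.Client, obj client.Object) error {
	if err := c.Create(ctx, obj); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
		return c.Update(ctx, obj)
	}
	return nil
}
```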
- err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -460,14 +474,14 @@ func ClusterRoleBinding(n ClusterPolicyController) (gpuv1.State, error) { obj.Subjects[idx].Namespace = n.operatorNamespace } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -490,11 +504,11 @@ func createConfigMap(n ClusterPolicyController, configMapIdx int) (gpuv1.State, obj := n.resources[state].ConfigMaps[configMapIdx].DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("ConfigMap", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("ConfigMap", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -536,18 +550,18 @@ func createConfigMap(n ClusterPolicyController, configMapIdx int) (gpuv1.State, } } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if !apierrors.IsAlreadyExists(err) { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err } logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -577,7 +591,7 @@ func ConfigMaps(n ClusterPolicyController) (gpuv1.State, error) { func (n ClusterPolicyController) getKernelVersionsMap() (map[string]string, error) { kernelVersionMap := make(map[string]string) ctx := n.ctx - logger := n.rec.Log.WithValues("Request.Namespace", "default", "Request.Name", "Node") + logger := n.logger.WithValues("Request.Namespace", "default", "Request.Name", "Node") // Filter only GPU nodes opts := []client.ListOption{ @@ -585,7 +599,7 @@ func (n ClusterPolicyController) getKernelVersionsMap() (map[string]string, erro } list := &corev1.NodeList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) 
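// For reference, the List call above narrows the query to GPU nodes with a
// label selector before kernel versions are aggregated. A minimal sketch of
// the same pattern, reusing the label key from the surrounding code (the
// helper itself is illustrative):
func listGPUNodes(ctx context.Context, c client.Client) (*corev1.NodeList, error) {
	nodes := &corev1.NodeList{}
	// Only nodes labeled as GPU nodes are of interest.
	opts := []client.ListOption{
		client.MatchingLabels{"nvidia.com/gpu.present": "true"},
	}
	if err := c.List(ctx, nodes, opts...); err != nil {
		return nil, err
	}
	return nodes, nil
}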
if err != nil { logger.Info("Could not get NodeList", "ERROR", err) return nil, err @@ -625,14 +639,14 @@ func (n ClusterPolicyController) getKernelVersionsMap() (map[string]string, erro func kernelFullVersion(n ClusterPolicyController) (string, string, string) { ctx := n.ctx - logger := n.rec.Log.WithValues("Request.Namespace", "default", "Request.Name", "Node") + logger := n.logger.WithValues("Request.Namespace", "default", "Request.Name", "Node") // We need the node labels to fetch the correct container opts := []client.ListOption{ client.MatchingLabels{"nvidia.com/gpu.present": "true"}, } list := &corev1.NodeList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) if err != nil { logger.Info("Could not get NodeList", "ERROR", err) return "", "", "" @@ -674,7 +688,7 @@ func kernelFullVersion(n ClusterPolicyController) (string, string, string) { } func preProcessDaemonSet(obj *appsv1.DaemonSet, n ClusterPolicyController) error { - logger := n.rec.Log.WithValues("Daemonset", obj.Name) + logger := n.logger.WithValues("Daemonset", obj.Name) transformations := map[string]func(*appsv1.DaemonSet, *gpuv1.ClusterPolicySpec, ClusterPolicyController) error{ "nvidia-driver-daemonset": TransformDriver, "nvidia-vgpu-manager-daemonset": TransformVGPUManager, @@ -708,6 +722,12 @@ func preProcessDaemonSet(obj *appsv1.DaemonSet, n ClusterPolicyController) error return err } + // transform the host-root and host-dev-char volumes if a custom host root is configured with the operator + transformForHostRoot(obj, n.singleton.Spec.HostPaths.RootFS) + + // transform the driver-root volume if a custom driver install dir is configured with the operator + transformForDriverInstallDir(obj, n.singleton.Spec.HostPaths.DriverInstallDir) + // apply per operand Daemonset config err = t(obj, &n.singleton.Spec, n) if err != nil { @@ -769,6 +789,81 @@ func applyCommonDaemonsetConfig(obj *appsv1.DaemonSet, config *gpuv1.ClusterPoli return nil } +// apply necessary transforms if a custom host root path is configured +func transformForHostRoot(obj *appsv1.DaemonSet, hostRoot string) { + if hostRoot == "" || hostRoot == "/" { + return + } + + transformHostRootVolume(obj, hostRoot) + transformHostDevCharVolume(obj, hostRoot) +} + +func transformHostRootVolume(obj *appsv1.DaemonSet, hostRoot string) { + containsHostRootVolume := false + for _, volume := range obj.Spec.Template.Spec.Volumes { + if volume.Name == "host-root" { + volume.HostPath.Path = hostRoot + containsHostRootVolume = true + break + } + } + + if !containsHostRootVolume { + return + } + + for index := range obj.Spec.Template.Spec.InitContainers { + setContainerEnv(&(obj.Spec.Template.Spec.InitContainers[index]), HostRootEnvName, hostRoot) + } + + for index := range obj.Spec.Template.Spec.Containers { + setContainerEnv(&(obj.Spec.Template.Spec.Containers[index]), HostRootEnvName, hostRoot) + } +} + +func transformHostDevCharVolume(obj *appsv1.DaemonSet, hostRoot string) { + for _, volume := range obj.Spec.Template.Spec.Volumes { + if volume.Name == "host-dev-char" { + volume.HostPath.Path = filepath.Join(hostRoot, "/dev/char") + break + } + } +} + +// apply necessary transforms if a custom driver install directory is configured +func transformForDriverInstallDir(obj *appsv1.DaemonSet, driverInstallDir string) { + if driverInstallDir == "" || driverInstallDir == DefaultDriverInstallDir { + return + } + + containsDriverInstallDirVolume := false + podSpec := obj.Spec.Template.Spec + for _, volume := range podSpec.Volumes { + if 
volume.Name == "driver-install-dir" { + volume.HostPath.Path = driverInstallDir + containsDriverInstallDirVolume = true + break + } + } + + if !containsDriverInstallDirVolume { + return + } + + for i, ctr := range podSpec.InitContainers { + if ctr.Name == "driver-validation" { + setContainerEnv(&(podSpec.InitContainers[i]), DriverInstallDirEnvName, driverInstallDir) + setContainerEnv(&(podSpec.InitContainers[i]), DriverInstallDirCtrPathEnvName, driverInstallDir) + for j, volumeMount := range ctr.VolumeMounts { + if volumeMount.Name == "driver-install-dir" { + podSpec.InitContainers[i].VolumeMounts[j].MountPath = driverInstallDir + } + } + } + } +} + // TransformGPUDiscoveryPlugin transforms GPU discovery daemonset with required config as per ClusterPolicy func TransformGPUDiscoveryPlugin(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, n ClusterPolicyController) error { // update validation container @@ -784,6 +879,14 @@ func TransformGPUDiscoveryPlugin(obj *appsv1.DaemonSet, config *gpuv1.ClusterPol } obj.Spec.Template.Spec.Containers[0].Image = img + // update image for IMEX init container + for i, initCtr := range obj.Spec.Template.Spec.InitContainers { + if initCtr.Name == "gpu-feature-discovery-imex-init" { + obj.Spec.Template.Spec.InitContainers[i].Image = img + break + } + } + // update image pull policy obj.Spec.Template.Spec.Containers[0].ImagePullPolicy = gpuv1.ImagePullPolicy(config.GPUFeatureDiscovery.ImagePullPolicy) @@ -1016,17 +1119,17 @@ func getOrCreateTrustedCAConfigMap(n ClusterPolicyController, name string) (*cor configMap.ObjectMeta.Labels = make(map[string]string) configMap.ObjectMeta.Labels["config.openshift.io/inject-trusted-cabundle"] = "true" - logger := n.rec.Log.WithValues("ConfigMap", configMap.ObjectMeta.Name, "Namespace", configMap.ObjectMeta.Namespace) + logger := n.logger.WithValues("ConfigMap", configMap.ObjectMeta.Name, "Namespace", configMap.ObjectMeta.Namespace) - if err := controllerutil.SetControllerReference(n.singleton, configMap, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, configMap, n.scheme); err != nil { return nil, err } found := &corev1.ConfigMap{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: configMap.ObjectMeta.Namespace, Name: configMap.ObjectMeta.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: configMap.ObjectMeta.Namespace, Name: configMap.ObjectMeta.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating") - err = n.rec.Client.Create(ctx, configMap) + err = n.client.Create(ctx, configMap) if err != nil { logger.Info("Couldn't create") return nil, fmt.Errorf("failed to create trusted CA bundle config map %q: %s", name, err) @@ -1144,15 +1247,43 @@ func TransformToolkit(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, n // configure runtime runtime := n.runtime.String() - setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), "RUNTIME", runtime) + err = transformForRuntime(obj, config, runtime, "nvidia-container-toolkit-ctr") + if err != nil { + return fmt.Errorf("error transforming toolkit daemonset: %w", err) + } + + // Update CRI-O hooks path to use default path for non-OCP cases + if n.openshift == "" && n.runtime == gpuv1.CRIO { + for index, volume := range obj.Spec.Template.Spec.Volumes { + if volume.Name == "crio-hooks" { + obj.Spec.Template.Spec.Volumes[index].HostPath.Path = "/usr/share/containers/oci/hooks.d" + } + } + } + return nil +} + +func transformForRuntime(obj *appsv1.DaemonSet,
config *gpuv1.ClusterPolicySpec, runtime string, containerName string) error { + var mainContainer *corev1.Container + for i, ctr := range obj.Spec.Template.Spec.Containers { + if ctr.Name == containerName { + mainContainer = &obj.Spec.Template.Spec.Containers[i] + break + } + } + if mainContainer == nil { + return fmt.Errorf("failed to find main container %q", containerName) + } + + setContainerEnv(mainContainer, "RUNTIME", runtime) if runtime == gpuv1.Containerd.String() { // Set the runtime class name that is to be configured for containerd - setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), "CONTAINERD_RUNTIME_CLASS", getRuntimeClass(config)) + setContainerEnv(mainContainer, "CONTAINERD_RUNTIME_CLASS", getRuntimeClass(config)) } // setup mounts for runtime config file - runtimeConfigFile, err := getRuntimeConfigFile(&(obj.Spec.Template.Spec.Containers[0]), runtime) + runtimeConfigFile, err := getRuntimeConfigFile(mainContainer, runtime) if err != nil { return fmt.Errorf("error getting path to runtime config file: %v", err) } @@ -1168,19 +1299,20 @@ func TransformToolkit(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, n configEnvvarName = "CRIO_CONFIG" } - setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), configEnvvarName, DefaultRuntimeConfigTargetDir+sourceConfigFileName) + setContainerEnv(mainContainer, "RUNTIME_CONFIG", DefaultRuntimeConfigTargetDir+sourceConfigFileName) + setContainerEnv(mainContainer, configEnvvarName, DefaultRuntimeConfigTargetDir+sourceConfigFileName) volMountConfigName := fmt.Sprintf("%s-config", runtime) volMountConfig := corev1.VolumeMount{Name: volMountConfigName, MountPath: DefaultRuntimeConfigTargetDir} - obj.Spec.Template.Spec.Containers[0].VolumeMounts = append(obj.Spec.Template.Spec.Containers[0].VolumeMounts, volMountConfig) + mainContainer.VolumeMounts = append(mainContainer.VolumeMounts, volMountConfig) configVol := corev1.Volume{Name: volMountConfigName, VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: path.Dir(runtimeConfigFile), Type: newHostPathType(corev1.HostPathDirectoryOrCreate)}}} obj.Spec.Template.Spec.Volumes = append(obj.Spec.Template.Spec.Volumes, configVol) // setup mounts for runtime socket file - runtimeSocketFile, err := getRuntimeSocketFile(&(obj.Spec.Template.Spec.Containers[0]), runtime) + runtimeSocketFile, err := getRuntimeSocketFile(mainContainer, runtime) if err != nil { - return fmt.Errorf("error getting path to runtime socket: %v", err) + return fmt.Errorf("error getting path to runtime socket: %w", err) } if runtimeSocketFile != "" { sourceSocketFileName := path.Base(runtimeSocketFile) @@ -1191,24 +1323,16 @@ func TransformToolkit(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, n } else if runtime == gpuv1.Docker.String() { socketEnvvarName = "DOCKER_SOCKET" } - setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), socketEnvvarName, DefaultRuntimeSocketTargetDir+sourceSocketFileName) + setContainerEnv(mainContainer, "RUNTIME_SOCKET", DefaultRuntimeSocketTargetDir+sourceSocketFileName) + setContainerEnv(mainContainer, socketEnvvarName, DefaultRuntimeSocketTargetDir+sourceSocketFileName) volMountSocketName := fmt.Sprintf("%s-socket", runtime) volMountSocket := corev1.VolumeMount{Name: volMountSocketName, MountPath: DefaultRuntimeSocketTargetDir} - obj.Spec.Template.Spec.Containers[0].VolumeMounts = append(obj.Spec.Template.Spec.Containers[0].VolumeMounts, volMountSocket) + mainContainer.VolumeMounts = append(mainContainer.VolumeMounts, volMountSocket) socketVol := 
corev1.Volume{Name: volMountSocketName, VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: path.Dir(runtimeSocketFile)}}} obj.Spec.Template.Spec.Volumes = append(obj.Spec.Template.Spec.Volumes, socketVol) } - - // Update CRI-O hooks path to use default path for non OCP cases - if n.openshift == "" && n.runtime == gpuv1.CRIO { - for index, volume := range obj.Spec.Template.Spec.Volumes { - if volume.Name == "crio-hooks" { - obj.Spec.Template.Spec.Volumes[index].HostPath.Path = "/usr/share/containers/oci/hooks.d" - } - } - } return nil } @@ -1277,8 +1401,21 @@ func TransformDevicePlugin(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpe setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), DeviceListStrategyEnvName, "envvar,cdi-annotations") setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), CDIAnnotationPrefixEnvName, "nvidia.cdi.k8s.io/") if config.Toolkit.IsEnabled() { - setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), NvidiaCTKPathEnvName, filepath.Join(config.Toolkit.InstallDir, "toolkit/nvidia-ctk")) + setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), NvidiaCDIHookPathEnvName, filepath.Join(config.Toolkit.InstallDir, "toolkit/nvidia-cdi-hook")) + } + } + + // update MPS volumes and set MPS_ROOT env var if a custom MPS root is configured + if config.DevicePlugin.MPS != nil && config.DevicePlugin.MPS.Root != "" && + config.DevicePlugin.MPS.Root != DefaultMPSRoot { + for i, volume := range obj.Spec.Template.Spec.Volumes { + if volume.Name == "mps-root" { + obj.Spec.Template.Spec.Volumes[i].HostPath.Path = config.DevicePlugin.MPS.Root + } else if volume.Name == "mps-shm" { + obj.Spec.Template.Spec.Volumes[i].HostPath.Path = filepath.Join(config.DevicePlugin.MPS.Root, "shm") + } } + setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), MPSRootEnvName, config.DevicePlugin.MPS.Root) } return nil @@ -1346,6 +1483,18 @@ func TransformMPSControlDaemon(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolic // update env required for MIG support applyMIGConfiguration(mainContainer, config.MIG.Strategy) + // update MPS volumes if a custom MPS root is configured + if config.DevicePlugin.MPS != nil && config.DevicePlugin.MPS.Root != "" && + config.DevicePlugin.MPS.Root != DefaultMPSRoot { + for i, volume := range obj.Spec.Template.Spec.Volumes { + if volume.Name == "mps-root" { + obj.Spec.Template.Spec.Volumes[i].HostPath.Path = config.DevicePlugin.MPS.Root + } else if volume.Name == "mps-shm" { + obj.Spec.Template.Spec.Volumes[i].HostPath.Path = filepath.Join(config.DevicePlugin.MPS.Root, "shm") + } + } + } + return nil } @@ -1432,14 +1581,7 @@ func TransformDCGMExporter(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpe // check if DCGM hostengine is enabled as a separate Pod and setup env accordingly if config.DCGM.IsEnabled() { - // enable hostNetwork for communication with external DCGM using NODE_IP(localhost) - obj.Spec.Template.Spec.HostNetwork = true - // set DCGM host engine env. 
localhost will be substituted during pod runtime - dcgmHostPort := int32(DCGMDefaultHostPort) - if config.DCGM.HostPort != 0 { - dcgmHostPort = config.DCGM.HostPort - } - setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), DCGMRemoteEngineEnvName, fmt.Sprintf("localhost:%d", dcgmHostPort)) + setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), DCGMRemoteEngineEnvName, fmt.Sprintf("nvidia-dcgm:%d", DCGMDefaultPort)) } else { // case for DCGM running on the host itself(DGX BaseOS) remoteEngine := getContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), DCGMRemoteEngineEnvName) @@ -1448,6 +1590,7 @@ func TransformDCGMExporter(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpe obj.Spec.Template.Spec.HostNetwork = true } } + // set RuntimeClass for supported runtimes setRuntimeClass(&obj.Spec.Template.Spec, n.runtime, config.Operator.RuntimeClass) @@ -1565,16 +1708,6 @@ func TransformDCGM(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, n Clu } } - // set host port to bind for DCGM engine - for i, port := range obj.Spec.Template.Spec.Containers[0].Ports { - if port.Name == "dcgm" { - obj.Spec.Template.Spec.Containers[0].Ports[i].HostPort = DCGMDefaultHostPort - if config.DCGM.HostPort != 0 { - obj.Spec.Template.Spec.Containers[0].Ports[i].HostPort = config.DCGM.HostPort - } - } - } - // set RuntimeClass for supported runtimes setRuntimeClass(&obj.Spec.Template.Spec, n.runtime, config.Operator.RuntimeClass) @@ -1659,6 +1792,9 @@ func TransformMIGManager(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, // update env required for CDI support if config.CDI.IsEnabled() { setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), CDIEnabledEnvName, "true") + if config.Toolkit.IsEnabled() { + setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), NvidiaCDIHookPathEnvName, filepath.Join(config.Toolkit.InstallDir, "toolkit/nvidia-cdi-hook")) + } } return nil @@ -1721,47 +1857,20 @@ func TransformKataManager(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec // setup mounts for runtime config file runtime := n.runtime.String() - runtimeConfigFile, err := getRuntimeConfigFile(&(obj.Spec.Template.Spec.Containers[0]), runtime) + err = transformForRuntime(obj, config, runtime, "nvidia-kata-manager") if err != nil { - return fmt.Errorf("error getting path to runtime config file: %v", err) + return fmt.Errorf("error transforming kata-manager daemonset: %w", err) } - sourceConfigFileName := path.Base(runtimeConfigFile) - setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), "CONTAINERD_CONFIG", filepath.Join(DefaultRuntimeConfigTargetDir, sourceConfigFileName)) - - volMountConfigName := fmt.Sprintf("%s-config", runtime) - volMountConfig := corev1.VolumeMount{Name: volMountConfigName, MountPath: DefaultRuntimeConfigTargetDir} - obj.Spec.Template.Spec.Containers[0].VolumeMounts = append(obj.Spec.Template.Spec.Containers[0].VolumeMounts, volMountConfig) - - configVol := corev1.Volume{Name: volMountConfigName, VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: path.Dir(runtimeConfigFile), Type: newHostPathType(corev1.HostPathDirectoryOrCreate)}}} - obj.Spec.Template.Spec.Volumes = append(obj.Spec.Template.Spec.Volumes, configVol) - - // setup mounts for runtime socket file - runtimeSocketFile, err := getRuntimeSocketFile(&(obj.Spec.Template.Spec.Containers[0]), runtime) - if err != nil { - return fmt.Errorf("error getting path to runtime socket: %v", err) - } - sourceSocketFileName := path.Base(runtimeSocketFile) -
setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), "CONTAINERD_SOCKET", filepath.Join(DefaultRuntimeSocketTargetDir, sourceSocketFileName)) - - volMountSocketName := fmt.Sprintf("%s-socket", runtime) - volMountSocket := corev1.VolumeMount{Name: volMountSocketName, MountPath: DefaultRuntimeSocketTargetDir} - obj.Spec.Template.Spec.Containers[0].VolumeMounts = append(obj.Spec.Template.Spec.Containers[0].VolumeMounts, volMountSocket) - - socketVol := corev1.Volume{Name: volMountSocketName, VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: path.Dir(runtimeSocketFile)}}} - obj.Spec.Template.Spec.Volumes = append(obj.Spec.Template.Spec.Volumes, socketVol) // Compute hash of kata manager config and add an annotation with the value. // If the kata config changes, a new revision of the daemonset will be // created and thus the kata-manager pods will restart with the updated config. - hash, err := hashstructure.Hash(config.KataManager.Config, nil) - if err != nil { - return fmt.Errorf("failed to get hash of kata-manager config: %v", err) - } + hash := utils.GetObjectHash(config.KataManager.Config) if obj.Spec.Template.Annotations == nil { obj.Spec.Template.Annotations = make(map[string]string) } - obj.Spec.Template.Annotations[KataManagerAnnotationHashKey] = strconv.FormatUint(hash, 16) + obj.Spec.Template.Annotations[KataManagerAnnotationHashKey] = hash return nil } @@ -1929,7 +2038,7 @@ func TransformVGPUDeviceManager(obj *appsv1.DaemonSet, config *gpuv1.ClusterPoli // TransformValidator transforms nvidia-operator-validator daemonset with required config as per ClusterPolicy func TransformValidator(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, n ClusterPolicyController) error { - err := TransformValidatorShared(obj, config, n) + err := TransformValidatorShared(obj, config) if err != nil { return fmt.Errorf("%v", err) } @@ -1954,7 +2063,7 @@ func TransformValidator(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, } if validatorErr != nil { - n.rec.Log.Info("WARN: errors transforming the validator containers: %v", validatorErr) + n.logger.Info("WARN: errors transforming the validator containers: %v", validatorErr) } return nil @@ -1962,7 +2071,7 @@ func TransformValidator(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, // TransformSandboxValidator transforms nvidia-sandbox-validator daemonset with required config as per ClusterPolicy func TransformSandboxValidator(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, n ClusterPolicyController) error { - err := TransformValidatorShared(obj, config, n) + err := TransformValidatorShared(obj, config) if err != nil { return fmt.Errorf("%v", err) } @@ -1983,14 +2092,14 @@ func TransformSandboxValidator(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolic } if validatorErr != nil { - n.rec.Log.Info("WARN: errors transforming the validator containers: %v", validatorErr) + n.logger.Info("WARN: errors transforming the validator containers: %v", validatorErr) } return nil } // TransformValidatorShared applies general transformations to the validator daemonset with required config as per ClusterPolicy -func TransformValidatorShared(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, n ClusterPolicyController) error { +func TransformValidatorShared(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec) error { // update image image, err := gpuv1.ImagePath(&config.Validator) if err != nil { @@ -2331,10 +2440,7 @@ func isCustomPluginConfigSet(pluginConfig *gpuv1.DevicePluginConfig) bool { // adds 
shared volume mounts required for custom plugin config provided via a ConfigMap func addSharedMountsForPluginConfig(container *corev1.Container, config *gpuv1.DevicePluginConfig) { - emptyDirMount := corev1.VolumeMount{Name: "config", MountPath: "/config"} configVolMount := corev1.VolumeMount{Name: config.Name, MountPath: "/available-configs"} - - container.VolumeMounts = append(container.VolumeMounts, emptyDirMount) container.VolumeMounts = append(container.VolumeMounts, configVolMount) } @@ -2370,15 +2476,19 @@ func handleDevicePluginConfig(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicy continue } setContainerEnv(&obj.Spec.Template.Spec.Containers[i], "CONFIG_FILE", "/config/config.yaml") - // setup sharedvolume(emptydir) for main container + // add configmap volume mount addSharedMountsForPluginConfig(&obj.Spec.Template.Spec.Containers[i], config.DevicePlugin.Config) } - // Enable process ns sharing for PID access - shareProcessNamespace := true - obj.Spec.Template.Spec.ShareProcessNamespace = &shareProcessNamespace - // setup volumes from configmap and shared emptyDir + + // if hostPID is already set, we skip setting the shareProcessNamespace field + // for context, go to https://github.com/kubernetes-client/go/blob/master/kubernetes/docs/V1PodSpec.md + if !obj.Spec.Template.Spec.HostPID { + // Enable process ns sharing for PID access + shareProcessNamespace := true + obj.Spec.Template.Spec.ShareProcessNamespace = &shareProcessNamespace + } + // add configmap volume obj.Spec.Template.Spec.Volumes = append(obj.Spec.Template.Spec.Volumes, createConfigMapVolume(config.DevicePlugin.Config.Name, nil)) - obj.Spec.Template.Spec.Volumes = append(obj.Spec.Template.Spec.Volumes, createEmptyDirVolume("config")) // apply env/volume changes to initContainer err := transformConfigManagerInitContainer(obj, config) @@ -2546,7 +2656,7 @@ func transformGDSContainer(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpe continue } if config.GPUDirectStorage == nil || !config.GPUDirectStorage.IsEnabled() { - n.rec.Log.Info("GPUDirect Storage is disabled") + n.logger.Info("GPUDirect Storage is disabled") // remove nvidia-fs sidecar container from driver Daemonset if GDS is not enabled obj.Spec.Template.Spec.Containers = append(obj.Spec.Template.Spec.Containers[:i], obj.Spec.Template.Spec.Containers[i+1:]...) return nil @@ -2628,7 +2738,7 @@ func transformGDRCopyContainer(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolic continue } if config.GDRCopy == nil || !config.GDRCopy.IsEnabled() { - n.rec.Log.Info("GDRCopy is disabled") + n.logger.Info("GDRCopy is disabled") // remove nvidia-gdrcopy sidecar container from driver Daemonset if gdrcopy is not enabled obj.Spec.Template.Spec.Containers = append(obj.Spec.Template.Spec.Containers[:i], obj.Spec.Template.Spec.Containers[i+1:]...) 
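// The sidecar removals above (nvidia-fs, nvidia-gdrcopy) rely on the standard
// Go idiom for deleting element i from a slice in place; the surrounding code
// returns immediately afterwards, so the invalidated loop index is never
// reused. Hedged, generic sketch (helper name is illustrative):
func removeContainerAt(spec *corev1.PodSpec, i int) {
	// append re-slices around index i, preserving the order of the
	// remaining containers.
	spec.Containers = append(spec.Containers[:i], spec.Containers[i+1:]...)
}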
return nil @@ -2747,12 +2857,12 @@ func transformOpenShiftDriverToolkitContainer(obj *appsv1.DaemonSet, config *gpu return nil, nil } - return nil, fmt.Errorf(fmt.Sprintf("could not find the '%s' container", name)) + return nil, fmt.Errorf("could not find the '%s' container", name) } if !n.ocpDriverToolkit.enabled { if n.ocpDriverToolkit.requested { - n.rec.Log.Info("OpenShift DriverToolkit was requested but could not be enabled (dependencies missing)") + n.logger.Info("OpenShift DriverToolkit was requested but could not be enabled (dependencies missing)") } /* remove OpenShift Driver Toolkit side-car container from the Driver DaemonSet */ @@ -2792,18 +2902,18 @@ func transformOpenShiftDriverToolkitContainer(obj *appsv1.DaemonSet, config *gpu if config.GPUDirectStorage != nil && config.GPUDirectStorage.IsEnabled() { setContainerEnv(driverToolkitContainer, "GDS_ENABLED", "true") - n.rec.Log.V(2).Info("transformOpenShiftDriverToolkitContainer", "GDS_ENABLED", config.GPUDirectStorage.IsEnabled()) + n.logger.V(2).Info("transformOpenShiftDriverToolkitContainer", "GDS_ENABLED", config.GPUDirectStorage.IsEnabled()) } if config.GDRCopy != nil && config.GDRCopy.IsEnabled() { setContainerEnv(driverToolkitContainer, "GDRCOPY_ENABLED", "true") - n.rec.Log.V(2).Info("transformOpenShiftDriverToolkitContainer", "GDRCOPY_ENABLED", "true") + n.logger.V(2).Info("transformOpenShiftDriverToolkitContainer", "GDRCOPY_ENABLED", "true") } image := n.ocpDriverToolkit.rhcosDriverToolkitImages[n.ocpDriverToolkit.currentRhcosVersion] if image != "" { driverToolkitContainer.Image = image - n.rec.Log.Info("DriverToolkit", "image", driverToolkitContainer.Image) + n.logger.Info("DriverToolkit", "image", driverToolkitContainer.Image) } else { /* RHCOS tag missing in the Driver-Toolkit imagestream, setup fallback */ obj.ObjectMeta.Labels["openshift.driver-toolkit.rhcos-image-missing"] = "true" @@ -2814,7 +2924,7 @@ func transformOpenShiftDriverToolkitContainer(obj *appsv1.DaemonSet, config *gpu setContainerEnv(mainContainer, "RHCOS_VERSION", rhcosVersion) setContainerEnv(driverToolkitContainer, "RHCOS_IMAGE_MISSING", "true") - n.rec.Log.Info("WARNING: DriverToolkit image tag missing. Version-specific fallback mode enabled.", "rhcosVersion", rhcosVersion) + n.logger.Info("WARNING: DriverToolkit image tag missing. 
Version-specific fallback mode enabled.", "rhcosVersion", rhcosVersion) } /* prepare the main container to start from the DriverToolkit entrypoint */ @@ -2969,7 +3079,7 @@ func createConfigMapVolumeMounts(n ClusterPolicyController, configMapName string // get the ConfigMap cm := &corev1.ConfigMap{} opts := client.ObjectKey{Namespace: n.operatorNamespace, Name: configMapName} - err := n.rec.Client.Get(ctx, opts, cm) + err := n.client.Get(ctx, opts, cm) if err != nil { return nil, nil, fmt.Errorf("ERROR: could not get ConfigMap %s from client: %v", configMapName, err) } @@ -3006,15 +3116,6 @@ func createConfigMapVolume(configMapName string, itemsToInclude []corev1.KeyToPa return corev1.Volume{Name: configMapName, VolumeSource: volumeSource} } -func createEmptyDirVolume(volumeName string) corev1.Volume { - return corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - } -} - func transformDriverContainer(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, n ClusterPolicyController) error { driverIndex := 0 driverCtrFound := false @@ -3204,7 +3305,7 @@ func transformDriverContainer(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicy // set up subscription entitlements for RHEL(using K8s with a non-CRIO runtime) and SLES if (release["ID"] == "rhel" && n.openshift == "" && n.runtime != gpuv1.CRIO) || release["ID"] == "sles" { - n.rec.Log.Info("Mounting subscriptions into the driver container", "OS", release["ID"]) + n.logger.Info("Mounting subscriptions into the driver container", "OS", release["ID"]) pathToVolumeSource, err := getSubscriptionPathsToVolumeSources() if err != nil { return fmt.Errorf("ERROR: failed to get path items for subscription entitlements: %v", err) @@ -3402,19 +3503,19 @@ func isDeploymentReady(name string, n ClusterPolicyController) gpuv1.State { opts := []client.ListOption{ client.MatchingLabels{"app": name}, } - n.rec.Log.V(1).Info("Deployment", "LabelSelector", fmt.Sprintf("app=%s", name)) + n.logger.V(1).Info("Deployment", "LabelSelector", fmt.Sprintf("app=%s", name)) list := &appsv1.DeploymentList{} - err := n.rec.Client.List(n.ctx, list, opts...) + err := n.client.List(n.ctx, list, opts...) 
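// The deployment readiness decision below reduces to: at least one Deployment
// matched the "app" label, and it reports no unavailable replicas. As a
// hedged predicate (illustrative only, not part of this patch):
func deploymentReady(list *appsv1.DeploymentList) bool {
	return len(list.Items) > 0 && list.Items[0].Status.UnavailableReplicas == 0
}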
if err != nil { - n.rec.Log.Info("Could not get DeploymentList", err) + n.logger.Info("Could not get DeploymentList", err) } - n.rec.Log.V(1).Info("Deployment", "NumberOfDeployment", len(list.Items)) + n.logger.V(1).Info("Deployment", "NumberOfDeployment", len(list.Items)) if len(list.Items) == 0 { return gpuv1.NotReady } ds := list.Items[0] - n.rec.Log.V(1).Info("Deployment", "NumberUnavailable", ds.Status.UnavailableReplicas) + n.logger.V(1).Info("Deployment", "NumberUnavailable", ds.Status.UnavailableReplicas) if ds.Status.UnavailableReplicas != 0 { return gpuv1.NotReady @@ -3426,19 +3527,19 @@ func isDeploymentReady(name string, n ClusterPolicyController) gpuv1.State { func isDaemonSetReady(name string, n ClusterPolicyController) gpuv1.State { ctx := n.ctx ds := &appsv1.DaemonSet{} - n.rec.Log.V(2).Info("checking daemonset for readiness", "name", name) - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: n.operatorNamespace, Name: name}, ds) + n.logger.V(2).Info("checking daemonset for readiness", "name", name) + err := n.client.Get(ctx, types.NamespacedName{Namespace: n.operatorNamespace, Name: name}, ds) if err != nil { - n.rec.Log.Error(err, "could not get daemonset", "name", name) + n.logger.Error(err, "could not get daemonset", "name", name) } if ds.Status.DesiredNumberScheduled == 0 { - n.rec.Log.V(2).Info("Daemonset has desired pods of 0", "name", name) + n.logger.V(2).Info("Daemonset has desired pods of 0", "name", name) return gpuv1.Ready } if ds.Status.NumberUnavailable != 0 { - n.rec.Log.Info("daemonset not ready", "name", name) + n.logger.Info("daemonset not ready", "name", name) return gpuv1.NotReady } @@ -3449,14 +3550,14 @@ func isDaemonSetReady(name string, n ClusterPolicyController) gpuv1.State { opts := []client.ListOption{client.MatchingLabels(ds.Spec.Template.ObjectMeta.Labels)} - n.rec.Log.V(2).Info("Pod", "LabelSelector", fmt.Sprintf("app=%s", name)) + n.logger.V(2).Info("Pod", "LabelSelector", fmt.Sprintf("app=%s", name)) list := &corev1.PodList{} - err = n.rec.Client.List(ctx, list, opts...) + err = n.client.List(ctx, list, opts...) 
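// The pods listed above are then compared against the daemonset's current
// template revision. A sketch of the per-pod check, assuming the hash is read
// from the standard controller-revision-hash label (the actual
// getPodControllerRevisionHash helper may differ):
func podMatchesRevision(pod *corev1.Pod, dsRevisionHash string) bool {
	podHash := pod.Labels[appsv1.ControllerRevisionHashLabelKey] // "controller-revision-hash"
	return podHash == dsRevisionHash && pod.Status.Phase == corev1.PodRunning
}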
if err != nil { - n.rec.Log.Info("Could not get PodList", err) + n.logger.Info("Could not get PodList", err) return gpuv1.NotReady } - n.rec.Log.V(2).Info("Pod", "NumberOfPods", len(list.Items)) + n.logger.V(2).Info("Pod", "NumberOfPods", len(list.Items)) if len(list.Items) == 0 { return gpuv1.NotReady } @@ -3464,21 +3565,21 @@ func isDaemonSetReady(name string, n ClusterPolicyController) gpuv1.State { dsPods := getPodsOwnedbyDaemonset(ds, list.Items, n) daemonsetRevisionHash, err := getDaemonsetControllerRevisionHash(ctx, ds, n) if err != nil { - n.rec.Log.Error( + n.logger.Error( err, "Failed to get daemonset template revision hash", "daemonset", ds) return gpuv1.NotReady } - n.rec.Log.V(2).Info("daemonset template revision hash", "hash", daemonsetRevisionHash) + n.logger.V(2).Info("daemonset template revision hash", "hash", daemonsetRevisionHash) for _, pod := range dsPods { pod := pod podRevisionHash, err := getPodControllerRevisionHash(ctx, &pod) if err != nil { - n.rec.Log.Error( + n.logger.Error( err, "Failed to get pod template revision hash", "pod", pod) return gpuv1.NotReady } - n.rec.Log.V(2).Info("pod template revision hash", "hash", podRevisionHash) + n.logger.V(2).Info("pod template revision hash", "hash", podRevisionHash) // check if the revision hashes are matching and pod is in running state if podRevisionHash != daemonsetRevisionHash || pod.Status.Phase != "Running" { @@ -3505,13 +3606,13 @@ func getPodsOwnedbyDaemonset(ds *appsv1.DaemonSet, pods []corev1.Pod, n ClusterP dsPodList := []corev1.Pod{} for _, pod := range pods { if pod.OwnerReferences == nil || len(pod.OwnerReferences) < 1 { - n.rec.Log.Info("Driver Pod has no owner DaemonSet", "pod", pod.Name) + n.logger.Info("Driver Pod has no owner DaemonSet", "pod", pod.Name) continue } - n.rec.Log.V(2).Info("Pod", "pod", pod.Name, "owner", pod.OwnerReferences[0].Name) + n.logger.V(2).Info("Pod", "pod", pod.Name, "owner", pod.OwnerReferences[0].Name) if ds.UID != pod.OwnerReferences[0].UID { - n.rec.Log.Info("Driver Pod is not owned by a Driver DaemonSet", + n.logger.Info("Driver Pod is not owned by a Driver DaemonSet", "pod", pod, "actual owner", pod.OwnerReferences[0]) continue } @@ -3535,12 +3636,12 @@ func getDaemonsetControllerRevisionHash(ctx context.Context, daemonset *appsv1.D client.InNamespace(n.operatorNamespace), } list := &appsv1.ControllerRevisionList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) 
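// Among the listed ControllerRevisions, the newest one (highest .Revision)
// identifies the daemonset's current template hash. A hedged sketch of that
// selection, assuming the hash is carried in the controller-revision-hash
// label; note it sorts its argument in place (imports: sort,
// k8s.io/api/apps/v1 as appsv1):
func latestRevisionHash(revisions []appsv1.ControllerRevision) string {
	if len(revisions) == 0 {
		return ""
	}
	sort.Slice(revisions, func(i, j int) bool {
		return revisions[i].Revision < revisions[j].Revision
	})
	return revisions[len(revisions)-1].Labels[appsv1.ControllerRevisionHashLabelKey]
}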
if err != nil { return "", fmt.Errorf("error getting controller revision list for daemonset %s: %v", daemonset.Name, err) } - n.rec.Log.V(2).Info("obtained controller revisions", "Daemonset", daemonset.Name, "len", len(list.Items)) + n.logger.V(2).Info("obtained controller revisions", "Daemonset", daemonset.Name, "len", len(list.Items)) var revisions []appsv1.ControllerRevision for _, controllerRevision := range list.Items { @@ -3569,11 +3670,11 @@ func Deployment(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].Deployment.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("Deployment", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("Deployment", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -3581,14 +3682,14 @@ func Deployment(n ClusterPolicyController) (gpuv1.State, error) { return gpuv1.Disabled, nil } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -3607,22 +3708,22 @@ func ocpHasDriverToolkitImageStream(n *ClusterPolicyController) (bool, error) { ctx := n.ctx found := &apiimagev1.ImageStream{} name := "driver-toolkit" - namespace := "openshift" - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, found) + namespace := consts.OpenshiftNamespace + err := n.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, found) if err != nil { if apierrors.IsNotFound(err) { - n.rec.Log.Info("ocpHasDriverToolkitImageStream: driver-toolkit imagestream not found", + n.logger.Info("ocpHasDriverToolkitImageStream: driver-toolkit imagestream not found", "Name", name, "Namespace", namespace) return false, nil } - n.rec.Log.Info("Couldn't get the driver-toolkit imagestream", "Error", err) + n.logger.Info("Couldn't get the driver-toolkit imagestream", "Error", err) return false, err } - n.rec.Log.V(1).Info("ocpHasDriverToolkitImageStream: driver-toolkit imagestream found") + n.logger.V(1).Info("ocpHasDriverToolkitImageStream: driver-toolkit imagestream found") isBroken := false for _, tag := range found.Spec.Tags { if tag.Name == "" { @@ -3632,11 +3733,11 @@ func ocpHasDriverToolkitImageStream(n *ClusterPolicyController) (bool, error) { if tag.Name == "latest" || tag.From == nil { continue } - n.rec.Log.V(1).Info("ocpHasDriverToolkitImageStream: tag", tag.Name, tag.From.Name) + n.logger.V(1).Info("ocpHasDriverToolkitImageStream: tag", tag.Name, tag.From.Name) n.ocpDriverToolkit.rhcosDriverToolkitImages[tag.Name] = tag.From.Name } if isBroken { - n.rec.Log.Info("WARNING: ocpHasDriverToolkitImageStream: driver-toolkit imagestream is broken, see RHBZ#2015024") + n.logger.Info("WARNING: ocpHasDriverToolkitImageStream: driver-toolkit imagestream is broken, see RHBZ#2015024") 
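// openshiftDriverToolkitIsBroken is used as a boolean gauge just below
// (Set(1) when the imagestream is broken; presumably reset in the else
// branch). A representative definition, assuming
// github.com/prometheus/client_golang/prometheus; the metric name is an
// assumption, not taken from this patch:
var openshiftDriverToolkitIsBroken = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "gpu_operator_openshift_driver_toolkit_imagestream_broken", // assumed name
	Help: "1 if the OpenShift driver-toolkit imagestream is broken (see RHBZ#2015024), 0 otherwise.",
})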
n.operatorMetrics.openshiftDriverToolkitIsBroken.Set(1) } else { @@ -3653,7 +3754,7 @@ func (n ClusterPolicyController) cleanupAllDriverDaemonSets(ctx context.Context) // is allowed when specifying ListOptions or DeleteOptions. // See GH issue: https://github.com/kubernetes-sigs/controller-runtime/issues/612 list := &appsv1.DaemonSetList{} - err := n.rec.Client.List(ctx, list, client.MatchingFields{clusterPolicyControllerIndexKey: n.singleton.Name}) + err := n.client.List(ctx, list, client.MatchingFields{clusterPolicyControllerIndexKey: n.singleton.Name}) if err != nil { return fmt.Errorf("failed to list all NVIDIA driver daemonsets owned by ClusterPolicy: %w", err) } @@ -3662,8 +3763,8 @@ func (n ClusterPolicyController) cleanupAllDriverDaemonSets(ctx context.Context) ds := ds // filter out DaemonSets which are not the NVIDIA driver/vgpu-manager if strings.HasPrefix(ds.Name, commonDriverDaemonsetName) || strings.HasPrefix(ds.Name, commonVGPUManagerDaemonsetName) { - n.rec.Log.Info("Deleting NVIDIA driver daemonset owned by ClusterPolicy", "Name", ds.Name) - err = n.rec.Client.Delete(ctx, &ds) + n.logger.Info("Deleting NVIDIA driver daemonset owned by ClusterPolicy", "Name", ds.Name) + err = n.client.Delete(ctx, &ds) if err != nil { return fmt.Errorf("error deleting NVIDIA driver daemonset: %w", err) } @@ -3683,32 +3784,37 @@ func (n ClusterPolicyController) cleanupStalePrecompiledDaemonsets(ctx context.C }, } list := &appsv1.DaemonSetList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) if err != nil { - n.rec.Log.Error(err, "could not get daemonset list") + n.logger.Error(err, "could not get daemonset list") return err } for idx := range list.Items { - name := list.Items[idx].ObjectMeta.Name - desiredNumberScheduled := list.Items[idx].Status.DesiredNumberScheduled + ds := list.Items[idx] + name := ds.ObjectMeta.Name + desiredNumberScheduled := ds.Status.DesiredNumberScheduled + numberMisscheduled := ds.Status.NumberMisscheduled - n.rec.Log.V(1).Info("Driver DaemonSet found", + n.logger.V(1).Info("Driver DaemonSet found", "Name", name, - "desiredNumberScheduled", desiredNumberScheduled) + "Status.DesiredNumberScheduled", desiredNumberScheduled) - if desiredNumberScheduled != 0 { - n.rec.Log.Info("Driver DaemonSet active, keep it.", - "Name", name, "Status.DesiredNumberScheduled", desiredNumberScheduled) - continue - } + // We consider a daemonset stale only if its desired pod count is zero and no pods are currently mis-scheduled + // As per the Kubernetes docs, a daemonset pod is mis-scheduled when an already scheduled pod no longer satisfies + // node affinity constraints or has un-tolerated taints, e.g. 
"node.kubernetes.io/unreachable:NoSchedule" + if desiredNumberScheduled == 0 && numberMisscheduled == 0 { + n.logger.Info("Delete Driver DaemonSet", "Name", name) - n.rec.Log.Info("Delete Driver DaemonSet", "Name", name) - - err = n.rec.Client.Delete(ctx, &list.Items[idx]) - if err != nil { - n.rec.Log.Info("ERROR: Could not get delete DaemonSet", - "Name", name, "Error", err) + err = n.client.Delete(ctx, &ds) + if err != nil { + n.logger.Error(err, "Could not get delete DaemonSet", + "Name", name) + } + } else { + n.logger.Info("Driver DaemonSet active, keep it.", + "Name", name, + "Status.DesiredNumberScheduled", desiredNumberScheduled) } } return nil @@ -3721,23 +3827,23 @@ func (n ClusterPolicyController) cleanupStalePrecompiledDaemonsets(ctx context.C func precompiledDriverDaemonsets(ctx context.Context, n ClusterPolicyController) (gpuv1.State, []error) { overallState := gpuv1.Ready var errs []error - n.rec.Log.Info("cleaning any stale precompiled driver daemonsets") + n.logger.Info("cleaning any stale precompiled driver daemonsets") err := n.cleanupStalePrecompiledDaemonsets(ctx) if err != nil { return gpuv1.NotReady, append(errs, err) } - n.rec.Log.V(1).Info("preparing pre-compiled driver daemonsets") + n.logger.V(1).Info("preparing pre-compiled driver daemonsets") for kernelVersion, os := range n.kernelVersionMap { // set current kernel version n.currentKernelVersion = kernelVersion - n.rec.Log.Info("preparing pre-compiled driver daemonset", + n.logger.Info("preparing pre-compiled driver daemonset", "version", n.currentKernelVersion, "os", os) state, err := DaemonSet(n) if state != gpuv1.Ready { - n.rec.Log.Info("pre-compiled driver daemonset not ready", + n.logger.Info("pre-compiled driver daemonset not ready", "version", n.currentKernelVersion, "state", state) overallState = state } @@ -3761,7 +3867,7 @@ func (n ClusterPolicyController) ocpDriverToolkitDaemonSets(ctx context.Context) return gpuv1.NotReady, err } - n.rec.Log.V(1).Info("preparing DriverToolkit DaemonSet", + n.logger.V(1).Info("preparing DriverToolkit DaemonSet", "rhcos", n.ocpDriverToolkit.rhcosVersions) overallState := gpuv1.Ready @@ -3770,12 +3876,12 @@ func (n ClusterPolicyController) ocpDriverToolkitDaemonSets(ctx context.Context) for rhcosVersion := range n.ocpDriverToolkit.rhcosVersions { n.ocpDriverToolkit.currentRhcosVersion = rhcosVersion - n.rec.Log.V(1).Info("preparing DriverToolkit DaemonSet", + n.logger.V(1).Info("preparing DriverToolkit DaemonSet", "rhcosVersion", n.ocpDriverToolkit.currentRhcosVersion) state, err := DaemonSet(n) - n.rec.Log.V(1).Info("preparing DriverToolkit DaemonSet", + n.logger.V(1).Info("preparing DriverToolkit DaemonSet", "rhcosVersion", n.ocpDriverToolkit.currentRhcosVersion, "state", state) if state != gpuv1.Ready { overallState = state @@ -3796,7 +3902,7 @@ func (n ClusterPolicyController) ocpDriverToolkitDaemonSets(ctx context.Context) if image != "" { continue } - n.rec.Log.Info("WARNINGs: RHCOS driver-toolkit image missing. Version-specific fallback mode enabled.", "rhcosVersion", rhcosVersion) + n.logger.Info("WARNINGs: RHCOS driver-toolkit image missing. Version-specific fallback mode enabled.", "rhcosVersion", rhcosVersion) tagsMissing = true } if tagsMissing { @@ -3823,9 +3929,9 @@ func (n ClusterPolicyController) ocpCleanupStaleDriverToolkitDaemonSets(ctx cont } list := &appsv1.DaemonSetList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) 
if err != nil { - n.rec.Log.Info("ERROR: Could not get DaemonSetList", "Error", err) + n.logger.Info("ERROR: Could not get DaemonSetList", "Error", err) return err } @@ -3835,30 +3941,30 @@ func (n ClusterPolicyController) ocpCleanupStaleDriverToolkitDaemonSets(ctx cont clusterHasRhcosVersion, clusterOk := n.ocpDriverToolkit.rhcosVersions[dsRhcosVersion] desiredNumberScheduled := list.Items[idx].Status.DesiredNumberScheduled - n.rec.Log.V(1).Info("Driver DaemonSet found", + n.logger.V(1).Info("Driver DaemonSet found", "Name", name, "dsRhcosVersion", dsRhcosVersion, "clusterHasRhcosVersion", clusterHasRhcosVersion, "desiredNumberScheduled", desiredNumberScheduled) if desiredNumberScheduled != 0 { - n.rec.Log.Info("Driver DaemonSet active, keep it.", + n.logger.Info("Driver DaemonSet active, keep it.", "Name", name, "Status.DesiredNumberScheduled", desiredNumberScheduled) continue } if !versionOk { - n.rec.Log.Info("WARNING: Driver DaemonSet doesn't have DriverToolkit version label", + n.logger.Info("WARNING: Driver DaemonSet doesn't have DriverToolkit version label", "Name", name, "Label", ocpDriverToolkitVersionLabel, ) } else { switch { case !clusterOk: - n.rec.Log.V(1).Info("Driver DaemonSet RHCOS version NOT part of the cluster", + n.logger.V(1).Info("Driver DaemonSet RHCOS version NOT part of the cluster", "Name", name, "RHCOS version", dsRhcosVersion, ) case clusterHasRhcosVersion: - n.rec.Log.V(1).Info("Driver DaemonSet RHCOS version is part of the cluster, keep it.", + n.logger.V(1).Info("Driver DaemonSet RHCOS version is part of the cluster, keep it.", "Name", name, "RHCOS version", dsRhcosVersion, ) @@ -3868,16 +3974,16 @@ func (n ClusterPolicyController) ocpCleanupStaleDriverToolkitDaemonSets(ctx cont continue default: /* clusterHasRhcosVersion == false */ // currently unexpected - n.rec.Log.V(1).Info("Driver DaemonSet RHCOS version marked for deletion", + n.logger.V(1).Info("Driver DaemonSet RHCOS version marked for deletion", "Name", name, "RHCOS version", dsRhcosVersion, ) } } - n.rec.Log.Info("Delete Driver DaemonSet", "Name", name) - err = n.rec.Client.Delete(ctx, &list.Items[idx]) + n.logger.Info("Delete Driver DaemonSet", "Name", name) + err = n.client.Delete(ctx, &list.Items[idx]) if err != nil { - n.rec.Log.Info("ERROR: Could not get delete DaemonSet", + n.logger.Info("ERROR: Could not delete DaemonSet", "Name", name, "Error", err) return err } @@ -4006,22 +4112,22 @@ func (n ClusterPolicyController) cleanupDriverDaemonsets(ctx context.Context, se var opts = []client.ListOption{client.MatchingLabels{searchKey: searchValue}} dsList := &appsv1.DaemonSetList{} - if err := n.rec.Client.List(ctx, dsList, opts...); err != nil { - n.rec.Log.Error(err, "Could not get DaemonSetList") + if err := n.client.List(ctx, dsList, opts...); err != nil { + n.logger.Error(err, "Could not get DaemonSetList") return 0, err } var lastErr error for idx := range dsList.Items { - n.rec.Log.Info("Delete DaemonSet", + n.logger.Info("Delete DaemonSet", "Name", dsList.Items[idx].ObjectMeta.Name, ) // ignore daemonsets that doesn't match the required name if !strings.HasPrefix(dsList.Items[idx].ObjectMeta.Name, namePrefix) { continue } - if err := n.rec.Client.Delete(ctx, &dsList.Items[idx]); err != nil { - n.rec.Log.Error(err, "Could not get delete DaemonSet", + if err := n.client.Delete(ctx, &dsList.Items[idx]); err != nil { + n.logger.Error(err, "Could not delete DaemonSet", "Name", dsList.Items[idx].ObjectMeta.Name) lastErr = err } @@ -4033,8 +4139,8 @@ func (n ClusterPolicyController) 
cleanupDriverDaemonsets(ctx context.Context, se } podList := &corev1.PodList{} - if err := n.rec.Client.List(ctx, podList, opts...); err != nil { - n.rec.Log.Info("ERROR: Could not get PodList", "Error", err) + if err := n.client.List(ctx, podList, opts...); err != nil { + n.logger.Info("ERROR: Could not get PodList", "Error", err) return 0, err } @@ -4056,11 +4162,11 @@ func DaemonSet(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].DaemonSet.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("DaemonSet", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("DaemonSet", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -4134,7 +4240,7 @@ func DaemonSet(n ClusterPolicyController) (gpuv1.State, error) { return gpuv1.NotReady, err } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { logger.Info("SetControllerReference failed", "Error", err) return gpuv1.NotReady, err } @@ -4157,7 +4263,7 @@ func DaemonSet(n ClusterPolicyController) (gpuv1.State, error) { } found := &appsv1.DaemonSet{} - err = n.rec.Client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) + err = n.client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("DaemonSet not found, creating", "Name", obj.Name, @@ -4166,7 +4272,7 @@ func DaemonSet(n ClusterPolicyController) (gpuv1.State, error) { hashStr := getDaemonsetHash(obj) // add annotation to the Daemonset with hash value during creation obj.Annotations[NvidiaAnnotationHashKey] = hashStr - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create DaemonSet", "Name", obj.Name, @@ -4185,7 +4291,7 @@ func DaemonSet(n ClusterPolicyController) (gpuv1.State, error) { changed := isDaemonsetSpecChanged(found, obj) if changed { logger.Info("DaemonSet is different, updating", "name", obj.ObjectMeta.Name) - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { return gpuv1.NotReady, err } @@ -4248,13 +4354,13 @@ func isPodReady(name string, n ClusterPolicyController, phase corev1.PodPhase) g ctx := n.ctx opts := []client.ListOption{&client.MatchingLabels{"app": name}} - n.rec.Log.V(1).Info("Pod", "LabelSelector", fmt.Sprintf("app=%s", name)) + n.logger.V(1).Info("Pod", "LabelSelector", fmt.Sprintf("app=%s", name)) list := &corev1.PodList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) 
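// The readiness helpers here log at graded verbosity: V(1) for summary
// counts, V(2) for per-object detail. A minimal illustration of the logr
// convention, assuming a configured logr.Logger (github.com/go-logr/logr);
// the helper name is illustrative:
func logPodCount(logger logr.Logger, pods []corev1.Pod) {
	logger.V(1).Info("Pod", "NumberOfPods", len(pods))
	for _, p := range pods {
		// Higher V means lower priority; emitted only at verbosity >= 2.
		logger.V(2).Info("Pod", "name", p.Name, "phase", p.Status.Phase)
	}
}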
if err != nil { - n.rec.Log.Info("Could not get PodList", err) + n.logger.Info("Could not get PodList", err) } - n.rec.Log.V(1).Info("Pod", "NumberOfPods", len(list.Items)) + n.logger.V(1).Info("Pod", "NumberOfPods", len(list.Items)) if len(list.Items) == 0 { return gpuv1.NotReady } @@ -4262,10 +4368,10 @@ func isPodReady(name string, n ClusterPolicyController, phase corev1.PodPhase) g pd := list.Items[0] if pd.Status.Phase != phase { - n.rec.Log.V(1).Info("Pod", "Phase", pd.Status.Phase, "!=", phase) + n.logger.V(1).Info("Pod", "Phase", pd.Status.Phase, "!=", phase) return gpuv1.NotReady } - n.rec.Log.V(1).Info("Pod", "Phase", pd.Status.Phase, "==", phase) + n.logger.V(1).Info("Pod", "Phase", pd.Status.Phase, "==", phase) return gpuv1.Ready } @@ -4276,11 +4382,11 @@ func SecurityContextConstraints(n ClusterPolicyController) (gpuv1.State, error) obj := n.resources[state].SecurityContextConstraints.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("SecurityContextConstraints", obj.Name, "Namespace", "default") + logger := n.logger.WithValues("SecurityContextConstraints", obj.Name, "Namespace", "default") // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -4295,20 +4401,15 @@ func SecurityContextConstraints(n ClusterPolicyController) (gpuv1.State, error) obj.Users[idx] = fmt.Sprintf("system:serviceaccount:%s:%s", obj.Namespace, obj.Name) } - // Allow hostNetwork only when a separate standalone DCGM engine is deployed for communication - if obj.Name == "nvidia-dcgm-exporter" && n.singleton.Spec.DCGM.IsEnabled() { - obj.AllowHostNetwork = true - } - - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &secv1.SecurityContextConstraints{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4321,7 +4422,7 @@ func SecurityContextConstraints(n ClusterPolicyController) (gpuv1.State, error) logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -4337,11 +4438,11 @@ func Service(n ClusterPolicyController) (gpuv1.State, error) { obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("Service", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("Service", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -4349,15 +4450,15 @@ func Service(n ClusterPolicyController) (gpuv1.State, error) { 
return gpuv1.Disabled, nil } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &corev1.Service{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4371,7 +4472,7 @@ func Service(n ClusterPolicyController) (gpuv1.State, error) { obj.ResourceVersion = found.ResourceVersion obj.Spec.ClusterIP = found.Spec.ClusterIP - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -4381,7 +4482,7 @@ func Service(n ClusterPolicyController) (gpuv1.State, error) { func crdExists(n ClusterPolicyController, name string) (bool, error) { crd := &apiextensionsv1.CustomResourceDefinition{} - err := n.rec.Client.Get(n.ctx, client.ObjectKey{Name: name}, crd) + err := n.client.Get(n.ctx, client.ObjectKey{Name: name}, crd) if err != nil && apierrors.IsNotFound(err) { return false, nil } else if err != nil { @@ -4398,7 +4499,7 @@ func ServiceMonitor(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].ServiceMonitor.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("ServiceMonitor", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("ServiceMonitor", obj.Name, "Namespace", obj.Namespace) // Check if ServiceMonitor is a valid kind serviceMonitorCRDExists, err := crdExists(n, ServiceMonitorCRDName) @@ -4411,7 +4512,7 @@ func ServiceMonitor(n ClusterPolicyController) (gpuv1.State, error) { if !serviceMonitorCRDExists { return gpuv1.Ready, nil } - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -4426,7 +4527,7 @@ func ServiceMonitor(n ClusterPolicyController) (gpuv1.State, error) { if !serviceMonitorCRDExists { return gpuv1.Ready, nil } - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -4453,9 +4554,14 @@ func ServiceMonitor(n ClusterPolicyController) (gpuv1.State, error) { obj.ObjectMeta.Labels[key] = value } } - if serviceMonitor.Relabelings != nil { - obj.Spec.Endpoints[0].RelabelConfigs = serviceMonitor.Relabelings + relabelConfigs := make([]promv1.RelabelConfig, len(serviceMonitor.Relabelings)) + for i, relabel := range serviceMonitor.Relabelings { + if relabel != nil { + relabelConfigs[i] = *relabel + } + } + obj.Spec.Endpoints[0].RelabelConfigs = relabelConfigs } } if n.stateNames[state] == "state-operator-metrics" || n.stateNames[state] == "state-node-status-exporter" { @@ -4474,15 +4580,15 @@ func ServiceMonitor(n ClusterPolicyController) (gpuv1.State, error) { obj.Spec.NamespaceSelector.MatchNames[idx] = obj.Namespace } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, 
n.scheme); err != nil { return gpuv1.NotReady, err } found := &promv1.ServiceMonitor{} - err = n.rec.Client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) + err = n.client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4495,7 +4601,7 @@ func ServiceMonitor(n ClusterPolicyController) (gpuv1.State, error) { logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -4519,17 +4625,17 @@ func transformRuntimeClassLegacy(n ClusterPolicyController, spec nodev1.RuntimeC obj.Labels = spec.Labels - logger := n.rec.Log.WithValues("RuntimeClass", obj.Name) + logger := n.logger.WithValues("RuntimeClass", obj.Name) - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &nodev1beta1.RuntimeClass{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4542,7 +4648,7 @@ func transformRuntimeClassLegacy(n ClusterPolicyController, spec nodev1.RuntimeC logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -4566,17 +4672,17 @@ func transformRuntimeClass(n ClusterPolicyController, spec nodev1.RuntimeClass) obj.Labels = spec.Labels - logger := n.rec.Log.WithValues("RuntimeClass", obj.Name) + logger := n.logger.WithValues("RuntimeClass", obj.Name) - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &nodev1.RuntimeClass{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4589,7 +4695,7 @@ func transformRuntimeClass(n ClusterPolicyController, spec nodev1.RuntimeClass) logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -4605,20 +4711,20 @@ func transformKataRuntimeClasses(n ClusterPolicyController) (gpuv1.State, error) // Get all existing Kata RuntimeClasses opts 
:= []client.ListOption{&client.MatchingLabels{"nvidia.com/kata-runtime-class": "true"}} list := &nodev1.RuntimeClassList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) if err != nil { - n.rec.Log.Info("Could not get Kata RuntimeClassList", err) + n.logger.Info("Could not get Kata RuntimeClassList", err) return gpuv1.NotReady, fmt.Errorf("error getting kata RuntimeClassList: %v", err) } - n.rec.Log.V(1).Info("Kata RuntimeClasses", "Number", len(list.Items)) + n.logger.V(1).Info("Kata RuntimeClasses", "Number", len(list.Items)) if !config.KataManager.IsEnabled() { // Delete all Kata RuntimeClasses - n.rec.Log.Info("Kata Manager disabled, deleting all Kata RuntimeClasses") + n.logger.Info("Kata Manager disabled, deleting all Kata RuntimeClasses") for _, rc := range list.Items { rc := rc - n.rec.Log.V(1).Info("Deleting Kata RuntimeClass", "Name", rc.Name) - err := n.rec.Client.Delete(ctx, &rc) + n.logger.V(1).Info("Deleting Kata RuntimeClass", "Name", rc.Name) + err := n.client.Delete(ctx, &rc) if err != nil { return gpuv1.NotReady, fmt.Errorf("error deleting kata RuntimeClass '%s': %v", rc.Name, err) } @@ -4636,8 +4742,8 @@ func transformKataRuntimeClasses(n ClusterPolicyController) (gpuv1.State, error) for _, rc := range list.Items { if _, ok := rcNames[rc.Name]; !ok { rc := rc - n.rec.Log.Info("Deleting Kata RuntimeClass", "Name", rc.Name) - err := n.rec.Client.Delete(ctx, &rc) + n.logger.Info("Deleting Kata RuntimeClass", "Name", rc.Name) + err := n.client.Delete(ctx, &rc) if err != nil { return gpuv1.NotReady, fmt.Errorf("error deleting kata RuntimeClass '%s': %v", rc.Name, err) } @@ -4647,7 +4753,7 @@ func transformKataRuntimeClasses(n ClusterPolicyController) (gpuv1.State, error) // Using kata RuntimeClass template, create / update RuntimeClass objects specified in KataManager configuration template := n.resources[state].RuntimeClasses[0] for _, rc := range config.KataManager.Config.RuntimeClasses { - logger := n.rec.Log.WithValues("RuntimeClass", rc.Name) + logger := n.logger.WithValues("RuntimeClass", rc.Name) if rc.Name == config.Operator.RuntimeClass { return gpuv1.NotReady, fmt.Errorf("error creating kata runtimeclass '%s' as it conflicts with the runtimeclass used for the gpu-operator operand pods itself", rc.Name) @@ -4670,15 +4776,15 @@ func transformKataRuntimeClasses(n ClusterPolicyController) (gpuv1.State, error) } obj.Scheduling.NodeSelector = nodeSelector - if err := controllerutil.SetControllerReference(n.singleton, &obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, &obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &nodev1.RuntimeClass{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, &obj) + err = n.client.Create(ctx, &obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4691,7 +4797,7 @@ func transformKataRuntimeClasses(n ClusterPolicyController) (gpuv1.State, error) logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, &obj) + err = n.client.Update(ctx, &obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -4719,9 +4825,9 @@ func RuntimeClasses(n ClusterPolicyController)
(gpuv1.State, error) { // 'nvidia-legacy' runtime classes. Delete these objects if they were // previously created. if !n.singleton.Spec.CDI.IsEnabled() && (obj.Name == "nvidia-cdi" || obj.Name == "nvidia-legacy") { - err := n.rec.Client.Delete(n.ctx, &obj) + err := n.client.Delete(n.ctx, &obj) if err != nil && !apierrors.IsNotFound(err) { - n.rec.Log.Info("Couldn't delete", "RuntimeClass", obj.Name, "Error", err) + n.logger.Info("Couldn't delete", "RuntimeClass", obj.Name, "Error", err) return gpuv1.NotReady, err } continue @@ -4744,17 +4850,17 @@ func PrometheusRule(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].PrometheusRule.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("PrometheusRule", obj.Name) + logger := n.logger.WithValues("PrometheusRule", obj.Name) - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &promv1.PrometheusRule{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4767,7 +4873,7 @@ func PrometheusRule(n ClusterPolicyController) (gpuv1.State, error) { logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err diff --git a/controllers/object_controls_test.go b/controllers/object_controls_test.go index 44c8f4195..ae17a9f3d 100644 --- a/controllers/object_controls_test.go +++ b/controllers/object_controls_test.go @@ -34,6 +34,7 @@ import ( nodev1 "k8s.io/api/node/v1" rbacv1 "k8s.io/api/rbac/v1" schedv1 "k8s.io/api/scheduling/v1beta1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -45,7 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/log/zap" - gpuv1 "github.com/NVIDIA/gpu-operator/api/v1" + gpuv1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" ) const ( @@ -55,6 +56,7 @@ const ( vGPUManagerAssetsPath = "assets/state-vgpu-manager/" sandboxDevicePluginAssetsPath = "assets/state-sandbox-device-plugin" devicePluginAssetsPath = "assets/state-device-plugin/" + dcgmExporterAssetsPath = "assets/state-dcgm-exporter/" nfdNvidiaPCILabelKey = "feature.node.kubernetes.io/pci-10de.present" upgradedKernel = "5.4.135-generic" ) @@ -67,7 +69,6 @@ type testConfig struct { var ( cfg *testConfig clusterPolicyController ClusterPolicyController - clusterPolicyReconciler ClusterPolicyReconciler clusterPolicy gpuv1.ClusterPolicy boolTrue *bool boolFalse *bool @@ -165,6 +166,9 @@ func setup() error { if err := promv1.AddToScheme(s); err != nil { return fmt.Errorf("unable to add promv1 schema: %v", err) } + if err := apiextensionsv1.AddToScheme(s); err != nil { + return fmt.Errorf("unable to add apiextensionsv1 schema: %v", err) + } if err := secv1.Install(s); err != nil { return 
fmt.Errorf("unable to add secv1 schema: %v", err) } @@ -201,16 +205,12 @@ func setup() error { } ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) - clusterPolicyReconciler = ClusterPolicyReconciler{ - Client: client, - Log: ctrl.Log.WithName("controller").WithName("ClusterPolicy"), - Scheme: s, - } - clusterPolicyController = ClusterPolicyController{ ctx: ctx, singleton: cp, - rec: &clusterPolicyReconciler, + client: client, + logger: ctrl.Log.WithName("controller").WithName("ClusterPolicy"), + scheme: s, } clusterPolicyController.operatorMetrics = initOperatorMetrics(&clusterPolicyController) @@ -271,7 +271,7 @@ func newCluster(nodes int, s *runtime.Scheme) (client.Client, error) { // updateClusterPolicy updates an existing ClusterPolicy instance func updateClusterPolicy(n *ClusterPolicyController, cp *gpuv1.ClusterPolicy) error { n.singleton = cp - err := n.rec.Client.Update(n.ctx, cp) + err := n.client.Update(n.ctx, cp) if err != nil && !apierrors.IsConflict(err) { return fmt.Errorf("failed to update ClusterPolicy: %v", err) } @@ -285,7 +285,7 @@ func removeState(n *ClusterPolicyController, idx int) error { var err error for _, res := range kubernetesResources { // TODO: use n.operatorNamespace once MR is merged - err = n.rec.Client.DeleteAllOf(n.ctx, res) + err = n.client.DeleteAllOf(n.ctx, res) if err != nil { return fmt.Errorf("error deleting objects from k8s client: %v", err) } @@ -391,6 +391,24 @@ func testDaemonsetCommon(t *testing.T, cp *gpuv1.ClusterPolicy, component string if err != nil { return nil, fmt.Errorf("unable to get mainCtrImage for sandbox-device-plugin: %v", err) } + case "DCGMExporter": + spec = commonDaemonsetSpec{ + repository: cp.Spec.DCGMExporter.Repository, + image: cp.Spec.DCGMExporter.Image, + version: cp.Spec.DCGMExporter.Version, + imagePullPolicy: cp.Spec.DCGMExporter.ImagePullPolicy, + imagePullSecrets: getImagePullSecrets(cp.Spec.DCGMExporter.ImagePullSecrets), + args: cp.Spec.DCGMExporter.Args, + env: cp.Spec.DCGMExporter.Env, + resources: cp.Spec.DCGMExporter.Resources, + } + dsLabel = "nvidia-dcgm-exporter" + mainCtrName = "nvidia-dcgm-exporter" + manifestFile = filepath.Join(cfg.root, dcgmExporterAssetsPath) + mainCtrImage, err = gpuv1.ImagePath(&cp.Spec.DCGMExporter) + if err != nil { + return nil, fmt.Errorf("unable to get mainCtrImage for dcgm-exporter: %v", err) + } default: return nil, fmt.Errorf("invalid component for testDaemonsetCommon(): %s", component) } @@ -414,7 +432,7 @@ func testDaemonsetCommon(t *testing.T, cp *gpuv1.ClusterPolicy, component string client.MatchingLabels{"app": dsLabel}, } list := &appsv1.DaemonSetList{} - err = clusterPolicyController.rec.Client.List(ctx, list, opts...) + err = clusterPolicyController.client.List(ctx, list, opts...) if err != nil { t.Fatalf("could not get DaemonSetList from client: %v", err) } @@ -1001,3 +1019,117 @@ func TestIsOpenKernelModulesRequired(t *testing.T) { }) } } + +// getDCGMExporterTestInput return a ClusterPolicy instance for a particular +// dcgm-exporter test case. 
+func getDCGMExporterTestInput(testCase string) *gpuv1.ClusterPolicy { + cp := clusterPolicy.DeepCopy() + + // Set some default values + cp.Spec.DCGMExporter.Repository = "nvcr.io/nvidia/k8s" + cp.Spec.DCGMExporter.Image = "dcgm-exporter" + cp.Spec.DCGMExporter.Version = "3.3.0-3.2.0-ubuntu22.04" + cp.Spec.DCGMExporter.ImagePullSecrets = []string{"ngc-secret"} + + cp.Spec.Validator.Repository = "nvcr.io/nvidia/cloud-native" + cp.Spec.Validator.Image = "gpu-operator-validator" + cp.Spec.Validator.Version = "v23.9.2" + cp.Spec.Validator.ImagePullSecrets = []string{"ngc-secret"} + + switch testCase { + case "default": + // Do nothing + case "standalone-dcgm": + dcgmEnabled := true + cp.Spec.DCGM.Enabled = &dcgmEnabled + default: + return nil + } + + return cp +} + +// getDCGMExporterTestOutput returns a map containing expected output for +// dcgm-exporter test case. +func getDCGMExporterTestOutput(testCase string) map[string]interface{} { + // default output + output := map[string]interface{}{ + "numDaemonsets": 1, + "dcgmExporterImage": "nvcr.io/nvidia/k8s/dcgm-exporter:3.3.0-3.2.0-ubuntu22.04", + "imagePullSecret": "ngc-secret", + } + + switch testCase { + case "default": + output["env"] = map[string]string{} + case "standalone-dcgm": + output["env"] = map[string]string{ + "DCGM_REMOTE_HOSTENGINE_INFO": "nvidia-dcgm:5555", + } + default: + return nil + } + + return output +} + +// TestDCGMExporter tests that the GPU Operator correctly deploys the dcgm-exporter daemonset +// under various scenarios/config options +func TestDCGMExporter(t *testing.T) { + testCases := []struct { + description string + clusterPolicy *gpuv1.ClusterPolicy + output map[string]interface{} + }{ + { + "Default", + getDCGMExporterTestInput("default"), + getDCGMExporterTestOutput("default"), + }, + { + "StandalongDCGM", + getDCGMExporterTestInput("standalone-dcgm"), + getDCGMExporterTestOutput("standalone-dcgm"), + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + ds, err := testDaemonsetCommon(t, tc.clusterPolicy, "DCGMExporter", tc.output["numDaemonsets"].(int)) + if err != nil { + t.Fatalf("error in testDaemonsetCommon(): %v", err) + } + if ds == nil { + return + } + + dcgmExporterImage := "" + for _, container := range ds.Spec.Template.Spec.Containers { + if container.Name == "nvidia-dcgm-exporter" { + dcgmExporterImage = container.Image + break + } + } + for key, value := range tc.output["env"].(map[string]string) { + envFound := false + for _, envVar := range ds.Spec.Template.Spec.Containers[0].Env { + if envVar.Name == key && envVar.Value == value { + envFound = true + } + } + if !envFound { + t.Fatalf("Expected env is not set for daemonset nvidia-dcgm-exporter %s->%s", key, value) + } + } + + require.Equal(t, tc.output["dcgmExporterImage"], dcgmExporterImage, "Unexpected configuration for dcgm-exporter image") + + // cleanup by deleting all kubernetes objects + err = removeState(&clusterPolicyController, clusterPolicyController.idx-1) + if err != nil { + t.Fatalf("error removing state %v:", err) + } + clusterPolicyController.idx-- + }) + } +} diff --git a/controllers/resource_manager.go b/controllers/resource_manager.go index af4c1dbad..2789bfe3d 100644 --- a/controllers/resource_manager.go +++ b/controllers/resource_manager.go @@ -67,7 +67,7 @@ func filePathWalkDir(n *ClusterPolicyController, root string) ([]string, error) var files []string err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if err != nil { - n.rec.Log.V(1).Info("error in 
filepath.Walk on %s: %v", root, err) + n.logger.V(1).Info("error in filepath.Walk on %s: %v", root, err) return nil } if !info.IsDir() { @@ -103,7 +103,7 @@ func addResourcesControls(n *ClusterPolicyController, path string) (Resources, c res := Resources{} ctrl := controlFunc{} - n.rec.Log.Info("Getting assets from: ", "path:", path) + n.logger.Info("Getting assets from: ", "path:", path) manifests := getAssetsFrom(n, path, n.openshift) s := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme.Scheme, @@ -115,7 +115,7 @@ func addResourcesControls(n *ClusterPolicyController, path string) (Resources, c slce := strings.Split(kind, ":") kind = strings.TrimSpace(slce[1]) - n.rec.Log.V(1).Info("Looking for ", "Kind", kind, "in path:", path) + n.logger.V(1).Info("Looking for ", "Kind", kind, "in path:", path) switch kind { case "ServiceAccount": @@ -181,7 +181,7 @@ func addResourcesControls(n *ClusterPolicyController, path string) (Resources, c panicIfError(err) ctrl = append(ctrl, PrometheusRule) default: - n.rec.Log.Info("Unknown Resource", "Manifest", m, "Kind", kind) + n.logger.Info("Unknown Resource", "Manifest", m, "Kind", kind) } } diff --git a/controllers/state_manager.go b/controllers/state_manager.go index 796694780..9c1028ebc 100644 --- a/controllers/state_manager.go +++ b/controllers/state_manager.go @@ -23,23 +23,19 @@ import ( "path/filepath" "strings" - secv1 "github.com/openshift/api/security/v1" - promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - - gpuv1 "github.com/NVIDIA/gpu-operator/api/v1" - "github.com/go-logr/logr" apiconfigv1 "github.com/openshift/api/config/v1" - apiimagev1 "github.com/openshift/api/image/v1" configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" "golang.org/x/mod/semver" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/discovery" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" + + gpuv1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" ) const ( @@ -145,15 +141,18 @@ type OpenShiftDriverToolkit struct { // ClusterPolicyController represents clusterpolicy controller spec for GPU operator type ClusterPolicyController struct { + client client.Client + ctx context.Context singleton *gpuv1.ClusterPolicy + logger logr.Logger + scheme *runtime.Scheme operatorNamespace string resources []Resources controls []controlFunc stateNames []string operatorMetrics *OperatorMetrics - rec *ClusterPolicyReconciler idx int kernelVersionMap map[string]string currentKernelVersion string @@ -425,7 +424,7 @@ func (n *ClusterPolicyController) applyDriverAutoUpgradeAnnotation() error { // fetch all nodes opts := []client.ListOption{} list := &corev1.NodeList{} - err := n.rec.Client.List(n.ctx, list, opts...) + err := n.client.List(n.ctx, list, opts...) 
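One behavioral note on the logging refactor: logr loggers are structured, so the message string is emitted as-is and context travels as alternating key/value pairs; printf verbs such as the `%s`/`%v` in the filepath.Walk message retained above are not interpolated. A short sketch of the conventional call shapes, assuming only go-logr:

```go
package controllers

import "github.com/go-logr/logr"

// logConventions illustrates the logr call shapes this refactor standardizes
// on: Info for normal output, V(1) for debug detail, and WithValues to pin
// common keys onto a scoped logger.
func logConventions(logger logr.Logger, path string, err error) {
	logger.Info("Getting assets", "path", path)
	logger.V(1).Info("walk error, skipping entry", "root", path, "error", err)
	scoped := logger.WithValues("RuntimeClass", "nvidia")
	scoped.Info("Not found, creating...")
}
```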
if err != nil { return fmt.Errorf("Unable to list nodes to check annotations, err %s", err.Error()) } @@ -465,9 +464,9 @@ func (n *ClusterPolicyController) applyDriverAutoUpgradeAnnotation() error { // remove annotation if value is null delete(node.ObjectMeta.Annotations, driverAutoUpgradeAnnotationKey) } - err := n.rec.Client.Update(n.ctx, &node) + err := n.client.Update(n.ctx, &node) if err != nil { - n.rec.Log.Info("Failed to update node state annotation on a node", + n.logger.Info("Failed to update node state annotation on a node", "node", node.Name, "annotationKey", driverAutoUpgradeAnnotationKey, "annotationValue", value, "error", err) @@ -484,7 +483,7 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { // fetch all nodes opts := []client.ListOption{} list := &corev1.NodeList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) if err != nil { return false, 0, fmt.Errorf("Unable to list nodes to check labels, err %s", err.Error()) } @@ -501,16 +500,16 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { } config, err := getWorkloadConfig(labels, n.sandboxEnabled) if err != nil { - n.rec.Log.Info("WARNING: failed to get GPU workload config for node; using default", + n.logger.Info("WARNING: failed to get GPU workload config for node; using default", "NodeName", node.ObjectMeta.Name, "SandboxEnabled", n.sandboxEnabled, "Error", err, "defaultGPUWorkloadConfig", defaultGPUWorkloadConfig) } - n.rec.Log.Info("GPU workload configuration", "NodeName", node.ObjectMeta.Name, "GpuWorkloadConfig", config) - gpuWorkloadConfig := &gpuWorkloadConfiguration{config, node.ObjectMeta.Name, n.rec.Log} + n.logger.Info("GPU workload configuration", "NodeName", node.ObjectMeta.Name, "GpuWorkloadConfig", config) + gpuWorkloadConfig := &gpuWorkloadConfiguration{config, node.ObjectMeta.Name, n.logger} if !hasCommonGPULabel(labels) && hasGPULabels(labels) { - n.rec.Log.Info("Node has GPU(s)", "NodeName", node.ObjectMeta.Name) + n.logger.Info("Node has GPU(s)", "NodeName", node.ObjectMeta.Name) // label the node with common Nvidia GPU label - n.rec.Log.Info("Setting node label", "NodeName", node.ObjectMeta.Name, "Label", commonGPULabelKey, "Value", commonGPULabelValue) + n.logger.Info("Setting node label", "NodeName", node.ObjectMeta.Name, "Label", commonGPULabelKey, "Value", commonGPULabelValue) labels[commonGPULabelKey] = commonGPULabelValue // update node labels node.SetLabels(labels) @@ -518,10 +517,10 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { } else if hasCommonGPULabel(labels) && !hasGPULabels(labels) { // previously labelled node and no longer has GPU's // label node to reset common Nvidia GPU label - n.rec.Log.Info("Node no longer has GPUs", "NodeName", node.ObjectMeta.Name) - n.rec.Log.Info("Setting node label", "Label", commonGPULabelKey, "Value", "false") + n.logger.Info("Node no longer has GPUs", "NodeName", node.ObjectMeta.Name) + n.logger.Info("Setting node label", "Label", commonGPULabelKey, "Value", "false") labels[commonGPULabelKey] = "false" - n.rec.Log.Info("Disabling all operands for node", "NodeName", node.ObjectMeta.Name) + n.logger.Info("Disabling all operands for node", "NodeName", node.ObjectMeta.Name) removeAllGPUStateLabels(labels) // update node labels node.SetLabels(labels) @@ -530,16 +529,16 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { if hasCommonGPULabel(labels) { // If node has GPU, then add state labels as per the workload type - 
n.rec.Log.Info("Checking GPU state labels on the node", "NodeName", node.ObjectMeta.Name) + n.logger.Info("Checking GPU state labels on the node", "NodeName", node.ObjectMeta.Name) if gpuWorkloadConfig.updateGPUStateLabels(labels) { - n.rec.Log.Info("Applying correct GPU state labels to the node", "NodeName", node.ObjectMeta.Name) + n.logger.Info("Applying correct GPU state labels to the node", "NodeName", node.ObjectMeta.Name) node.SetLabels(labels) updateLabels = true } // Disable MIG on the node explicitly where no MIG config is specified if n.singleton.Spec.MIGManager.IsEnabled() && hasMIGCapableGPU(labels) && !hasMIGConfigLabel(labels) { if n.singleton.Spec.MIGManager.Config != nil && n.singleton.Spec.MIGManager.Config.Default == migConfigDisabledValue { - n.rec.Log.Info("Setting MIG config label", "NodeName", node.ObjectMeta.Name, "Label", migConfigLabelKey, "Value", migConfigDisabledValue) + n.logger.Info("Setting MIG config label", "NodeName", node.ObjectMeta.Name, "Label", migConfigLabelKey, "Value", migConfigDisabledValue) labels[migConfigLabelKey] = migConfigDisabledValue node.SetLabels(labels) updateLabels = true @@ -553,12 +552,12 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { rhcosVersion, ok := labels[nfdOSTreeVersionLabelKey] if ok { n.ocpDriverToolkit.rhcosVersions[rhcosVersion] = true - n.rec.Log.V(1).Info("GPU node running RHCOS", + n.logger.V(1).Info("GPU node running RHCOS", "nodeName", node.ObjectMeta.Name, "RHCOS version", rhcosVersion, ) } else { - n.rec.Log.Info("node doesn't have the proper NFD RHCOS version label.", + n.logger.Info("node doesn't have the proper NFD RHCOS version label.", "nodeName", node.ObjectMeta.Name, "nfdLabel", nfdOSTreeVersionLabelKey, ) @@ -568,7 +567,7 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { // update node with the latest labels if updateLabels { - err = n.rec.Client.Update(ctx, &node) + err = n.client.Update(ctx, &node) if err != nil { return false, 0, fmt.Errorf("Unable to label node %s for the GPU Operator deployment, err %s", node.ObjectMeta.Name, err.Error()) @@ -576,7 +575,7 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { } } // end node loop - n.rec.Log.Info("Number of nodes with GPU label", "NodeCount", gpuNodesTotal) + n.logger.Info("Number of nodes with GPU label", "NodeCount", gpuNodesTotal) n.operatorMetrics.gpuNodesTotal.Set(float64(gpuNodesTotal)) return clusterHasNFDLabels, gpuNodesTotal, nil } @@ -606,7 +605,7 @@ func (n *ClusterPolicyController) setPodSecurityLabelsForNamespace() error { // The GPU Operator is not installed in the suggested // namespace, so the namespace may be shared with other // untrusted operators. Do not set Pod Security Admission labels. - n.rec.Log.Info("GPU Operator is not installed in the suggested namespace. Not setting Pod Security Admission labels for namespace", + n.logger.Info("GPU Operator is not installed in the suggested namespace. 
Not setting Pod Security Admission labels for namespace", "namespace", namespaceName, "suggested namespace", ocpSuggestedNamespace) return nil @@ -614,7 +613,7 @@ func (n *ClusterPolicyController) setPodSecurityLabelsForNamespace() error { ns := &corev1.Namespace{} opts := client.ObjectKey{Name: namespaceName} - err := n.rec.Client.Get(ctx, opts, ns) + err := n.client.Get(ctx, opts, ns) if err != nil { return fmt.Errorf("ERROR: could not get Namespace %s from client: %v", namespaceName, err) } @@ -640,7 +639,7 @@ func (n *ClusterPolicyController) setPodSecurityLabelsForNamespace() error { return nil } - err = n.rec.Client.Patch(ctx, ns, patch) + err = n.client.Patch(ctx, ns, patch) if err != nil { return fmt.Errorf("unable to label namespace %s with pod security levels: %v", namespaceName, err) } @@ -657,7 +656,7 @@ func (n *ClusterPolicyController) ocpEnsureNamespaceMonitoring() error { // namespace, so the namespace may be shared with other // untrusted operators. Do not enable namespace monitoring in // this case, as per OpenShift/Prometheus best practices. - n.rec.Log.Info("GPU Operator not installed in the suggested namespace, skipping namespace monitoring verification", + n.logger.Info("GPU Operator not installed in the suggested namespace, skipping namespace monitoring verification", "namespace", namespaceName, "suggested namespace", ocpSuggestedNamespace) return nil @@ -665,7 +664,7 @@ func (n *ClusterPolicyController) ocpEnsureNamespaceMonitoring() error { ns := &corev1.Namespace{} opts := client.ObjectKey{Name: namespaceName} - err := n.rec.Client.Get(ctx, opts, ns) + err := n.client.Get(ctx, opts, ns) if err != nil { return fmt.Errorf("ERROR: could not get Namespace %s from client: %v", namespaceName, err) } @@ -679,7 +678,7 @@ func (n *ClusterPolicyController) ocpEnsureNamespaceMonitoring() error { } else { msg = "WARNING: OpenShift monitoring currently disabled on user request" } - n.rec.Log.Info(msg, + n.logger.Info(msg, "namespace", namespaceName, "label", ocpNamespaceMonitoringLabelKey, "value", val, @@ -689,16 +688,16 @@ func (n *ClusterPolicyController) ocpEnsureNamespaceMonitoring() error { } // label not defined, enable monitoring - n.rec.Log.Info("Enabling OpenShift monitoring") - n.rec.Log.V(1).Info("Adding monitoring label to the operator namespace", + n.logger.Info("Enabling OpenShift monitoring") + n.logger.V(1).Info("Adding monitoring label to the operator namespace", "namespace", namespaceName, "label", ocpNamespaceMonitoringLabelKey, "value", ocpNamespaceMonitoringLabelValue) - n.rec.Log.Info("Monitoring can be disabled by setting the namespace label " + + n.logger.Info("Monitoring can be disabled by setting the namespace label " + ocpNamespaceMonitoringLabelKey + "=false") patch := client.MergeFrom(ns.DeepCopy()) ns.ObjectMeta.Labels[ocpNamespaceMonitoringLabelKey] = ocpNamespaceMonitoringLabelValue - err = n.rec.Client.Patch(ctx, ns, patch) + err = n.client.Patch(ctx, ns, patch) if err != nil { return fmt.Errorf("Unable to label namespace %s for the GPU Operator monitoring, err %s", namespaceName, err.Error()) @@ -724,7 +723,7 @@ func (n *ClusterPolicyController) getRuntime() error { client.MatchingLabels{commonGPULabelKey: "true"}, } list := &corev1.NodeList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) 
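Both namespace handlers above patch rather than update: they snapshot the namespace, mutate the copy's labels, and send only the computed merge patch. A condensed sketch of that pattern, assuming a controller-runtime client (the helper name is illustrative):

```go
package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// labelNamespace applies a single label via the MergeFrom pattern used by
// setPodSecurityLabelsForNamespace and ocpEnsureNamespaceMonitoring: the
// patch carries only the delta, so unrelated concurrent changes to the
// namespace are not clobbered.
func labelNamespace(ctx context.Context, c client.Client, ns *corev1.Namespace, key, value string) error {
	patch := client.MergeFrom(ns.DeepCopy())
	if ns.Labels == nil {
		ns.Labels = map[string]string{}
	}
	ns.Labels[key] = value
	return c.Patch(ctx, ns, patch)
}
```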
if err != nil { return fmt.Errorf("Unable to list nodes prior to checking container runtime: %v", err) } @@ -733,7 +732,7 @@ func (n *ClusterPolicyController) getRuntime() error { for _, node := range list.Items { rt, err := getRuntimeString(node) if err != nil { - n.rec.Log.Info(fmt.Sprintf("Unable to get runtime info for node %s: %v", node.Name, err)) + n.logger.Info(fmt.Sprintf("Unable to get runtime info for node %s: %v", node.Name, err)) continue } runtime = rt @@ -744,7 +743,7 @@ func (n *ClusterPolicyController) getRuntime() error { } if runtime.String() == "" { - n.rec.Log.Info("Unable to get runtime info from the cluster, defaulting to containerd") + n.logger.Info("Unable to get runtime info from the cluster, defaulting to containerd") runtime = gpuv1.Containerd } n.runtime = runtime @@ -754,14 +753,16 @@ func (n *ClusterPolicyController) getRuntime() error { func (n *ClusterPolicyController) init(ctx context.Context, reconciler *ClusterPolicyReconciler, clusterPolicy *gpuv1.ClusterPolicy) error { n.singleton = clusterPolicy n.ctx = ctx - n.rec = reconciler n.idx = 0 + n.logger = reconciler.Log + n.client = reconciler.Client + n.scheme = reconciler.Scheme if len(n.controls) == 0 { clusterPolicyCtrl.operatorNamespace = os.Getenv("OPERATOR_NAMESPACE") if clusterPolicyCtrl.operatorNamespace == "" { - n.rec.Log.Error(nil, "OPERATOR_NAMESPACE environment variable not set, cannot proceed") + n.logger.Error(nil, "OPERATOR_NAMESPACE environment variable not set, cannot proceed") // we cannot do anything without the operator namespace, // let the operator Pod run into `CrashloopBackOff` @@ -782,15 +783,10 @@ func (n *ClusterPolicyController) init(ctx context.Context, reconciler *ClusterP return fmt.Errorf("k8s version detected '%s' is not a valid semantic version", k8sVersion) } n.k8sVersion = k8sVersion - n.rec.Log.Info("Kubernetes version detected", "version", k8sVersion) - - utilruntime.Must(promv1.AddToScheme(reconciler.Scheme)) - utilruntime.Must(secv1.Install(reconciler.Scheme)) - utilruntime.Must(apiconfigv1.Install(reconciler.Scheme)) - utilruntime.Must(apiimagev1.Install(reconciler.Scheme)) + n.logger.Info("Kubernetes version detected", "version", k8sVersion) n.operatorMetrics = initOperatorMetrics(n) - n.rec.Log.Info("Operator metrics initialized.") + n.logger.Info("Operator metrics initialized.") addState(n, "/opt/gpu-operator/pre-requisites") addState(n, "/opt/gpu-operator/state-operator-metrics") @@ -821,13 +817,13 @@ func (n *ClusterPolicyController) init(ctx context.Context, reconciler *ClusterP // workload configuration defaultWorkload := clusterPolicy.Spec.SandboxWorkloads.DefaultWorkload if isValidWorkloadConfig(defaultWorkload) { - n.rec.Log.Info("Default GPU workload is overridden in ClusterPolicy", "DefaultWorkload", defaultWorkload) + n.logger.Info("Default GPU workload is overridden in ClusterPolicy", "DefaultWorkload", defaultWorkload) defaultGPUWorkloadConfig = defaultWorkload } } else { n.sandboxEnabled = false } - n.rec.Log.Info("Sandbox workloads", "Enabled", n.sandboxEnabled, "DefaultWorkload", defaultGPUWorkloadConfig) + n.logger.Info("Sandbox workloads", "Enabled", n.sandboxEnabled, "DefaultWorkload", defaultGPUWorkloadConfig) if n.openshift != "" && (n.singleton.Spec.Operator.UseOpenShiftDriverToolkit == nil || *n.singleton.Spec.Operator.UseOpenShiftDriverToolkit) { @@ -849,12 +845,12 @@ func (n *ClusterPolicyController) init(ctx context.Context, reconciler *ClusterP if clusterPolicy.Spec.PSA.IsEnabled() { // label namespace with Pod Security Admission 
levels - n.rec.Log.Info("Pod Security is enabled. Adding labels to GPU Operator namespace", "namespace", n.operatorNamespace) + n.logger.Info("Pod Security is enabled. Adding labels to GPU Operator namespace", "namespace", n.operatorNamespace) err := n.setPodSecurityLabelsForNamespace() if err != nil { return err } - n.rec.Log.Info("Pod Security Admission labels added to GPU Operator namespace", "namespace", n.operatorNamespace) + n.logger.Info("Pod Security Admission labels added to GPU Operator namespace", "namespace", n.operatorNamespace) } // fetch all nodes and label gpu nodes @@ -876,13 +872,13 @@ func (n *ClusterPolicyController) init(ctx context.Context, reconciler *ClusterP if err != nil { return err } - n.rec.Log.Info(fmt.Sprintf("Using container runtime: %s", n.runtime.String())) + n.logger.Info(fmt.Sprintf("Using container runtime: %s", n.runtime.String())) // fetch all kernel versions from the GPU nodes in the cluster if n.singleton.Spec.Driver.IsEnabled() && n.singleton.Spec.Driver.UsePrecompiledDrivers() { kernelVersionMap, err := n.getKernelVersionsMap() if err != nil { - n.rec.Log.Info("Unable to obtain all kernel versions of the GPU nodes in the cluster", "err", err) + n.logger.Info("Unable to obtain all kernel versions of the GPU nodes in the cluster", "err", err) return err } n.kernelVersionMap = kernelVersionMap @@ -906,7 +902,7 @@ func (n *ClusterPolicyController) initOCPParams() error { } else if n.ocpDriverToolkit.requested { hasImageStream, err := ocpHasDriverToolkitImageStream(n) if err != nil { - n.rec.Log.Info("ocpHasDriverToolkitImageStream", "err", err) + n.logger.Info("ocpHasDriverToolkitImageStream", "err", err) return err } hasCompatibleNFD := len(n.ocpDriverToolkit.rhcosVersions) != 0 @@ -917,11 +913,11 @@ func (n *ClusterPolicyController) initOCPParams() error { } else { n.operatorMetrics.openshiftDriverToolkitEnabled.Set(openshiftDriverToolkitNotPossible) } - n.rec.Log.Info("OpenShift Driver Toolkit requested", + n.logger.Info("OpenShift Driver Toolkit requested", "hasCompatibleNFD", hasCompatibleNFD, "hasDriverToolkitImageStream", hasImageStream) - n.rec.Log.Info("OpenShift Driver Toolkit", + n.logger.Info("OpenShift Driver Toolkit", "enabled", n.ocpDriverToolkit.enabled) if hasImageStream { @@ -954,7 +950,7 @@ func (n *ClusterPolicyController) step() (gpuv1.State, error) { // updating / deleting objects owned by another controller. if (n.stateNames[n.idx] == "state-driver" || n.stateNames[n.idx] == "state-vgpu-manager") && n.singleton.Spec.Driver.UseNvdiaDriverCRDType() { - n.rec.Log.Info("NVIDIADriver CRD is enabled, cleaning up all NVIDIA driver daemonsets owned by ClusterPolicy") + n.logger.Info("NVIDIADriver CRD is enabled, cleaning up all NVIDIA driver daemonsets owned by ClusterPolicy") n.idx++ // Cleanup all driver daemonsets owned by ClusterPolicy. 
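The runtime detection above walks the labelled GPU nodes and falls back to containerd when nothing usable is reported. The per-node value comes from the kubelet's containerRuntimeVersion status field, which has the form name://version; a hypothetical helper in the spirit of getRuntimeString (whose exact signature may differ):

```go
package controllers

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
)

// runtimeName extracts "containerd" from "containerd://1.7.2", "cri-o" from
// "cri-o://1.28.2", and so on, using the same node status field that
// getRuntimeString inspects.
func runtimeName(node corev1.Node) (string, error) {
	v := node.Status.NodeInfo.ContainerRuntimeVersion
	name, _, ok := strings.Cut(v, "://")
	if !ok || name == "" {
		return "", fmt.Errorf("unable to parse container runtime version %q", v)
	}
	return name, nil
}
```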
err := n.cleanupAllDriverDaemonSets(n.ctx) @@ -1032,7 +1028,7 @@ func (n ClusterPolicyController) isStateEnabled(stateName string) bool { case "state-operator-metrics": return true default: - n.rec.Log.Error(nil, "invalid state passed", "stateName", stateName) + n.logger.Error(nil, "invalid state passed", "stateName", stateName) return false } } diff --git a/controllers/state_manager_test.go b/controllers/state_manager_test.go index def5fea65..bdec856e0 100644 --- a/controllers/state_manager_test.go +++ b/controllers/state_manager_test.go @@ -21,7 +21,7 @@ import ( corev1 "k8s.io/api/core/v1" - gpuv1 "github.com/NVIDIA/gpu-operator/api/v1" + gpuv1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" ) func TestGetRuntimeString(t *testing.T) { diff --git a/controllers/transforms_test.go b/controllers/transforms_test.go new file mode 100644 index 000000000..83b504b7e --- /dev/null +++ b/controllers/transforms_test.go @@ -0,0 +1,1165 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controllers + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + ctrl "sigs.k8s.io/controller-runtime" + + gpuv1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" +) + +// Daemonset is a DaemonSet wrapper used for testing +type Daemonset struct { + *appsv1.DaemonSet +} + +func NewDaemonset() Daemonset { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{}, + }, + }, + } + return Daemonset{ds} +} + +func (d Daemonset) WithHostPathVolume(name string, path string, hostPathType *corev1.HostPathType) Daemonset { + volume := corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: path, + Type: hostPathType, + }, + }, + } + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, volume) + return d +} + +func (d Daemonset) WithInitContainer(container corev1.Container) Daemonset { + d.Spec.Template.Spec.InitContainers = append(d.Spec.Template.Spec.InitContainers, container) + return d +} + +func (d Daemonset) WithContainer(container corev1.Container) Daemonset { + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, container) + return d +} + +func (d Daemonset) WithName(name string) Daemonset { + d.Name = name + return d +} + +func (d Daemonset) WithUpdateStrategy(strategy appsv1.DaemonSetUpdateStrategy) Daemonset { + d.Spec.UpdateStrategy = strategy + return d +} + +func (d Daemonset) WithPriorityClass(name string) Daemonset { + d.Spec.Template.Spec.PriorityClassName = name + return d +} + +func (d Daemonset) WithTolerations(tolerations []corev1.Toleration) Daemonset { + 
d.Spec.Template.Spec.Tolerations = tolerations + return d +} + +func (d Daemonset) WithPodLabels(labels map[string]string) Daemonset { + d.Spec.Template.Labels = labels + return d +} + +func (d Daemonset) WithPodAnnotations(annotations map[string]string) Daemonset { + d.Spec.Template.Annotations = annotations + return d +} + +func (d Daemonset) WithPullSecret(secret string) Daemonset { + d.Spec.Template.Spec.ImagePullSecrets = []corev1.LocalObjectReference{{Name: secret}} + return d +} + +func (d Daemonset) WithRuntimeClassName(name string) Daemonset { + d.Spec.Template.Spec.RuntimeClassName = &name + return d +} + +// Pod is a Pod wrapper used for testing +type Pod struct { + *corev1.Pod +} + +func NewPod() Pod { + pod := &corev1.Pod{ + Spec: corev1.PodSpec{}, + } + return Pod{pod} +} + +func (p Pod) WithInitContainer(container corev1.Container) Pod { + p.Spec.InitContainers = append(p.Spec.InitContainers, container) + return p +} + +func (p Pod) WithRuntimeClassName(name string) Pod { + p.Spec.RuntimeClassName = &name + return p +} + +func TestTransformForHostRoot(t *testing.T) { + hostRootVolumeName := "host-root" + hostDevCharVolumeName := "host-dev-char" + testCases := []struct { + description string + hostRoot string + input Daemonset + expectedOutput Daemonset + }{ + { + description: "no host root or host-dev-char volume in daemonset", + hostRoot: "/custom-root", + input: NewDaemonset(), + expectedOutput: NewDaemonset(), + }, + { + description: "empty host root is a no-op", + hostRoot: "", + input: NewDaemonset(). + WithHostPathVolume(hostRootVolumeName, "/", nil). + WithHostPathVolume(hostDevCharVolumeName, "/", nil), + expectedOutput: NewDaemonset(). + WithHostPathVolume(hostRootVolumeName, "/", nil). + WithHostPathVolume(hostDevCharVolumeName, "/", nil), + }, + { + description: "custom host root with host-root and host-dev-char volumes", + hostRoot: "/custom-root", + input: NewDaemonset(). + WithHostPathVolume(hostRootVolumeName, "/", nil). + WithHostPathVolume(hostDevCharVolumeName, "/", nil). + WithContainer(corev1.Container{Name: "test-ctr"}), + expectedOutput: NewDaemonset(). + WithHostPathVolume(hostRootVolumeName, "/custom-root", nil). + WithHostPathVolume(hostDevCharVolumeName, "/custom-root/dev/char", nil). + WithContainer(corev1.Container{Name: "test-ctr", Env: []corev1.EnvVar{{Name: HostRootEnvName, Value: "/custom-root"}}}), + }, + { + description: "custom host root with host-root volume", + hostRoot: "/custom-root", + input: NewDaemonset(). + WithHostPathVolume(hostRootVolumeName, "/", nil). + WithContainer(corev1.Container{Name: "test-ctr"}), + expectedOutput: NewDaemonset(). + WithHostPathVolume(hostRootVolumeName, "/custom-root", nil). + WithContainer(corev1.Container{Name: "test-ctr", Env: []corev1.EnvVar{{Name: HostRootEnvName, Value: "/custom-root"}}}), + }, + { + description: "custom host root with host-dev-char volume", + hostRoot: "/custom-root", + input: NewDaemonset(). + WithHostPathVolume(hostDevCharVolumeName, "/", nil), + expectedOutput: NewDaemonset(). 
+ WithHostPathVolume(hostDevCharVolumeName, "/custom-root/dev/char", nil), + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + transformForHostRoot(tc.input.DaemonSet, tc.hostRoot) + require.EqualValues(t, tc.expectedOutput, tc.input) + }) + } +} + +func TestTransformForDriverInstallDir(t *testing.T) { + driverInstallDirVolumeName := "driver-install-dir" + testCases := []struct { + description string + driverInstallDir string + input Daemonset + expectedOutput Daemonset + }{ + { + description: "no driver-install-dir volume in daemonset", + driverInstallDir: "/custom-root", + input: NewDaemonset(), + expectedOutput: NewDaemonset(), + }, + { + description: "empty driverInstallDir is a no-op", + driverInstallDir: "", + input: NewDaemonset(). + WithHostPathVolume(driverInstallDirVolumeName, "/run/nvidia/driver", nil). + WithInitContainer( + corev1.Container{ + Name: "driver-validation", + VolumeMounts: []corev1.VolumeMount{ + {Name: driverInstallDirVolumeName, MountPath: "/run/nvidia/driver"}, + }, + }), + expectedOutput: NewDaemonset(). + WithHostPathVolume(driverInstallDirVolumeName, "/run/nvidia/driver", nil). + WithInitContainer( + corev1.Container{ + Name: "driver-validation", + VolumeMounts: []corev1.VolumeMount{ + {Name: driverInstallDirVolumeName, MountPath: "/run/nvidia/driver"}, + }, + }), + }, + { + description: "custom driverInstallDir with driver-install-dir volume", + driverInstallDir: "/custom-root", + input: NewDaemonset(). + WithHostPathVolume(driverInstallDirVolumeName, "/run/nvidia/driver", nil), + expectedOutput: NewDaemonset(). + WithHostPathVolume(driverInstallDirVolumeName, "/custom-root", nil), + }, + { + description: "custom driverInstallDir with driver-install-dir volume and driver-validation initContainer", + driverInstallDir: "/custom-root", + input: NewDaemonset(). + WithHostPathVolume(driverInstallDirVolumeName, "/run/nvidia/driver", nil). + WithInitContainer( + corev1.Container{ + Name: "driver-validation", + VolumeMounts: []corev1.VolumeMount{ + {Name: driverInstallDirVolumeName, MountPath: "/run/nvidia/driver"}, + }, + }), + expectedOutput: NewDaemonset(). + WithHostPathVolume(driverInstallDirVolumeName, "/custom-root", nil). + WithInitContainer( + corev1.Container{ + Name: "driver-validation", + VolumeMounts: []corev1.VolumeMount{ + {Name: driverInstallDirVolumeName, MountPath: "/custom-root"}, + }, + Env: []corev1.EnvVar{ + {Name: DriverInstallDirEnvName, Value: "/custom-root"}, + {Name: DriverInstallDirCtrPathEnvName, Value: "/custom-root"}, + }, + }), + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + transformForDriverInstallDir(tc.input.DaemonSet, tc.driverInstallDir) + require.EqualValues(t, tc.expectedOutput, tc.input) + }) + } +} + +func TestTransformForRuntime(t *testing.T) { + testCases := []struct { + description string + runtime gpuv1.Runtime + input Daemonset + expectedOutput Daemonset + }{ + { + description: "containerd", + runtime: gpuv1.Containerd, + input: NewDaemonset(). + WithContainer(corev1.Container{Name: "test-ctr"}), + expectedOutput: NewDaemonset(). + WithHostPathVolume("containerd-config", filepath.Dir(DefaultContainerdConfigFile), newHostPathType(corev1.HostPathDirectoryOrCreate)). + WithHostPathVolume("containerd-socket", filepath.Dir(DefaultContainerdSocketFile), nil). 
+ WithContainer(corev1.Container{ + Name: "test-ctr", + Env: []corev1.EnvVar{ + {Name: "RUNTIME", Value: gpuv1.Containerd.String()}, + {Name: "CONTAINERD_RUNTIME_CLASS", Value: DefaultRuntimeClass}, + {Name: "RUNTIME_CONFIG", Value: filepath.Join(DefaultRuntimeConfigTargetDir, filepath.Base(DefaultContainerdConfigFile))}, + {Name: "CONTAINERD_CONFIG", Value: filepath.Join(DefaultRuntimeConfigTargetDir, filepath.Base(DefaultContainerdConfigFile))}, + {Name: "RUNTIME_SOCKET", Value: filepath.Join(DefaultRuntimeSocketTargetDir, filepath.Base(DefaultContainerdSocketFile))}, + {Name: "CONTAINERD_SOCKET", Value: filepath.Join(DefaultRuntimeSocketTargetDir, filepath.Base(DefaultContainerdSocketFile))}, + }, + VolumeMounts: []corev1.VolumeMount{ + {Name: "containerd-config", MountPath: DefaultRuntimeConfigTargetDir}, + {Name: "containerd-socket", MountPath: DefaultRuntimeSocketTargetDir}, + }, + }), + }, + { + description: "crio", + runtime: gpuv1.CRIO, + input: NewDaemonset().WithContainer(corev1.Container{Name: "test-ctr"}), + expectedOutput: NewDaemonset(). + WithHostPathVolume("crio-config", filepath.Dir(DefaultCRIOConfigFile), newHostPathType(corev1.HostPathDirectoryOrCreate)). + WithContainer(corev1.Container{ + Name: "test-ctr", + Env: []corev1.EnvVar{ + {Name: "RUNTIME", Value: gpuv1.CRIO.String()}, + {Name: "RUNTIME_CONFIG", Value: filepath.Join(DefaultRuntimeConfigTargetDir, filepath.Base(DefaultCRIOConfigFile))}, + {Name: "CRIO_CONFIG", Value: filepath.Join(DefaultRuntimeConfigTargetDir, filepath.Base(DefaultCRIOConfigFile))}, + }, + VolumeMounts: []corev1.VolumeMount{ + {Name: "crio-config", MountPath: DefaultRuntimeConfigTargetDir}, + }, + }), + }, + } + + cp := &gpuv1.ClusterPolicySpec{Operator: gpuv1.OperatorSpec{RuntimeClass: DefaultRuntimeClass}} + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + err := transformForRuntime(tc.input.DaemonSet, cp, tc.runtime.String(), "test-ctr") + require.NoError(t, err) + require.EqualValues(t, tc.expectedOutput, tc.input) + }) + } +} + +func TestApplyUpdateStrategyConfig(t *testing.T) { + testCases := []struct { + description string + ds Daemonset + dsSpec gpuv1.DaemonsetsSpec + errorExpected bool + expectedDs Daemonset + }{ + { + description: "empty daemonset spec configuration", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{}, + errorExpected: false, + expectedDs: NewDaemonset(), + }, + { + description: "invalid update strategy string, no rolling update fields configured", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{UpdateStrategy: "invalid"}, + errorExpected: false, + expectedDs: NewDaemonset(), + }, + { + description: "RollingUpdate update strategy string, no rolling update fields configured", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{UpdateStrategy: "RollingUpdate"}, + errorExpected: false, + expectedDs: NewDaemonset(), + }, + { + description: "RollingUpdate update strategy string, daemonset is driver pod", + ds: NewDaemonset().WithName(commonDriverDaemonsetName), + dsSpec: gpuv1.DaemonsetsSpec{ + UpdateStrategy: "RollingUpdate", + RollingUpdate: &gpuv1.RollingUpdateSpec{ + MaxUnavailable: "1", + }}, + errorExpected: false, + expectedDs: NewDaemonset().WithName(commonDriverDaemonsetName), + }, + { + description: "RollingUpdate update strategy string, integer maxUnavailable", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{ + UpdateStrategy: "RollingUpdate", + RollingUpdate: &gpuv1.RollingUpdateSpec{ + MaxUnavailable: "1", + }}, + errorExpected: false, + expectedDs: 
NewDaemonset().WithUpdateStrategy(appsv1.DaemonSetUpdateStrategy{ + Type: appsv1.RollingUpdateDaemonSetStrategyType, + RollingUpdate: &appsv1.RollingUpdateDaemonSet{MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}}, + }), + }, + { + description: "RollingUpdate update strategy string, percentage maxUnavailable", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{ + UpdateStrategy: "RollingUpdate", + RollingUpdate: &gpuv1.RollingUpdateSpec{ + MaxUnavailable: "10%", + }}, + errorExpected: false, + expectedDs: NewDaemonset().WithUpdateStrategy(appsv1.DaemonSetUpdateStrategy{ + Type: appsv1.RollingUpdateDaemonSetStrategyType, + RollingUpdate: &appsv1.RollingUpdateDaemonSet{MaxUnavailable: &intstr.IntOrString{Type: intstr.String, StrVal: "10%"}}, + }), + }, + { + description: "RollingUpdate update strategy string, invalid maxUnavailable", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{ + UpdateStrategy: "RollingUpdate", + RollingUpdate: &gpuv1.RollingUpdateSpec{ + MaxUnavailable: "10%abc", + }}, + errorExpected: true, + }, + { + description: "OnDelete update strategy", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{UpdateStrategy: "OnDelete"}, + errorExpected: false, + expectedDs: NewDaemonset().WithUpdateStrategy(appsv1.DaemonSetUpdateStrategy{Type: appsv1.OnDeleteDaemonSetStrategyType}), + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + cpSpec := &gpuv1.ClusterPolicySpec{ + Daemonsets: tc.dsSpec, + } + err := applyUpdateStrategyConfig(tc.ds.DaemonSet, cpSpec) + if tc.errorExpected { + require.Error(t, err) + return + } + require.NoError(t, err) + require.EqualValues(t, tc.expectedDs, tc.ds) + }) + } +} + +func TestApplyCommonDaemonSetConfig(t *testing.T) { + testCases := []struct { + description string + ds Daemonset + dsSpec gpuv1.DaemonsetsSpec + errorExpected bool + expectedDs Daemonset + }{ + { + description: "empty daemonset spec configuration", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{}, + expectedDs: NewDaemonset(), + }, + { + description: "priorityclass configured", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{PriorityClassName: "test-priority-class"}, + expectedDs: NewDaemonset().WithPriorityClass("test-priority-class"), + }, + { + description: "toleration configured", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{ + Tolerations: []corev1.Toleration{ + { + Key: "test-key", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + expectedDs: NewDaemonset().WithTolerations([]corev1.Toleration{ + { + Key: "test-key", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + }), + }, + { + description: "invalid updatestrategy configured", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{ + UpdateStrategy: "RollingUpdate", + RollingUpdate: &gpuv1.RollingUpdateSpec{ + MaxUnavailable: "10%abc", + }}, + errorExpected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + cpSpec := &gpuv1.ClusterPolicySpec{ + Daemonsets: tc.dsSpec, + } + err := applyCommonDaemonsetConfig(tc.ds.DaemonSet, cpSpec) + if tc.errorExpected { + require.Error(t, err) + return + } + require.NoError(t, err) + require.EqualValues(t, tc.expectedDs, tc.ds) + }) + } +} + +func TestApplyCommonDaemonsetMetadata(t *testing.T) { + testCases := []struct { + description string + ds Daemonset + dsSpec gpuv1.DaemonsetsSpec + expectedDs Daemonset + }{ + { + description: "empty daemonset spec configuration", + ds: 
NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{}, + expectedDs: NewDaemonset(), + }, + { + description: "common daemonset labels configured", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{Labels: map[string]string{ + "key": "value", + "app": "value", + "app.kubernetes.io/part-of": "value", + }}, + expectedDs: NewDaemonset().WithPodLabels(map[string]string{ + "key": "value", + }), + }, + { + description: "common daemonset annotations configured", + ds: NewDaemonset(), + dsSpec: gpuv1.DaemonsetsSpec{Annotations: map[string]string{ + "key": "value", + "app": "value", + "app.kubernetes.io/part-of": "value", + }}, + expectedDs: NewDaemonset().WithPodAnnotations(map[string]string{ + "key": "value", + "app": "value", + "app.kubernetes.io/part-of": "value", + }), + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + applyCommonDaemonsetMetadata(tc.ds.DaemonSet, &tc.dsSpec) + require.EqualValues(t, tc.expectedDs, tc.ds) + }) + } +} + +func TestTransformValidationInitContainer(t *testing.T) { + testCases := []struct { + description string + ds Daemonset + cpSpec *gpuv1.ClusterPolicySpec + expectedDs Daemonset + }{ + { + description: "transform both driver and toolkit validation initContainers", + ds: NewDaemonset(). + WithInitContainer(corev1.Container{Name: "driver-validation"}). + WithInitContainer(corev1.Container{Name: "toolkit-validation"}). + WithInitContainer(corev1.Container{Name: "dummy"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + ImagePullSecrets: []string{"pull-secret"}, + Driver: gpuv1.DriverValidatorSpec{ + Env: []gpuv1.EnvVar{{Name: "foo", Value: "bar"}}, + }, + Toolkit: gpuv1.ToolkitValidatorSpec{ + Env: []gpuv1.EnvVar{{Name: "foo", Value: "bar"}}, + }, + }, + }, + expectedDs: NewDaemonset().WithInitContainer(corev1.Container{ + Name: "driver-validation", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{{Name: "foo", Value: "bar"}}, + }).WithInitContainer(corev1.Container{ + Name: "toolkit-validation", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{{Name: "foo", Value: "bar"}}, + }).WithInitContainer(corev1.Container{Name: "dummy"}).WithPullSecret("pull-secret"), + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + err := transformValidationInitContainer(tc.ds.DaemonSet, tc.cpSpec) + require.NoError(t, err) + require.EqualValues(t, tc.expectedDs, tc.ds) + }) + } +} + +func newBoolPtr(b bool) *bool { + boolPtr := new(bool) + *boolPtr = b + return boolPtr +} + +func TestTransformDriverManagerInitContainer(t *testing.T) { + testCases := []struct { + description string + ds Daemonset + cpSpec *gpuv1.ClusterPolicySpec + expectedDs Daemonset + }{ + { + description: "transform k8s-driver-manager initContainer", + ds: NewDaemonset(). + WithInitContainer(corev1.Container{Name: "k8s-driver-manager"}). 
+ WithInitContainer(corev1.Container{Name: "dummy"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Driver: gpuv1.DriverSpec{ + Manager: gpuv1.DriverManagerSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "k8s-driver-manager", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + ImagePullSecrets: []string{"pull-secret"}, + Env: []gpuv1.EnvVar{{Name: "foo", Value: "bar"}}, + }, + GPUDirectRDMA: &gpuv1.GPUDirectRDMASpec{ + Enabled: newBoolPtr(true), + UseHostMOFED: newBoolPtr(true), + }, + }, + }, + expectedDs: NewDaemonset().WithInitContainer(corev1.Container{ + Name: "k8s-driver-manager", + Image: "nvcr.io/nvidia/cloud-native/k8s-driver-manager:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + {Name: GPUDirectRDMAEnabledEnvName, Value: "true"}, + {Name: UseHostMOFEDEnvName, Value: "true"}, + {Name: "foo", Value: "bar"}, + }, + }).WithInitContainer(corev1.Container{Name: "dummy"}).WithPullSecret("pull-secret"), + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + err := transformDriverManagerInitContainer(tc.ds.DaemonSet, &tc.cpSpec.Driver.Manager, tc.cpSpec.Driver.GPUDirectRDMA) + require.NoError(t, err) + require.EqualValues(t, tc.expectedDs, tc.ds) + }) + } +} + +func TestTransformValidatorShared(t *testing.T) { + testCases := []struct { + description string + ds Daemonset + cpSpec *gpuv1.ClusterPolicySpec + expectedDs Daemonset + }{ + { + description: "transform validator daemonset's main container", + ds: NewDaemonset().WithContainer(corev1.Container{Name: "test-ctr"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + ImagePullSecrets: []string{"pull-secret"}, + Resources: &gpuv1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + "memory": resource.MustParse("200Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + "memory": resource.MustParse("200Mi"), + }, + }, + Args: []string{"--test-flag"}, + Env: []gpuv1.EnvVar{{Name: "foo", Value: "bar"}}, + }, + }, + expectedDs: NewDaemonset().WithContainer(corev1.Container{ + Name: "test-ctr", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + "memory": resource.MustParse("200Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + "memory": resource.MustParse("200Mi"), + }, + }, + Args: []string{"--test-flag"}, + Env: []corev1.EnvVar{{Name: "foo", Value: "bar"}}, + }).WithPullSecret("pull-secret"), + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + err := TransformValidatorShared(tc.ds.DaemonSet, tc.cpSpec) + require.NoError(t, err) + require.EqualValues(t, tc.expectedDs, tc.ds) + }) + } +} + +func TestTransformValidatorComponent(t *testing.T) { + testCases := []struct { + description string + pod Pod + cpSpec *gpuv1.ClusterPolicySpec + component string + expectedPod Pod + errorExpected bool + }{ + { + description: "no validation init container is a no-op", + pod: NewPod(), + cpSpec: nil, + component: "driver", + expectedPod: NewPod(), + }, + { + description: "invalid component", + pod: NewPod().WithInitContainer(corev1.Container{Name: 
"invalid-validation"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{}, + }, + component: "invalid", + expectedPod: NewPod(), + errorExpected: true, + }, + { + description: "cuda validation", + pod: NewPod(). + WithInitContainer(corev1.Container{Name: "cuda-validation"}). + WithRuntimeClassName("nvidia"), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + ImagePullSecrets: []string{"pull-secret1", "pull-secret2"}, + CUDA: gpuv1.CUDAValidatorSpec{ + Env: []gpuv1.EnvVar{{Name: "foo", Value: "bar"}}, + }, + }, + }, + component: "cuda", + expectedPod: NewPod().WithInitContainer(corev1.Container{ + Name: "cuda-validation", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + {Name: "foo", Value: "bar"}, + {Name: ValidatorImageEnvName, Value: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0"}, + {Name: ValidatorImagePullPolicyEnvName, Value: "IfNotPresent"}, + {Name: ValidatorImagePullSecretsEnvName, Value: "pull-secret1,pull-secret2"}, + {Name: ValidatorRuntimeClassEnvName, Value: "nvidia"}, + }, + }).WithRuntimeClassName("nvidia"), + }, + { + description: "plugin validation", + pod: NewPod(). + WithInitContainer(corev1.Container{Name: "plugin-validation"}). + WithRuntimeClassName("nvidia"), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + ImagePullSecrets: []string{"pull-secret1", "pull-secret2"}, + Plugin: gpuv1.PluginValidatorSpec{ + Env: []gpuv1.EnvVar{{Name: "foo", Value: "bar"}}, + }, + }, + MIG: gpuv1.MIGSpec{ + Strategy: gpuv1.MIGStrategySingle, + }, + }, + component: "plugin", + expectedPod: NewPod().WithInitContainer(corev1.Container{ + Name: "plugin-validation", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + {Name: "foo", Value: "bar"}, + {Name: ValidatorImageEnvName, Value: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0"}, + {Name: ValidatorImagePullPolicyEnvName, Value: "IfNotPresent"}, + {Name: ValidatorImagePullSecretsEnvName, Value: "pull-secret1,pull-secret2"}, + {Name: ValidatorRuntimeClassEnvName, Value: "nvidia"}, + {Name: MigStrategyEnvName, Value: string(gpuv1.MIGStrategySingle)}, + }, + }).WithRuntimeClassName("nvidia"), + }, + { + description: "plugin validation removed when plugin is disabled", + pod: NewPod(). + WithInitContainer(corev1.Container{Name: "plugin-validation"}). 
+ WithInitContainer(corev1.Container{Name: "dummy"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + }, + DevicePlugin: gpuv1.DevicePluginSpec{Enabled: newBoolPtr(false)}, + }, + component: "plugin", + expectedPod: NewPod().WithInitContainer(corev1.Container{Name: "dummy"}), + }, + { + description: "driver validation", + pod: NewPod().WithInitContainer(corev1.Container{Name: "driver-validation"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + Driver: gpuv1.DriverValidatorSpec{ + Env: []gpuv1.EnvVar{{Name: "foo", Value: "bar"}}, + }, + }, + }, + component: "driver", + expectedPod: NewPod().WithInitContainer(corev1.Container{ + Name: "driver-validation", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + {Name: "foo", Value: "bar"}, + }, + }), + }, + { + description: "nvidia-fs validation", + pod: NewPod().WithInitContainer(corev1.Container{Name: "nvidia-fs-validation"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + }, + GPUDirectStorage: &gpuv1.GPUDirectStorageSpec{Enabled: newBoolPtr(true)}, + }, + component: "nvidia-fs", + expectedPod: NewPod().WithInitContainer(corev1.Container{ + Name: "nvidia-fs-validation", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + }), + }, + { + description: "nvidia-fs validation is removed when gds is disabled", + pod: NewPod(). + WithInitContainer(corev1.Container{Name: "nvidia-fs-validation"}). + WithInitContainer(corev1.Container{Name: "dummy"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + }, + GPUDirectStorage: &gpuv1.GPUDirectStorageSpec{Enabled: newBoolPtr(false)}, + }, + component: "nvidia-fs", + expectedPod: NewPod().WithInitContainer(corev1.Container{Name: "dummy"}), + }, + { + description: "cc-manager validation", + pod: NewPod().WithInitContainer(corev1.Container{Name: "cc-manager-validation"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + }, + CCManager: gpuv1.CCManagerSpec{Enabled: newBoolPtr(true)}, + }, + component: "cc-manager", + expectedPod: NewPod().WithInitContainer(corev1.Container{ + Name: "cc-manager-validation", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + }), + }, + { + description: "cc-manager validation is removed when cc-manager is disabled", + pod: NewPod(). + WithInitContainer(corev1.Container{Name: "cc-manager-validation"}). 
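
The nvidia-fs and cc-manager cases here gate on pointer-typed Enabled fields, which makes nil-safety part of the contract for any code consulting them; what an unset (nil) field defaults to is component-specific and deliberately not pinned down by these cases. A sketch of the nil-safe read:

func boolValue(b *bool, def bool) bool {
	// def supplies the component's own default when the field is unset.
	if b == nil {
		return def
	}
	return *b
}
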
+ WithInitContainer(corev1.Container{Name: "dummy"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + }, + CCManager: gpuv1.CCManagerSpec{Enabled: newBoolPtr(false)}, + }, + component: "cc-manager", + expectedPod: NewPod().WithInitContainer(corev1.Container{Name: "dummy"}), + }, + { + description: "toolkit validation", + pod: NewPod().WithInitContainer(corev1.Container{Name: "toolkit-validation"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + Toolkit: gpuv1.ToolkitValidatorSpec{ + Env: []gpuv1.EnvVar{{Name: "foo", Value: "bar"}}, + }, + }, + }, + component: "toolkit", + expectedPod: NewPod().WithInitContainer(corev1.Container{ + Name: "toolkit-validation", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + {Name: "foo", Value: "bar"}, + }, + }), + }, + { + description: "vfio-pci validation", + pod: NewPod().WithInitContainer(corev1.Container{Name: "vfio-pci-validation"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + VFIOPCI: gpuv1.VFIOPCIValidatorSpec{ + Env: []gpuv1.EnvVar{{Name: "foo", Value: "bar"}}, + }, + }, + }, + component: "vfio-pci", + expectedPod: NewPod().WithInitContainer(corev1.Container{ + Name: "vfio-pci-validation", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + {Name: "DEFAULT_GPU_WORKLOAD_CONFIG", Value: defaultGPUWorkloadConfig}, + {Name: "foo", Value: "bar"}, + }, + }), + }, + { + description: "vgpu-manager validation", + pod: NewPod().WithInitContainer(corev1.Container{Name: "vgpu-manager-validation"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + VGPUManager: gpuv1.VGPUManagerValidatorSpec{ + Env: []gpuv1.EnvVar{{Name: "foo", Value: "bar"}}, + }, + }, + }, + component: "vgpu-manager", + expectedPod: NewPod().WithInitContainer(corev1.Container{ + Name: "vgpu-manager-validation", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + {Name: "DEFAULT_GPU_WORKLOAD_CONFIG", Value: defaultGPUWorkloadConfig}, + {Name: "foo", Value: "bar"}, + }, + }), + }, + { + description: "vgpu-devices validation", + pod: NewPod().WithInitContainer(corev1.Container{Name: "vgpu-devices-validation"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + VGPUDevices: gpuv1.VGPUDevicesValidatorSpec{ + Env: []gpuv1.EnvVar{{Name: "foo", Value: "bar"}}, + }, + }, + }, + component: "vgpu-devices", + expectedPod: NewPod().WithInitContainer(corev1.Container{ + Name: "vgpu-devices-validation", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + {Name: 
"DEFAULT_GPU_WORKLOAD_CONFIG", Value: defaultGPUWorkloadConfig}, + {Name: "foo", Value: "bar"}, + }, + }), + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + err := TransformValidatorComponent(tc.cpSpec, &tc.pod.Pod.Spec, tc.component) + if tc.errorExpected { + require.Error(t, err) + return + } + require.NoError(t, err) + require.EqualValues(t, tc.expectedPod, tc.pod) + }) + } +} + +func TestTransformValidator(t *testing.T) { + testCases := []struct { + description string + ds Daemonset + cpSpec *gpuv1.ClusterPolicySpec + expectedDs Daemonset + errorExpected bool + }{ + { + description: "empty validator spec", + ds: NewDaemonset(). + WithInitContainer(corev1.Container{Name: "dummy"}). + WithContainer(corev1.Container{Name: "dummy"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{}, + }, + expectedDs: NewDaemonset(), + errorExpected: true, + }, + { + description: "valid validator spec", + ds: NewDaemonset(). + WithInitContainer(corev1.Container{Name: "dummy"}). + WithContainer(corev1.Container{Name: "dummy"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + ImagePullSecrets: []string{"pull-secret"}, + }, + }, + expectedDs: NewDaemonset(). + WithInitContainer(corev1.Container{Name: "dummy"}). + WithContainer(corev1.Container{ + Name: "dummy", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + }). + WithPullSecret("pull-secret"). + WithRuntimeClassName("nvidia"), + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + err := TransformValidator(tc.ds.DaemonSet, tc.cpSpec, ClusterPolicyController{runtime: gpuv1.Containerd, logger: ctrl.Log.WithName("test")}) + if tc.errorExpected { + require.Error(t, err) + return + } + require.NoError(t, err) + require.EqualValues(t, tc.expectedDs, tc.ds) + }) + } +} + +func TestTransformSandboxValidator(t *testing.T) { + testCases := []struct { + description string + ds Daemonset + cpSpec *gpuv1.ClusterPolicySpec + expectedDs Daemonset + errorExpected bool + }{ + { + description: "empty validator spec", + ds: NewDaemonset(). + WithInitContainer(corev1.Container{Name: "dummy"}). + WithContainer(corev1.Container{Name: "dummy"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{}, + }, + expectedDs: NewDaemonset(), + errorExpected: true, + }, + { + description: "valid validator spec", + ds: NewDaemonset(). + WithInitContainer(corev1.Container{Name: "dummy"}). + WithContainer(corev1.Container{Name: "dummy"}), + cpSpec: &gpuv1.ClusterPolicySpec{ + Validator: gpuv1.ValidatorSpec{ + Repository: "nvcr.io/nvidia/cloud-native", + Image: "gpu-operator-validator", + Version: "v1.0.0", + ImagePullPolicy: "IfNotPresent", + ImagePullSecrets: []string{"pull-secret"}, + }, + }, + expectedDs: NewDaemonset(). + WithInitContainer(corev1.Container{Name: "dummy"}). + WithContainer(corev1.Container{ + Name: "dummy", + Image: "nvcr.io/nvidia/cloud-native/gpu-operator-validator:v1.0.0", + ImagePullPolicy: corev1.PullIfNotPresent, + }). 
+ WithPullSecret("pull-secret"), + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + err := TransformSandboxValidator(tc.ds.DaemonSet, tc.cpSpec, ClusterPolicyController{runtime: gpuv1.Containerd, logger: ctrl.Log.WithName("test")}) + if tc.errorExpected { + require.Error(t, err) + return + } + require.NoError(t, err) + require.EqualValues(t, tc.expectedDs, tc.ds) + }) + } +} diff --git a/controllers/upgrade_controller.go b/controllers/upgrade_controller.go index ab179ac78..0c6de580d 100644 --- a/controllers/upgrade_controller.go +++ b/controllers/upgrade_controller.go @@ -24,6 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/util/workqueue" @@ -42,8 +43,8 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - gpuv1 "github.com/NVIDIA/gpu-operator/api/v1" - nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/v1alpha1" + gpuv1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" ) // UpgradeReconciler reconciles Driver Daemon Sets for upgrade @@ -231,29 +232,38 @@ func (r *UpgradeReconciler) removeNodeUpgradeStateLabels(ctx context.Context) er //nolint:dupl func (r *UpgradeReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { // Create a new controller - c, err := controller.New("upgrade-controller", mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: 1, RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(minDelayCR, maxDelayCR)}) + c, err := controller.New("upgrade-controller", mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: 1, + RateLimiter: workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](minDelayCR, maxDelayCR)}) if err != nil { return err } // Watch for changes to primary resource ClusterPolicy - err = c.Watch(source.Kind(mgr.GetCache(), &gpuv1.ClusterPolicy{}), &handler.EnqueueRequestForObject{}, predicate.GenerationChangedPredicate{}) + err = c.Watch(source.Kind( + mgr.GetCache(), + &gpuv1.ClusterPolicy{}, + &handler.TypedEnqueueRequestForObject[*gpuv1.ClusterPolicy]{}, + predicate.TypedGenerationChangedPredicate[*gpuv1.ClusterPolicy]{}), + ) if err != nil { return err } // Define a mapping from the Node object in the event to one or more // ClusterPolicy objects to Reconcile - nodeMapFn := func(ctx context.Context, a client.Object) []reconcile.Request { + nodeMapFn := func(ctx context.Context, o *corev1.Node) []reconcile.Request { return getClusterPoliciesToReconcile(ctx, mgr.GetClient()) } // Watch for changes to node labels // TODO: only watch for changes to upgrade state label err = c.Watch( - source.Kind(mgr.GetCache(), &corev1.Node{}), - handler.EnqueueRequestsFromMapFunc(nodeMapFn), - predicate.LabelChangedPredicate{}, + source.Kind( + mgr.GetCache(), + &corev1.Node{}, + handler.TypedEnqueueRequestsFromMapFunc[*corev1.Node](nodeMapFn), + predicate.TypedLabelChangedPredicate[*corev1.Node]{}, + ), ) if err != nil { return err @@ -264,13 +274,13 @@ func (r *UpgradeReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manag // // For events generated by DaemonSets, ensure the object is // owned by either ClusterPolicy or NVIDIADriver. 
- dsMapFn := func(ctx context.Context, a client.Object) []reconcile.Request { + dsMapFn := func(ctx context.Context, a *appsv1.DaemonSet) []reconcile.Request { ownerRefs := a.GetOwnerReferences() ownedByNVIDIA := false for _, owner := range ownerRefs { - if (owner.APIVersion == gpuv1.GroupVersion.String() && owner.Kind == "ClusterPolicy") || - (owner.APIVersion == nvidiav1alpha1.GroupVersion.String() && owner.Kind == "NVIDIADriver") { + if (owner.APIVersion == gpuv1.SchemeGroupVersion.String() && owner.Kind == "ClusterPolicy") || + (owner.APIVersion == nvidiav1alpha1.SchemeGroupVersion.String() && owner.Kind == "NVIDIADriver") { ownedByNVIDIA = true break } @@ -285,27 +295,34 @@ func (r *UpgradeReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manag // Watch for changes to NVIDIA driver daemonsets and enqueue ClusterPolicy // TODO: use one common label to identify all NVIDIA driver DaemonSets - appLabelSelector, err := predicate.LabelSelectorPredicate(metav1.LabelSelector{MatchLabels: map[string]string{DriverLabelKey: DriverLabelValue}}) - if err != nil { - return fmt.Errorf("failed to create label selector predicate: %w", err) - } - dtkLabelSelector, err := predicate.LabelSelectorPredicate(metav1.LabelSelector{MatchLabels: map[string]string{ocpDriverToolkitIdentificationLabel: ocpDriverToolkitIdentificationValue}}) - if err != nil { - return fmt.Errorf("failed to create label selector predicate: %w", err) - } - componentLabelSelector, err := predicate.LabelSelectorPredicate(metav1.LabelSelector{MatchLabels: map[string]string{AppComponentLabelKey: AppComponentLabelValue}}) - if err != nil { - return fmt.Errorf("failed to create label selector predicate: %w", err) - } + appLabelSelector := predicate.NewTypedPredicateFuncs(func(ds *appsv1.DaemonSet) bool { + ls := metav1.LabelSelector{MatchLabels: map[string]string{DriverLabelKey: DriverLabelValue}} + selector, _ := metav1.LabelSelectorAsSelector(&ls) + return selector.Matches(labels.Set(ds.GetLabels())) + }) + + dtkLabelSelector := predicate.NewTypedPredicateFuncs(func(ds *appsv1.DaemonSet) bool { + ls := metav1.LabelSelector{MatchLabels: map[string]string{ocpDriverToolkitIdentificationLabel: ocpDriverToolkitIdentificationValue}} + selector, _ := metav1.LabelSelectorAsSelector(&ls) + return selector.Matches(labels.Set(ds.GetLabels())) + }) + + componentLabelSelector := predicate.NewTypedPredicateFuncs(func(ds *appsv1.DaemonSet) bool { + ls := metav1.LabelSelector{MatchLabels: map[string]string{AppComponentLabelKey: AppComponentLabelValue}} + selector, _ := metav1.LabelSelectorAsSelector(&ls) + return selector.Matches(labels.Set(ds.GetLabels())) + }) err = c.Watch( - source.Kind(mgr.GetCache(), &appsv1.DaemonSet{}), - handler.EnqueueRequestsFromMapFunc(dsMapFn), - predicate.And( - predicate.GenerationChangedPredicate{}, - predicate.Or(appLabelSelector, dtkLabelSelector, componentLabelSelector), - ), - ) + source.Kind( + mgr.GetCache(), + &appsv1.DaemonSet{}, + handler.TypedEnqueueRequestsFromMapFunc[*appsv1.DaemonSet](dsMapFn), + predicate.And[*appsv1.DaemonSet]( + predicate.TypedGenerationChangedPredicate[*appsv1.DaemonSet]{}, + predicate.Or[*appsv1.DaemonSet](appLabelSelector, dtkLabelSelector, componentLabelSelector), + ), + )) if err != nil { return err } diff --git a/deployments/gpu-operator/Chart.lock b/deployments/gpu-operator/Chart.lock index f5c7fcca3..5d1a7d3dc 100644 --- a/deployments/gpu-operator/Chart.lock +++ b/deployments/gpu-operator/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: node-feature-discovery repository: 
https://kubernetes-sigs.github.io/node-feature-discovery/charts - version: 0.14.2 -digest: sha256:84ec59c0c12da825ca7dc25bdac63d0f2106822a129f7fe1f9d60a4023a543ce -generated: "2023-10-10T11:26:00.823757+02:00" + version: 0.16.6 +digest: sha256:e7b02cbdf9daff49892c0b74c50da2ed11e18eff2105a1b1abc9a8f2ebd8be47 +generated: "2024-10-31T07:12:50.141904-07:00" diff --git a/deployments/gpu-operator/Chart.yaml b/deployments/gpu-operator/Chart.yaml index 08cdee92a..59f9e6904 100644 --- a/deployments/gpu-operator/Chart.yaml +++ b/deployments/gpu-operator/Chart.yaml @@ -19,6 +19,6 @@ keywords: dependencies: - name: node-feature-discovery - version: v0.14.2 + version: v0.16.6 repository: https://kubernetes-sigs.github.io/node-feature-discovery/charts condition: nfd.enabled diff --git a/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml b/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml index 8bd1f818d..7656c732f 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v0.14.2 +appVersion: v0.16.6 description: 'Detects hardware features available on each node in a Kubernetes cluster, and advertises those features using node labels. ' home: https://github.com/kubernetes-sigs/node-feature-discovery @@ -11,4 +11,4 @@ name: node-feature-discovery sources: - https://github.com/kubernetes-sigs/node-feature-discovery type: application -version: 0.14.2 +version: 0.16.6 diff --git a/deployments/gpu-operator/charts/node-feature-discovery/README.md b/deployments/gpu-operator/charts/node-feature-discovery/README.md index 16b5254d5..93734f8b7 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/README.md +++ b/deployments/gpu-operator/charts/node-feature-discovery/README.md @@ -6,5 +6,5 @@ labels. NFD provides flexible configuration and extension points for a wide range of vendor and application specific node labeling needs. See -[NFD documentation](https://kubernetes-sigs.github.io/node-feature-discovery/v0.14/deployment/helm.html) +[NFD documentation](https://kubernetes-sigs.github.io/node-feature-discovery/v0.16/deployment/helm.html) for deployment instructions. diff --git a/deployments/gpu-operator/charts/node-feature-discovery/crds/nfd-api-crds.yaml b/deployments/gpu-operator/charts/node-feature-discovery/crds/nfd-api-crds.yaml index 6866c7ffe..0a73c5dca 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/crds/nfd-api-crds.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/crds/nfd-api-crds.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + controller-gen.kubebuilder.io/version: v0.14.0 name: nodefeatures.nfd.k8s-sigs.io spec: group: nfd.k8s-sigs.io @@ -17,23 +17,30 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: NodeFeature resource holds the features discovered for one node - in the cluster. + description: |- + NodeFeature resource holds the features discovered for one node in the + cluster. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: NodeFeatureSpec describes a NodeFeature object. + description: Specification of the NodeFeature, containing features discovered + for a node. properties: features: description: Features is the full "raw" features data that has been @@ -47,6 +54,7 @@ spec: elements: additionalProperties: type: string + description: Individual features of the feature set. type: object required: - elements @@ -64,6 +72,7 @@ spec: description: Nil is a dummy empty struct for protobuf compatibility type: object + description: Individual features of the feature set. type: object required: - elements @@ -77,6 +86,7 @@ spec: which is an instance having multiple attributes. properties: elements: + description: Individual features of the feature set. items: description: InstanceFeature represents one instance of a complex features, e.g. a device. @@ -84,6 +94,7 @@ spec: attributes: additionalProperties: type: string + description: Attributes of the instance feature. type: object required: - attributes @@ -113,7 +124,278 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + controller-gen.kubebuilder.io/version: v0.14.0 + name: nodefeaturegroups.nfd.k8s-sigs.io +spec: + group: nfd.k8s-sigs.io + names: + kind: NodeFeatureGroup + listKind: NodeFeatureGroupList + plural: nodefeaturegroups + shortNames: + - nfg + singular: nodefeaturegroup + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: NodeFeatureGroup resource holds Node pools by featureGroup + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec defines the rules to be evaluated. 
+ properties: + featureGroupRules: + description: List of rules to evaluate to determine nodes that belong + in this group. + items: + description: GroupRule defines a rule for nodegroup filtering. + properties: + matchAny: + description: MatchAny specifies a list of matchers one of which + must match. + items: + description: MatchAnyElem specifies one sub-matcher of MatchAny. + properties: + matchFeatures: + description: MatchFeatures specifies a set of matcher + terms all of which must match. + items: + description: |- + FeatureMatcherTerm defines requirements against one feature set. All + requirements (specified as MatchExpressions) are evaluated against each + element in the feature set. + properties: + feature: + description: Feature is the name of the feature + set to match against. + type: string + matchExpressions: + additionalProperties: + description: |- + MatchExpression specifies an expression to evaluate against a set of input + values. It contains an operator that is applied when matching the input and + an array of values that the operator evaluates the input against. + properties: + op: + description: Op is the operator to be applied. + enum: + - In + - NotIn + - InRegexp + - Exists + - DoesNotExist + - Gt + - Lt + - GtLt + - IsTrue + - IsFalse + type: string + value: + description: |- + Value is the list of values that the operand evaluates the input + against. Value should be empty if the operator is Exists, DoesNotExist, + IsTrue or IsFalse. Value should contain exactly one element if the + operator is Gt or Lt and exactly two elements if the operator is GtLt. + In other cases Value should contain at least one element. + items: + type: string + type: array + required: + - op + type: object + description: |- + MatchExpressions is the set of per-element expressions evaluated. These + match against the value of the specified elements. + type: object + matchName: + description: |- + MatchName in an expression that is matched against the name of each + element in the feature set. + properties: + op: + description: Op is the operator to be applied. + enum: + - In + - NotIn + - InRegexp + - Exists + - DoesNotExist + - Gt + - Lt + - GtLt + - IsTrue + - IsFalse + type: string + value: + description: |- + Value is the list of values that the operand evaluates the input + against. Value should be empty if the operator is Exists, DoesNotExist, + IsTrue or IsFalse. Value should contain exactly one element if the + operator is Gt or Lt and exactly two elements if the operator is GtLt. + In other cases Value should contain at least one element. + items: + type: string + type: array + required: + - op + type: object + required: + - feature + type: object + type: array + required: + - matchFeatures + type: object + type: array + matchFeatures: + description: MatchFeatures specifies a set of matcher terms + all of which must match. + items: + description: |- + FeatureMatcherTerm defines requirements against one feature set. All + requirements (specified as MatchExpressions) are evaluated against each + element in the feature set. + properties: + feature: + description: Feature is the name of the feature set to + match against. + type: string + matchExpressions: + additionalProperties: + description: |- + MatchExpression specifies an expression to evaluate against a set of input + values. It contains an operator that is applied when matching the input and + an array of values that the operator evaluates the input against. 
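
For orientation, a minimal object accepted by this new NodeFeatureGroup CRD might look as follows (illustrative only; the group name, rule name, and feature values are made up, though pci.device/vendor is the conventional NFD feature for PCI vendor IDs):

apiVersion: nfd.k8s-sigs.io/v1alpha1
kind: NodeFeatureGroup
metadata:
  name: gpu-nodes
spec:
  featureGroupRules:
    - name: "has discrete NVIDIA GPU"
      matchFeatures:
        - feature: pci.device
          matchExpressions:
            vendor: {op: In, value: ["10de"]}

Matching node names are then written back by nfd-master into status.nodes, per the status schema below.
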
+ properties: + op: + description: Op is the operator to be applied. + enum: + - In + - NotIn + - InRegexp + - Exists + - DoesNotExist + - Gt + - Lt + - GtLt + - IsTrue + - IsFalse + type: string + value: + description: |- + Value is the list of values that the operand evaluates the input + against. Value should be empty if the operator is Exists, DoesNotExist, + IsTrue or IsFalse. Value should contain exactly one element if the + operator is Gt or Lt and exactly two elements if the operator is GtLt. + In other cases Value should contain at least one element. + items: + type: string + type: array + required: + - op + type: object + description: |- + MatchExpressions is the set of per-element expressions evaluated. These + match against the value of the specified elements. + type: object + matchName: + description: |- + MatchName in an expression that is matched against the name of each + element in the feature set. + properties: + op: + description: Op is the operator to be applied. + enum: + - In + - NotIn + - InRegexp + - Exists + - DoesNotExist + - Gt + - Lt + - GtLt + - IsTrue + - IsFalse + type: string + value: + description: |- + Value is the list of values that the operand evaluates the input + against. Value should be empty if the operator is Exists, DoesNotExist, + IsTrue or IsFalse. Value should contain exactly one element if the + operator is Gt or Lt and exactly two elements if the operator is GtLt. + In other cases Value should contain at least one element. + items: + type: string + type: array + required: + - op + type: object + required: + - feature + type: object + type: array + name: + description: Name of the rule. + type: string + required: + - name + type: object + type: array + required: + - featureGroupRules + type: object + status: + description: |- + Status of the NodeFeatureGroup after the most recent evaluation of the + specification. + properties: + nodes: + description: Nodes is a list of FeatureGroupNode in the cluster that + match the featureGroupRules + items: + properties: + name: + description: Name of the node. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 name: nodefeaturerules.nfd.k8s-sigs.io spec: group: nfd.k8s-sigs.io @@ -129,23 +411,29 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: NodeFeatureRule resource specifies a configuration for feature-based + description: |- + NodeFeatureRule resource specifies a configuration for feature-based customization of node objects, such as node labeling. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: NodeFeatureRuleSpec describes a NodeFeatureRule. + description: Spec defines the rules to be evaluated. properties: rules: description: Rules is a list of node customization rules. @@ -153,6 +441,11 @@ spec: description: Rule defines a rule for node customization such as labeling. properties: + annotations: + additionalProperties: + type: string + description: Annotations to create if the rule matches. + type: object extendedResources: additionalProperties: type: string @@ -164,10 +457,10 @@ spec: description: Labels to create if the rule matches. type: object labelsTemplate: - description: LabelsTemplate specifies a template to expand for - dynamically generating multiple labels. Data (after template - expansion) must be keys with an optional value ([=]) - separated by newlines. + description: |- + LabelsTemplate specifies a template to expand for dynamically generating + multiple labels. Data (after template expansion) must be keys with an + optional value ([=]) separated by newlines. type: string matchAny: description: MatchAny specifies a list of matchers one of which @@ -179,25 +472,21 @@ spec: description: MatchFeatures specifies a set of matcher terms all of which must match. items: - description: FeatureMatcherTerm defines requirements - against one feature set. All requirements (specified - as MatchExpressions) are evaluated against each element - in the feature set. + description: |- + FeatureMatcherTerm defines requirements against one feature set. All + requirements (specified as MatchExpressions) are evaluated against each + element in the feature set. properties: feature: + description: Feature is the name of the feature + set to match against. type: string matchExpressions: additionalProperties: - description: "MatchExpression specifies an expression - to evaluate against a set of input values. It - contains an operator that is applied when matching - the input and an array of values that the operator - evaluates the input against. \n NB: CreateMatchExpression - or MustCreateMatchExpression() should be used - for creating new instances. \n NB: Validate() - must be called if Op or Value fields are modified - or if a new instance is created from scratch - without using the helper functions." + description: |- + MatchExpression specifies an expression to evaluate against a set of input + values. It contains an operator that is applied when matching the input and + an array of values that the operator evaluates the input against. properties: op: description: Op is the operator to be applied. @@ -214,28 +503,56 @@ spec: - IsFalse type: string value: - description: Value is the list of values that - the operand evaluates the input against. 
- Value should be empty if the operator is - Exists, DoesNotExist, IsTrue or IsFalse. - Value should contain exactly one element - if the operator is Gt or Lt and exactly - two elements if the operator is GtLt. In - other cases Value should contain at least - one element. + description: |- + Value is the list of values that the operand evaluates the input + against. Value should be empty if the operator is Exists, DoesNotExist, + IsTrue or IsFalse. Value should contain exactly one element if the + operator is Gt or Lt and exactly two elements if the operator is GtLt. + In other cases Value should contain at least one element. items: type: string type: array required: - op type: object - description: MatchExpressionSet contains a set of - MatchExpressions, each of which is evaluated against - a set of input values. + description: |- + MatchExpressions is the set of per-element expressions evaluated. These + match against the value of the specified elements. + type: object + matchName: + description: |- + MatchName in an expression that is matched against the name of each + element in the feature set. + properties: + op: + description: Op is the operator to be applied. + enum: + - In + - NotIn + - InRegexp + - Exists + - DoesNotExist + - Gt + - Lt + - GtLt + - IsTrue + - IsFalse + type: string + value: + description: |- + Value is the list of values that the operand evaluates the input + against. Value should be empty if the operator is Exists, DoesNotExist, + IsTrue or IsFalse. Value should contain exactly one element if the + operator is Gt or Lt and exactly two elements if the operator is GtLt. + In other cases Value should contain at least one element. + items: + type: string + type: array + required: + - op type: object required: - feature - - matchExpressions type: object type: array required: @@ -246,23 +563,21 @@ spec: description: MatchFeatures specifies a set of matcher terms all of which must match. items: - description: FeatureMatcherTerm defines requirements against - one feature set. All requirements (specified as MatchExpressions) - are evaluated against each element in the feature set. + description: |- + FeatureMatcherTerm defines requirements against one feature set. All + requirements (specified as MatchExpressions) are evaluated against each + element in the feature set. properties: feature: + description: Feature is the name of the feature set to + match against. type: string matchExpressions: additionalProperties: - description: "MatchExpression specifies an expression - to evaluate against a set of input values. It contains - an operator that is applied when matching the input - and an array of values that the operator evaluates - the input against. \n NB: CreateMatchExpression or - MustCreateMatchExpression() should be used for creating - new instances. \n NB: Validate() must be called if - Op or Value fields are modified or if a new instance - is created from scratch without using the helper functions." + description: |- + MatchExpression specifies an expression to evaluate against a set of input + values. It contains an operator that is applied when matching the input and + an array of values that the operator evaluates the input against. properties: op: description: Op is the operator to be applied. @@ -279,25 +594,56 @@ spec: - IsFalse type: string value: - description: Value is the list of values that the - operand evaluates the input against. Value should - be empty if the operator is Exists, DoesNotExist, - IsTrue or IsFalse. 
Value should contain exactly - one element if the operator is Gt or Lt and exactly - two elements if the operator is GtLt. In other - cases Value should contain at least one element. + description: |- + Value is the list of values that the operand evaluates the input + against. Value should be empty if the operator is Exists, DoesNotExist, + IsTrue or IsFalse. Value should contain exactly one element if the + operator is Gt or Lt and exactly two elements if the operator is GtLt. + In other cases Value should contain at least one element. items: type: string type: array required: - op type: object - description: MatchExpressionSet contains a set of MatchExpressions, - each of which is evaluated against a set of input values. + description: |- + MatchExpressions is the set of per-element expressions evaluated. These + match against the value of the specified elements. + type: object + matchName: + description: |- + MatchName in an expression that is matched against the name of each + element in the feature set. + properties: + op: + description: Op is the operator to be applied. + enum: + - In + - NotIn + - InRegexp + - Exists + - DoesNotExist + - Gt + - Lt + - GtLt + - IsTrue + - IsFalse + type: string + value: + description: |- + Value is the list of values that the operand evaluates the input + against. Value should be empty if the operator is Exists, DoesNotExist, + IsTrue or IsFalse. Value should contain exactly one element if the + operator is Gt or Lt and exactly two elements if the operator is GtLt. + In other cases Value should contain at least one element. + items: + type: string + type: array + required: + - op type: object required: - feature - - matchExpressions type: object type: array name: @@ -306,21 +652,24 @@ spec: taints: description: Taints to create if the rule matches. items: - description: The node this Taint is attached to has the "effect" - on any pod that does not tolerate the Taint. + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. properties: effect: - description: Required. The effect of the taint on pods - that do not tolerate the taint. Valid effects are NoSchedule, - PreferNoSchedule and NoExecute. + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. type: string key: description: Required. The taint key to be applied to a node. type: string timeAdded: - description: TimeAdded represents the time at which the - taint was added. It is only written for NoExecute taints. + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. format: date-time type: string value: @@ -335,17 +684,17 @@ spec: vars: additionalProperties: type: string - description: Vars is the variables to store if the rule matches. - Variables do not directly inflict any changes in the node - object. However, they can be referenced from other rules enabling - more complex rule hierarchies, without exposing intermediary - output values as labels. + description: |- + Vars is the variables to store if the rule matches. Variables do not + directly inflict any changes in the node object. However, they can be + referenced from other rules enabling more complex rule hierarchies, + without exposing intermediary output values as labels. type: object varsTemplate: - description: VarsTemplate specifies a template to expand for - dynamically generating multiple variables. 
Data (after template - expansion) must be keys with an optional value ([=]) - separated by newlines. + description: |- + VarsTemplate specifies a template to expand for dynamically generating + multiple variables. Data (after template expansion) must be keys with an + optional value ([=]) separated by newlines. type: string required: - name diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-certs.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-certs.yaml index ac2e51fc1..2d1576022 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-certs.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-certs.yaml @@ -1,4 +1,5 @@ {{- if .Values.tls.certManager }} +{{- if .Values.master.enable }} --- apiVersion: cert-manager.io/v1 kind: Certificate @@ -17,14 +18,17 @@ spec: # first one is configured for use by the worker; below are for completeness - {{ include "node-feature-discovery.fullname" . }}-master.{{ include "node-feature-discovery.namespace" . }}.svc - {{ include "node-feature-discovery.fullname" . }}-master.{{ include "node-feature-discovery.namespace" . }}.svc.cluster.local - # localhost needed for grpc_health_probe - - localhost issuerRef: - name: nfd-ca-issuer + name: {{ default "nfd-ca-issuer" .Values.tls.certManagerCertificate.issuerName }} + {{- if and .Values.tls.certManagerCertificate.issuerName .Values.tls.certManagerCertificate.issuerKind }} + kind: {{ .Values.tls.certManagerCertificate.issuerKind }} + {{- else }} kind: Issuer + {{- end }} group: cert-manager.io - +{{- end }} --- +{{- if .Values.worker.enable }} apiVersion: cert-manager.io/v1 kind: Certificate metadata: @@ -39,9 +43,14 @@ spec: dnsNames: - {{ include "node-feature-discovery.fullname" . }}-worker.{{ include "node-feature-discovery.namespace" . }}.svc.cluster.local issuerRef: - name: nfd-ca-issuer + name: {{ default "nfd-ca-issuer" .Values.tls.certManagerCertificate.issuerName }} + {{- if and .Values.tls.certManagerCertificate.issuerName .Values.tls.certManagerCertificate.issuerKind }} + kind: {{ .Values.tls.certManagerCertificate.issuerKind }} + {{- else }} kind: Issuer + {{- end }} group: cert-manager.io +{{- end }} {{- if .Values.topologyUpdater.enable }} --- @@ -59,8 +68,12 @@ spec: dnsNames: - {{ include "node-feature-discovery.fullname" . }}-topology-updater.{{ include "node-feature-discovery.namespace" . 
}}.svc.cluster.local issuerRef: - name: nfd-ca-issuer + name: {{ default "nfd-ca-issuer" .Values.tls.certManagerCertificate.issuerName }} + {{- if and .Values.tls.certManagerCertificate.issuerName .Values.tls.certManagerCertificate.issuerKind }} + kind: {{ .Values.tls.certManagerCertificate.issuerKind }} + {{- else }} kind: Issuer + {{- end }} group: cert-manager.io {{- end }} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-issuer.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-issuer.yaml index f3c57acea..874468908 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-issuer.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-issuer.yaml @@ -1,4 +1,4 @@ -{{- if .Values.tls.certManager }} +{{- if and .Values.tls.certManager (not .Values.tls.certManagerCertificate.issuerName ) }} # See https://cert-manager.io/docs/configuration/selfsigned/#bootstrapping-ca-issuers # - Create a self signed issuer # - Use this to create a CA cert diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrole.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrole.yaml index d4329338b..f935cfe41 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrole.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrole.yaml @@ -1,4 +1,4 @@ -{{- if .Values.master.rbac.create }} +{{- if and .Values.master.enable .Values.master.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -21,10 +21,18 @@ rules: resources: - nodefeatures - nodefeaturerules + - nodefeaturegroups verbs: - get - list - watch +- apiGroups: + - nfd.k8s-sigs.io + resources: + - nodefeaturegroups/status + verbs: + - patch + - update - apiGroups: - coordination.k8s.io resources: @@ -58,6 +66,12 @@ rules: verbs: - get - list +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get - apiGroups: - "" resources: @@ -80,7 +94,7 @@ rules: - update {{- end }} -{{- if and .Values.gc.enable .Values.gc.rbac.create (or .Values.enableNodeFeatureApi .Values.topologyUpdater.enable) }} +{{- if and .Values.gc.enable .Values.gc.rbac.create (or (and .Values.featureGates.NodeFeatureAPI .Values.enableNodeFeatureApi) .Values.topologyUpdater.enable) }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrolebinding.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrolebinding.yaml index 87b3003e2..3f717988b 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrolebinding.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -{{- if .Values.master.rbac.create }} +{{- if and .Values.master.enable .Values.master.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -33,7 +33,7 @@ subjects: namespace: {{ include "node-feature-discovery.namespace" . 
}} {{- end }} -{{- if and .Values.gc.enable .Values.gc.rbac.create (or .Values.enableNodeFeatureApi .Values.topologyUpdater.enable) }} +{{- if and .Values.gc.enable .Values.gc.rbac.create (or (and .Values.featureGates.NodeFeatureAPI .Values.enableNodeFeatureApi) .Values.topologyUpdater.enable) }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/master.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/master.yaml index e77ca136c..733131a03 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/master.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/master.yaml @@ -1,3 +1,4 @@ +{{- if .Values.master.enable }} apiVersion: apps/v1 kind: Deployment metadata: @@ -12,6 +13,7 @@ metadata: {{- end }} spec: replicas: {{ .Values.master.replicaCount }} + revisionHistoryLimit: {{ .Values.master.revisionHistoryLimit }} selector: matchLabels: {{- include "node-feature-discovery.selectorLabels" . | nindent 6 }} @@ -26,6 +28,9 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} spec: + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} @@ -34,6 +39,7 @@ spec: enableServiceLinks: false securityContext: {{- toYaml .Values.master.podSecurityContext | nindent 8 }} + hostNetwork: {{ .Values.master.hostNetwork }} containers: - name: master securityContext: @@ -41,42 +47,24 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} livenessProbe: - exec: - command: - - "/usr/bin/grpc_health_probe" - - "-addr=:{{ .Values.master.port | default "8080" }}" - {{- if .Values.tls.enable }} - - "-tls" - - "-tls-ca-cert=/etc/kubernetes/node-feature-discovery/certs/ca.crt" - - "-tls-client-key=/etc/kubernetes/node-feature-discovery/certs/tls.key" - - "-tls-client-cert=/etc/kubernetes/node-feature-discovery/certs/tls.crt" - {{- end }} - initialDelaySeconds: 10 - periodSeconds: 10 + {{- toYaml .Values.master.livenessProbe | nindent 12 }} readinessProbe: - exec: - command: - - "/usr/bin/grpc_health_probe" - - "-addr=:{{ .Values.master.port | default "8080" }}" - {{- if .Values.tls.enable }} - - "-tls" - - "-tls-ca-cert=/etc/kubernetes/node-feature-discovery/certs/ca.crt" - - "-tls-client-key=/etc/kubernetes/node-feature-discovery/certs/tls.key" - - "-tls-client-cert=/etc/kubernetes/node-feature-discovery/certs/tls.crt" - {{- end }} - initialDelaySeconds: 5 - periodSeconds: 10 - failureThreshold: 10 + {{- toYaml .Values.master.readinessProbe | nindent 12 }} ports: - containerPort: {{ .Values.master.port | default "8080" }} name: grpc - containerPort: {{ .Values.master.metricsPort | default "8081" }} name: metrics + - containerPort: {{ .Values.master.healthPort | default "8082" }} + name: health env: - name: NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName + {{- with .Values.master.extraEnvs }} + {{- toYaml . 
| nindent 8 }} + {{- end}} command: - "nfd-master" resources: @@ -85,9 +73,8 @@ spec: {{- if .Values.master.instance | empty | not }} - "-instance={{ .Values.master.instance }}" {{- end }} + {{- if not (and .Values.featureGates.NodeFeatureAPI .Values.enableNodeFeatureApi) }} - "-port={{ .Values.master.port | default "8080" }}" - {{- if not .Values.enableNodeFeatureApi }} - - "-enable-nodefeature-api=false" {{- else if gt (int .Values.master.replicaCount) 1 }} - "-enable-leader-election" {{- end }} @@ -123,7 +110,12 @@ spec: - "-key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key" - "-cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt" {{- end }} + # Go over featureGates and add the feature-gate flag + {{- range $key, $value := .Values.featureGates }} + - "-feature-gates={{ $key }}={{ $value }}" + {{- end }} - "-metrics={{ .Values.master.metricsPort | default "8081" }}" + - "-grpc-health={{ .Values.master.healthPort | default "8082" }}" volumeMounts: {{- if .Values.tls.enable }} - name: nfd-master-cert @@ -157,3 +149,4 @@ spec: tolerations: {{- toYaml . | nindent 8 }} {{- end }} +{{- end }} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-gc.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-gc.yaml index d803eef40..375f93827 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-gc.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-gc.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.gc.enable (or .Values.enableNodeFeatureApi .Values.topologyUpdater.enable) -}} +{{- if and .Values.gc.enable (or (and .Values.featureGates.NodeFeatureAPI .Values.enableNodeFeatureApi) .Values.topologyUpdater.enable) -}} apiVersion: apps/v1 kind: Deployment metadata: @@ -13,6 +13,7 @@ metadata: {{- end }} spec: replicas: {{ .Values.gc.replicaCount | default 1 }} + revisionHistoryLimit: {{ .Values.gc.revisionHistoryLimit }} selector: matchLabels: {{- include "node-feature-discovery.selectorLabels" . | nindent 6 }} @@ -29,12 +30,16 @@ spec: spec: serviceAccountName: {{ include "node-feature-discovery.gc.serviceAccountName" . }} dnsPolicy: ClusterFirstWithHostNet + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} securityContext: {{- toYaml .Values.gc.podSecurityContext | nindent 8 }} + hostNetwork: {{ .Values.gc.hostNetwork }} containers: - name: gc image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" @@ -44,6 +49,9 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + {{- with .Values.gc.extraEnvs }} + {{- toYaml . 
| nindent 8 }} + {{- end}} command: - "nfd-gc" args: @@ -58,6 +66,9 @@ spec: drop: [ "ALL" ] readOnlyRootFilesystem: true runAsNonRoot: true + ports: + - name: metrics + containerPort: {{ .Values.gc.metricsPort | default "8081"}} {{- with .Values.gc.nodeSelector }} nodeSelector: diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-master-conf.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-master-conf.yaml index c806a8e5d..9c6e01cde 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-master-conf.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-master-conf.yaml @@ -1,3 +1,4 @@ +{{- if .Values.master.enable }} apiVersion: v1 kind: ConfigMap metadata: @@ -8,3 +9,4 @@ metadata: data: nfd-master.conf: |- {{- .Values.master.config | toYaml | nindent 4 }} +{{- end }} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-topologyupdater-conf.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-topologyupdater-conf.yaml index 9867f5089..8d03aa2d8 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-topologyupdater-conf.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-topologyupdater-conf.yaml @@ -1,3 +1,4 @@ +{{- if .Values.topologyUpdater.enable -}} apiVersion: v1 kind: ConfigMap metadata: @@ -8,3 +9,4 @@ metadata: data: nfd-topology-updater.conf: |- {{- .Values.topologyUpdater.config | toYaml | nindent 4 }} +{{- end }} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-worker-conf.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-worker-conf.yaml index 61d2a481a..a2299dea1 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-worker-conf.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-worker-conf.yaml @@ -1,3 +1,4 @@ +{{- if .Values.worker.enable }} apiVersion: v1 kind: ConfigMap metadata: @@ -8,3 +9,4 @@ metadata: data: nfd-worker.conf: |- {{- .Values.worker.config | toYaml | nindent 4 }} +{{- end }} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/post-delete-job.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/post-delete-job.yaml new file mode 100644 index 000000000..4364f1aa2 --- /dev/null +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/post-delete-job.yaml @@ -0,0 +1,94 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "node-feature-discovery.fullname" . }}-prune + namespace: {{ include "node-feature-discovery.namespace" . }} + labels: + {{- include "node-feature-discovery.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "node-feature-discovery.fullname" . }}-prune + labels: + {{- include "node-feature-discovery.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +rules: +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - get + - patch + - update + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "node-feature-discovery.fullname" . }}-prune + labels: + {{- include "node-feature-discovery.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "node-feature-discovery.fullname" . }}-prune +subjects: +- kind: ServiceAccount + name: {{ include "node-feature-discovery.fullname" . }}-prune + namespace: {{ include "node-feature-discovery.namespace" . }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "node-feature-discovery.fullname" . }}-prune + namespace: {{ include "node-feature-discovery.namespace" . }} + labels: + {{- include "node-feature-discovery.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + template: + metadata: + labels: + {{- include "node-feature-discovery.labels" . | nindent 8 }} + role: prune + spec: + serviceAccountName: {{ include "node-feature-discovery.fullname" . }}-prune + containers: + - name: nfd-master + securityContext: + {{- toYaml .Values.master.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "nfd-master" + args: + - "-prune" + {{- if .Values.master.instance | empty | not }} + - "-instance={{ .Values.master.instance }}" + {{- end }} + restartPolicy: Never + {{- with .Values.master.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/prometheus.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/prometheus.yaml index b9f4b4640..3d680e24e 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/prometheus.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/prometheus.yaml @@ -12,7 +12,7 @@ metadata: spec: podMetricsEndpoints: - honorLabels: true - interval: 10s + interval: {{ .Values.prometheus.scrapeInterval }} path: /metrics port: metrics scheme: http diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/role.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/role.yaml index c71ede442..52c69eb19 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/role.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/role.yaml @@ -1,4 +1,4 @@ -{{- if .Values.worker.rbac.create }} +{{- if and .Values.worker.enable .Values.worker.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: @@ -15,5 +15,10 @@ rules: - create - get - update +- apiGroups: + - "" + resources: + - pods + verbs: + - get {{- end }} - diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/rolebinding.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/rolebinding.yaml index d8025be9b..a640d5f8b 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/rolebinding.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/rolebinding.yaml @@ -1,4 +1,4 @@ -{{- if .Values.worker.rbac.create }} +{{- if and .Values.worker.enable .Values.worker.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: diff --git 
a/deployments/gpu-operator/charts/node-feature-discovery/templates/service.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/service.yaml index 0d4789818..7191dca70 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/service.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/service.yaml @@ -1,3 +1,4 @@ +{{- if and (not (and .Values.featureGates.NodeFeatureAPI .Values.enableNodeFeatureApi)) .Values.master.enable }} apiVersion: v1 kind: Service metadata: @@ -16,3 +17,4 @@ spec: selector: {{- include "node-feature-discovery.selectorLabels" . | nindent 4 }} role: master +{{- end}} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/serviceaccount.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/serviceaccount.yaml index 34dc8b753..59edc5e6c 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/serviceaccount.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/serviceaccount.yaml @@ -1,4 +1,4 @@ -{{- if .Values.master.serviceAccount.create -}} +{{- if and .Values.master.enable .Values.master.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: @@ -27,7 +27,7 @@ metadata: {{- end }} {{- end }} -{{- if and .Values.gc.enable .Values.gc.serviceAccount.create (or .Values.enableNodeFeatureApi .Values.topologyUpdater.enable) }} +{{- if and .Values.gc.enable .Values.gc.serviceAccount.create (or (and .Values.featureGates.NodeFeatureAPI .Values.enableNodeFeatureApi) .Values.topologyUpdater.enable) }} --- apiVersion: v1 kind: ServiceAccount @@ -42,7 +42,7 @@ metadata: {{- end }} {{- end }} -{{- if .Values.worker.serviceAccount.create }} +{{- if and .Values.worker.enable .Values.worker.serviceAccount.create }} --- apiVersion: v1 kind: ServiceAccount diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/topologyupdater.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/topologyupdater.yaml index f51c10e6d..ba0214c88 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/topologyupdater.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/topologyupdater.yaml @@ -12,6 +12,7 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} spec: + revisionHistoryLimit: {{ .Values.topologyUpdater.revisionHistoryLimit }} selector: matchLabels: {{- include "node-feature-discovery.selectorLabels" . | nindent 6 }} @@ -28,16 +29,24 @@ spec: spec: serviceAccountName: {{ include "node-feature-discovery.topologyUpdater.serviceAccountName" . }} dnsPolicy: ClusterFirstWithHostNet + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} securityContext: {{- toYaml .Values.topologyUpdater.podSecurityContext | nindent 8 }} + hostNetwork: {{ .Values.topologyUpdater.hostNetwork }} containers: - name: topology-updater image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: "{{ .Values.image.pullPolicy }}" + livenessProbe: + {{- toYaml .Values.topologyUpdater.livenessProbe | nindent 10 }} + readinessProbe: + {{- toYaml .Values.topologyUpdater.readinessProbe | nindent 10 }} env: - name: NODE_NAME valueFrom: @@ -47,6 +56,9 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + {{- with .Values.topologyUpdater.extraEnvs }} + {{- toYaml . 
| nindent 8 }} + {{- end}} command: - "nfd-topology-updater" args: @@ -66,8 +78,8 @@ spec: - "-key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key" - "-cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt" {{- end }} - {{- if .Values.topologyUpdater.podSetFingerprint }} - - "-pods-fingerprint" + {{- if not .Values.topologyUpdater.podSetFingerprint }} + - "-pods-fingerprint=false" {{- end }} {{- if .Values.topologyUpdater.kubeletConfigPath | empty | not }} - "-kubelet-config-uri=file:///host-var/kubelet-config" @@ -77,9 +89,12 @@ spec: - "-kubelet-state-dir=" {{- end }} - -metrics={{ .Values.topologyUpdater.metricsPort | default "8081"}} + - "-grpc-health={{ .Values.topologyUpdater.healthPort | default "8082" }}" ports: - - name: metrics - containerPort: {{ .Values.topologyUpdater.metricsPort | default "8081"}} + - containerPort: {{ .Values.topologyUpdater.metricsPort | default "8081"}} + name: metrics + - containerPort: {{ .Values.topologyUpdater.healthPort | default "8082" }} + name: health volumeMounts: {{- if .Values.topologyUpdater.kubeletConfigPath | empty | not }} - name: kubelet-config diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/worker.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/worker.yaml index 0e56eb5d1..755466c75 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/worker.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/worker.yaml @@ -1,3 +1,4 @@ +{{- if .Values.worker.enable }} apiVersion: apps/v1 kind: DaemonSet metadata: @@ -11,6 +12,7 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} spec: + revisionHistoryLimit: {{ .Values.worker.revisionHistoryLimit }} selector: matchLabels: {{- include "node-feature-discovery.selectorLabels" . | nindent 6 }} @@ -26,6 +28,9 @@ spec: {{- end }} spec: dnsPolicy: ClusterFirstWithHostNet + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} @@ -33,35 +38,57 @@ spec: serviceAccountName: {{ include "node-feature-discovery.worker.serviceAccountName" . }} securityContext: {{- toYaml .Values.worker.podSecurityContext | nindent 8 }} + hostNetwork: {{ .Values.worker.hostNetwork }} containers: - name: worker securityContext: {{- toYaml .Values.worker.securityContext | nindent 12 }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} + livenessProbe: + {{- toYaml .Values.worker.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.worker.readinessProbe | nindent 12 }} env: - name: NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + {{- with .Values.worker.extraEnvs }} + {{- toYaml . | nindent 8 }} + {{- end}} resources: {{- toYaml .Values.worker.resources | nindent 12 }} command: - "nfd-worker" args: +{{- if not (and .Values.featureGates.NodeFeatureAPI .Values.enableNodeFeatureApi) }} - "-server={{ include "node-feature-discovery.fullname" . 
}}-master:{{ .Values.master.service.port }}" - {{- if not .Values.enableNodeFeatureApi }} - - "-enable-nodefeature-api=false" - {{- end }} +{{- end }} {{- if .Values.tls.enable }} - "-ca-file=/etc/kubernetes/node-feature-discovery/certs/ca.crt" - "-key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key" - "-cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt" +{{- end }} +# Go over featureGates and add the feature-gate flag +{{- range $key, $value := .Values.featureGates }} + - "-feature-gates={{ $key }}={{ $value }}" {{- end }} - "-metrics={{ .Values.worker.metricsPort | default "8081"}}" + - "-grpc-health={{ .Values.worker.healthPort | default "8082" }}" ports: - - name: metrics - containerPort: {{ .Values.worker.metricsPort | default "8081"}} + - containerPort: {{ .Values.worker.metricsPort | default "8081"}} + name: metrics + - containerPort: {{ .Values.worker.healthPort | default "8082" }} + name: health volumeMounts: - name: host-boot mountPath: "/host-boot" @@ -78,6 +105,9 @@ spec: - name: host-lib mountPath: "/host-lib" readOnly: true + - name: host-proc-swaps + mountPath: "/host-proc/swaps" + readOnly: true {{- if .Values.worker.mountUsrSrc }} - name: host-usr-src mountPath: "/host-usr/src" @@ -113,6 +143,9 @@ spec: - name: host-lib hostPath: path: "/lib" + - name: host-proc-swaps + hostPath: + path: "/proc/swaps" {{- if .Values.worker.mountUsrSrc }} - name: host-usr-src hostPath: @@ -150,3 +183,4 @@ spec: {{- with .Values.worker.priorityClassName }} priorityClassName: {{ . | quote }} {{- end }} +{{- end }} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/values.yaml b/deployments/gpu-operator/charts/node-feature-discovery/values.yaml index 2291aef4f..2d24983db 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/values.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/values.yaml @@ -12,9 +12,19 @@ namespaceOverride: "" enableNodeFeatureApi: true +featureGates: + NodeFeatureAPI: true + NodeFeatureGroupAPI: false + +priorityClassName: "" + master: + enable: true + extraEnvs: [] + hostNetwork: false config: ### # noPublish: false + # autoDefaultNs: true # extraLabelNs: ["added.ns.io","added.kubernetes.io"] # denyLabelNs: ["denied.ns.io","denied.kubernetes.io"] # resourceLabels: ["vendor-1.com/feature-1","vendor-2.io/feature-2"] @@ -45,8 +55,11 @@ master: # nfdApiParallelism: 10 ### # The TCP port that nfd-master listens for incoming requests. Default: 8080 + # Deprecated: this parameter is related to the deprecated gRPC API and will + # be removed with it in a future release port: 8080 metricsPort: 8081 + healthPort: 8082 instance: featureApi: resyncPeriod: @@ -79,6 +92,9 @@ master: # The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: + + # specify how many old ReplicaSets for the Deployment to retain. + revisionHistoryLimit: rbac: create: true @@ -87,17 +103,17 @@ master: type: ClusterIP port: 8080 - resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + resources: + limits: + memory: 4Gi + requests: + cpu: 100m + # You may want to use the same value for `requests.memory` and `limits.memory`. The “requests” value affects scheduling to accommodate pods on nodes. + # If there is a large difference between “requests” and “limits” and nodes experience memory pressure, the kernel may invoke + # the OOM Killer, even if the memory does not exceed the “limits” threshold. This can cause unexpected pod evictions. Memory + # cannot be compressed and once allocated to a pod, it can only be reclaimed by killing the pod. + # Natan Yellin 22/09/2022 https://home.robusta.dev/blog/kubernetes-memory-limit + memory: 128Mi nodeSelector: {} @@ -128,8 +144,24 @@ master: - key: "node-role.kubernetes.io/control-plane" operator: In values: [""] + + livenessProbe: + grpc: + port: 8082 + initialDelaySeconds: 10 + # failureThreshold: 3 + # periodSeconds: 10 + readinessProbe: + grpc: + port: 8082 + initialDelaySeconds: 5 + failureThreshold: 10 + # periodSeconds: 10 worker: + enable: true + extraEnvs: [] + hostNetwork: false config: ### #core: # labelWhiteList: @@ -157,6 +189,7 @@ worker: # cpuid: ## NOTE: whitelist has priority over blacklist # attributeBlacklist: + # - "AVX10" # - "BMI1" # - "BMI2" # - "CLMUL" @@ -215,7 +248,7 @@ worker: # # The following feature demonstrates the capabilities of the matchFeatures # - name: "my custom rule" # labels: - # my-ng-feature: "true" + # "vendor.io/my-ng-feature": "true" # # matchFeatures implements a logical AND over all matcher terms in the # # list (i.e. all of the terms, or per-feature matchers, must match) # matchFeatures: @@ -286,7 +319,7 @@ worker: # # The following feature demonstrates the capabilities of the matchAny # - name: "my matchAny rule" # labels: - # my-ng-feature-2: "my-value" + # "vendor.io/my-ng-feature-2": "my-value" # # matchAny implements a logical IF over all elements (sub-matchers) in # # the list (i.e. 
at least one feature matcher must match) # matchAny: @@ -307,10 +340,17 @@ worker: # vendor: {op: In, value: ["8086"]} # class: {op: In, value: ["02"]} # + # - name: "avx wildcard rule" + # labels: + # "my-avx-feature": "true" + # matchFeatures: + # - feature: cpu.cpuid + # matchName: {op: InRegexp, value: ["^AVX512"]} + # # # The following features demonstrate label templating capabilities # - name: "my template rule" # labelsTemplate: | - # {{ range .system.osrelease }}my-system-feature.{{ .Name }}={{ .Value }} + # {{ range .system.osrelease }}vendor.io/my-system-feature.{{ .Name }}={{ .Value }} # {{ end }} # matchFeatures: # - feature: system.osrelease @@ -320,7 +360,7 @@ worker: # # - name: "my template rule 2" # labelsTemplate: | - # {{ range .pci.device }}my-pci-device.{{ .class }}-{{ .device }}=with-cpuid + # {{ range .pci.device }}vendor.io/my-pci-device.{{ .class }}-{{ .device }}=with-cpuid # {{ end }} # matchFeatures: # - feature: pci.device @@ -335,7 +375,7 @@ worker: # # previous labels and vars # - name: "my dummy kernel rule" # labels: - # "my.kernel.feature": "true" + # "vendor.io/my.kernel.feature": "true" # matchFeatures: # - feature: kernel.version # matchExpressions: @@ -350,16 +390,24 @@ worker: # # - name: "my rule using backrefs" # labels: - # "my.backref.feature": "true" + # "vendor.io/my.backref.feature": "true" # matchFeatures: # - feature: rule.matched # matchExpressions: - # my.kernel.feature: {op: IsTrue} + # vendor.io/my.kernel.feature: {op: IsTrue} # my.dummy.var: {op: Gt, value: ["0"]} # + # - name: "kconfig template rule" + # labelsTemplate: | + # {{ range .kernel.config }}kconfig-{{ .Name }}={{ .Value }} + # {{ end }} + # matchFeatures: + # - feature: kernel.config + # matchName: {op: In, value: ["SWAP", "X86", "ARM"]} ### metricsPort: 8081 + healthPort: 8082 daemonsetAnnotations: {} podSecurityContext: {} # fsGroup: 2000 @@ -372,6 +420,19 @@ worker: runAsNonRoot: true # runAsUser: 1000 + livenessProbe: + grpc: + port: 8082 + initialDelaySeconds: 10 + # failureThreshold: 3 + # periodSeconds: 10 + readinessProbe: + grpc: + port: 8082 + initialDelaySeconds: 5 + failureThreshold: 10 + # periodSeconds: 10 + serviceAccount: # Specifies whether a service account should be created. # We create this by default to make it easier for downstream users to apply PodSecurityPolicies. @@ -382,6 +443,9 @@ worker: # If not set and create is true, a name is generated using the fullname template name: + # specify how many old ControllerRevisions for the DaemonSet to retain. + revisionHistoryLimit: + rbac: create: true @@ -389,17 +453,12 @@ worker: # Does not work on systems without /usr/src AND a read-only /usr, such as Talos mountUsrSrc: false - resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + resources: + limits: + memory: 512Mi + requests: + cpu: 5m + memory: 64Mi nodeSelector: {} @@ -424,15 +483,22 @@ topologyUpdater: enable: false createCRDs: false + extraEnvs: [] + hostNetwork: false serviceAccount: create: true annotations: {} name: + + # specify how many old ControllerRevisions for the DaemonSet to retain.
+ revisionHistoryLimit: + rbac: create: true metricsPort: 8081 + healthPort: 8082 kubeletConfigPath: kubeletPodResourcesSockPath: updateInterval: 60s @@ -446,18 +512,26 @@ topologyUpdater: drop: [ "ALL" ] readOnlyRootFilesystem: true runAsUser: 0 - - resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + + livenessProbe: + grpc: + port: 8082 + initialDelaySeconds: 10 + # failureThreshold: 3 + # periodSeconds: 10 + readinessProbe: + grpc: + port: 8082 + initialDelaySeconds: 5 + failureThreshold: 10 + # periodSeconds: 10 + + resources: + limits: + memory: 60Mi + requests: + cpu: 50m + memory: 40Mi nodeSelector: {} tolerations: [] @@ -468,6 +542,8 @@ topologyUpdater: gc: enable: true + extraEnvs: [] + hostNetwork: false replicaCount: 1 serviceAccount: @@ -481,17 +557,14 @@ gc: podSecurityContext: {} - resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + resources: + limits: + memory: 1Gi + requests: + cpu: 10m + memory: 128Mi + + metricsPort: 8081 nodeSelector: {} tolerations: [] @@ -499,6 +572,9 @@ gc: deploymentAnnotations: {} affinity: {} + # specify how many old ReplicaSets for the Deployment to retain. + revisionHistoryLimit: + # Optionally use encryption for worker <--> master comms # TODO: verify hostname is not yet supported # @@ -507,7 +583,11 @@ gc: tls: enable: false certManager: false + certManagerCertificate: + issuerKind: + issuerName: prometheus: enable: false + scrapeInterval: 10s labels: {} diff --git a/deployments/gpu-operator/crds/nvidia.com_clusterpolicies.yaml b/deployments/gpu-operator/crds/nvidia.com_clusterpolicies.yaml new file mode 100644 index 000000000..54e4a652b --- /dev/null +++ b/deployments/gpu-operator/crds/nvidia.com_clusterpolicies.yaml @@ -0,0 +1,2384 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: clusterpolicies.nvidia.com +spec: + group: nvidia.com + names: + kind: ClusterPolicy + listKind: ClusterPolicyList + plural: clusterpolicies + singular: clusterpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1 + schema: + openAPIV3Schema: + description: ClusterPolicy is the Schema for the clusterpolicies API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterPolicySpec defines the desired state of ClusterPolicy + properties: + ccManager: + description: CCManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + defaultMode: + description: Default CC mode setting for compatible GPUs on the + node + enum: + - "on" + - "off" + - devtools + type: string + enabled: + description: Enabled indicates if deployment of CC Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: CC Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: CC Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: CC Manager image tag + type: string + type: object + cdi: + description: CDI configures how the Container Device Interface is + used in the cluster + properties: + default: + default: false + description: Default indicates whether to use CDI as the default + mechanism for providing GPU access to containers. + type: boolean + enabled: + default: false + description: Enabled indicates whether CDI can be used to make + GPUs accessible to containers. 
+ type: boolean + type: object + daemonsets: + description: Daemonset defines common configuration for all Daemonsets + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + labels: + additionalProperties: + type: string + description: |- + Optional: Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + type: object + priorityClassName: + type: string + rollingUpdate: + description: 'Optional: Configuration for rolling update of all + DaemonSet pods' + properties: + maxUnavailable: + type: string + type: object + tolerations: + description: 'Optional: Set tolerations' + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + updateStrategy: + default: RollingUpdate + enum: + - RollingUpdate + - OnDelete + type: string + type: object + dcgm: + description: DCGM component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA DCGM Hostengine + as a separate pod is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable.
+ type: string + required: + - name + type: object + type: array + hostPort: + description: 'Deprecated: HostPort represents host port that needs + to be bound for DCGM engine (Default: 5555)' + format: int32 + type: integer + image: + description: NVIDIA DCGM image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA DCGM image tag + type: string + type: object + dcgmExporter: + description: DCGMExporter spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom metrics configuration for NVIDIA + DCGM Exporter' + properties: + name: + description: ConfigMap name with file dcgm-metrics.csv for + metrics to be collected by NVIDIA DCGM Exporter + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA DCGM Exporter + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA DCGM Exporter image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA DCGM Exporter image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + serviceMonitor: + description: 'Optional: ServiceMonitor configuration for NVIDIA + DCGM Exporter' + properties: + additionalLabels: + additionalProperties: + type: string + description: AdditionalLabels to add to ServiceMonitor instance + for NVIDIA DCGM Exporter + type: object + enabled: + description: Enabled indicates if ServiceMonitor is deployed + for NVIDIA DCGM Exporter + type: boolean + honorLabels: + description: HonorLabels chooses the metric’s labels on collisions + with target labels. + type: boolean + interval: + description: |- + Interval which metrics should be scraped from NVIDIA DCGM Exporter. If not specified Prometheus’ global scrape interval is used. + Supported units: y, w, d, h, m, s, ms + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + relabelings: + description: Relabelings allows to rewrite labels on metric + sets for NVIDIA DCGM Exporter + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + version: + description: NVIDIA DCGM Exporter image tag + type: string + type: object + devicePlugin: + description: DevicePlugin component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Configuration for the NVIDIA Device Plugin + via the ConfigMap' + properties: + default: + description: Default config name within the ConfigMap for + the NVIDIA Device Plugin config + type: string + name: + description: ConfigMap name for NVIDIA Device Plugin config + including shared config between plugin and GFD + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Device + Plugin through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + mps: + description: 'Optional: MPS related configuration for the NVIDIA + Device Plugin' + properties: + root: + default: /run/nvidia/mps + description: Root defines the MPS root path on the host + type: string + type: object + repository: + description: NVIDIA Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Device Plugin image tag + type: string + type: object + driver: + description: Driver component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + certConfig: + description: 'Optional: Custom certificates configuration for + NVIDIA Driver container' + properties: + name: + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA Driver + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + kernelModuleConfig: + description: 'Optional: Kernel module configuration parameters + for the NVIDIA Driver' + properties: + name: + type: string + type: object + licensingConfig: + description: 'Optional: Licensing configuration for NVIDIA vGPU + licensing' + properties: + configMapName: + type: string + nlsEnabled: + description: NLSEnabled indicates if NVIDIA Licensing System + is used for licensing. + type: boolean + type: object + livenessProbe: + description: NVIDIA Driver container liveness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + manager: + description: Manager represents configuration for NVIDIA Driver + Manager initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Manager repository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag (version) + type: string + type: object + rdma: + description: GPUDirectRDMASpec defines the properties for nvidia-peermem + deployment + properties: + enabled: + description: Enabled indicates if GPUDirect RDMA is enabled + through GPU operator + type: boolean + useHostMofed: + description: UseHostMOFED indicates to use MOFED drivers directly + installed on the host to enable GPUDirect RDMA + type: boolean + type: object + readinessProbe: + description: NVIDIA Driver container readiness probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + repoConfig: + description: 'Optional: Custom repo configuration for NVIDIA Driver + container' + properties: + configMapName: + type: string + type: object + repository: + description: NVIDIA Driver image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + startupProbe: + description: NVIDIA Driver container startup probe settings + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + minimum: 1 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + minimum: 1 + type: integer + type: object + upgradePolicy: + description: Driver auto-upgrade settings + properties: + autoUpgrade: + default: false + description: |- + AutoUpgrade is a global switch for automatic upgrade feature + if set to false all other options are ignored + type: boolean + drain: + description: DrainSpec describes configuration for node drain + during automatic upgrade + properties: + deleteEmptyDir: + default: false + description: |- + DeleteEmptyDir indicates if should continue even if there are pods using emptyDir + (local data that will be deleted when the node is drained) + type: boolean + enable: + default: false + description: Enable indicates if node draining is allowed + during upgrade + type: boolean + force: + default: false + description: Force indicates if force draining is allowed + type: boolean + podSelector: + description: |- + PodSelector specifies a label selector to filter pods on the node that need to be drained + For more details on label selectors, see: + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + type: string + timeoutSeconds: + default: 300 + description: TimeoutSecond specifies the length of time + in seconds to wait before giving up drain, zero means + infinite + minimum: 0 + type: integer + type: object + maxParallelUpgrades: + default: 1 + description: |- + MaxParallelUpgrades indicates how many nodes can be upgraded in parallel + 0 means no limit, all nodes will be upgraded in parallel + minimum: 0 + type: integer + maxUnavailable: + anyOf: + - type: integer + - type: string + default: 25% + description: |- + MaxUnavailable is the maximum number of nodes with the driver installed, that can be unavailable during the upgrade. + Value can be an absolute number (ex: 5) or a percentage of total nodes at the start of upgrade (ex: 10%). + Absolute number is calculated from percentage by rounding up. + By default, a fixed value of 25% is used. 
+ x-kubernetes-int-or-string: true + podDeletion: + description: PodDeletionSpec describes configuration for deletion + of pods using special resources during automatic upgrade + properties: + deleteEmptyDir: + default: false + description: |- + DeleteEmptyDir indicates if should continue even if there are pods using emptyDir + (local data that will be deleted when the pod is deleted) + type: boolean + force: + default: false + description: Force indicates if force deletion is allowed + type: boolean + timeoutSeconds: + default: 300 + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite + minimum: 0 + type: integer + type: object + waitForCompletion: + description: WaitForCompletionSpec describes the configuration + for waiting on job completions + properties: + podSelector: + description: |- + PodSelector specifies a label selector for the pods to wait for completion + For more details on label selectors, see: + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + type: string + timeoutSeconds: + default: 0 + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite + minimum: 0 + type: integer + type: object + type: object + useNvidiaDriverCRD: + description: UseNvidiaDriverCRD indicates if the deployment of + NVIDIA Driver is managed by the NVIDIADriver CRD type + type: boolean + useOpenKernelModules: + description: UseOpenKernelModules indicates if the open GPU kernel + modules should be used + type: boolean + usePrecompiled: + description: UsePrecompiled indicates if deployment of NVIDIA + Driver using pre-compiled modules is enabled + type: boolean + version: + description: NVIDIA Driver image tag + type: string + virtualTopology: + description: 'Optional: Virtual Topology Daemon configuration + for NVIDIA vGPU drivers' + properties: + config: + description: 'Optional: Config name representing virtual topology + daemon configuration file nvidia-topologyd.conf' + type: string + type: object + type: object + gdrcopy: + description: GDRCopy component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GDRCopy is enabled through GPU + Operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA GDRCopy driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA GDRCopy driver image repository + type: string + version: + description: NVIDIA GDRCopy driver image tag + type: string + type: object + gds: + description: GPUDirectStorage defines the spec for GDS components (Experimental) + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if GPUDirect Storage is enabled + through GPU operator + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA GPUDirect Storage Driver image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA GPUDirect Storage Driver image repository + type: string + version: + description: NVIDIA GPUDirect Storage Driver image tag + type: string + type: object + gfd: + description: GPUFeatureDiscovery spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of GPU Feature Discovery + Plugin is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: GFD image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: GFD image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: GFD image tag + type: string + type: object + hostPaths: + description: HostPaths defines various paths on the host needed by + GPU Operator components + properties: + driverInstallDir: + description: |- + DriverInstallDir represents the root at which driver files including libraries, + config files, and executables can be found. + type: string + rootFS: + description: |- + RootFS represents the path to the root filesystem of the host. + This is used by components that need to interact with the host filesystem + and as such this must be a chroot-able filesystem. + Examples include the MIG Manager and Toolkit Container which may need to + stop, start, or restart systemd services. + type: string + type: object + kataManager: + description: KataManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: Kata Manager config + properties: + artifactsDir: + default: /opt/nvidia-gpu-operator/artifacts/runtimeclasses + description: |- + ArtifactsDir is the directory where kata artifacts (e.g. kernel / guest images, configuration, etc.) + are placed on the local filesystem. + type: string + runtimeClasses: + description: RuntimeClasses is a list of kata runtime classes + to configure. + items: + description: RuntimeClass defines the configuration for + a kata RuntimeClass + properties: + artifacts: + description: Artifacts are the kata artifacts associated + with the runtime class. + properties: + pullSecret: + description: PullSecret is the secret used to pull + the OCI artifact. + type: string + url: + description: |- + URL is the path to the OCI artifact (payload) containing all artifacts + associated with a kata runtime class. + type: string + required: + - url + type: object + name: + description: Name is the name of the kata runtime class. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector specifies the nodeSelector for the RuntimeClass object. + This ensures pods running with the RuntimeClass only get scheduled + onto nodes which support it. + type: object + required: + - artifacts + - name + type: object + type: array + type: object + enabled: + description: Enabled indicates if deployment of Kata Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
+ type: string + required: + - name + type: object + type: array + image: + description: Kata Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Kata Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: Kata Manager image tag + type: string + type: object + mig: + description: MIG spec + properties: + strategy: + description: 'Optional: MIGStrategy to apply for GFD and NVIDIA + Device Plugin' + enum: + - none + - single + - mixed + type: string + type: object + migManager: + description: MIGManager for configuration to deploy MIG Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: 'Optional: Custom mig-parted configuration for NVIDIA + MIG Manager container' + properties: + default: + default: all-disabled + description: Default MIG config to be applied on the node, + when there is no config specified with the node label nvidia.com/mig.config + enum: + - all-disabled + - "" + type: string + name: + default: default-mig-parted-config + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA MIG Manager + is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
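Tying the mig and migManager blocks together, a plausible excerpt using only values permitted by the enums and defaults above:

```yaml
spec:
  mig:
    strategy: single                   # one of: none, single, mixed
  migManager:
    enabled: true
    config:
      name: default-mig-parted-config  # schema default ConfigMap name
      default: all-disabled            # applied when nvidia.com/mig.config is unset
```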
+ type: string + required: + - name + type: object + type: array + gpuClientsConfig: + description: 'Optional: Custom gpu-clients configuration for NVIDIA + MIG Manager container' + properties: + name: + description: ConfigMap name + type: string + type: object + image: + description: NVIDIA MIG Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA MIG Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA MIG Manager image tag + type: string + type: object + nodeStatusExporter: + description: NodeStatusExporter spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of Node Status Exporter + is enabled. + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Node Status Exporter image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Node Status Exporter image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed.
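The resources stanza repeated throughout this CRD follows the standard Kubernetes quantity syntax (the anyOf integer-or-string constrained by the pattern above). A sketch for nodeStatusExporter:

```yaml
spec:
  nodeStatusExporter:
    enabled: true
    resources:
      requests:
        cpu: 100m       # quantities may be strings...
        memory: 128Mi
      limits:
        cpu: 1          # ...or bare integers, via x-kubernetes-int-or-string
        memory: 256Mi
```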
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: Node Status Exporter image tag + type: string + type: object + operator: + description: Operator component spec + properties: + annotations: + additionalProperties: + type: string + description: |- + Optional: Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + type: object + defaultRuntime: + default: docker + description: Runtime defines container runtime type + enum: + - docker + - crio + - containerd + type: string + initContainer: + description: InitContainerSpec describes configuration for initContainer + image used with all components + properties: + image: + description: Image represents image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents image repository path + type: string + version: + description: Version represents image tag (version) + type: string + type: object + labels: + additionalProperties: + type: string + description: |- + Optional: Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + type: object + runtimeClass: + default: nvidia + type: string + use_ocp_driver_toolkit: + description: UseOpenShiftDriverToolkit indicates if DriverToolkit + image should be used on OpenShift to build and install driver + modules + type: boolean + required: + - defaultRuntime + type: object + psa: + description: PSA defines spec for PodSecurityAdmission configuration + properties: + enabled: + description: Enabled indicates if PodSecurityAdmission configuration + needs to be enabled for all Pods + type: boolean + type: object + psp: + description: |- + Deprecated: Pod Security Policies are no longer supported. Please use PodSecurityAdmission instead + PSP defines spec for handling PodSecurityPolicies + properties: + enabled: + description: Enabled indicates if PodSecurityPolicies needs to + be enabled for all Pods + type: boolean + type: object + sandboxDevicePlugin: + description: SandboxDevicePlugin component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA Sandbox + Device Plugin through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container.
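Since operator.defaultRuntime is the only required field in the operator block, a small excerpt covering it and the psa toggle (psp being deprecated in its favour) might read:

```yaml
spec:
  operator:
    defaultRuntime: containerd   # required; enum: docker, crio, containerd
    runtimeClass: nvidia         # schema default
  psa:
    enabled: true                # opt in to PodSecurityAdmission handling
```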
+ properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Sandbox Device Plugin image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA Sandbox Device Plugin image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Sandbox Device Plugin image tag + type: string + type: object + sandboxWorkloads: + description: SandboxWorkloads defines the spec for handling sandbox + workloads (i.e. Virtual Machines) + properties: + defaultWorkload: + default: container + description: |- + DefaultWorkload indicates the default GPU workload type to configure + worker nodes in the cluster for + enum: + - container + - vm-passthrough + - vm-vgpu + type: string + enabled: + description: |- + Enabled indicates if the GPU Operator should manage additional operands required + for sandbox workloads (i.e. VFIO Manager, vGPU Manager, and additional device plugins) + type: boolean + type: object + toolkit: + description: Toolkit component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + enabled: + description: Enabled indicates if deployment of NVIDIA Container + Toolkit through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
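A hypothetical excerpt combining sandboxWorkloads with the toolkit block (whose installDir default is defined just below):

```yaml
spec:
  sandboxWorkloads:
    enabled: true
    defaultWorkload: vm-passthrough  # enum: container, vm-passthrough, vm-vgpu
  toolkit:
    enabled: true
    installDir: /usr/local/nvidia    # schema default
```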
+ type: string + required: + - name + type: object + type: array + image: + description: NVIDIA Container Toolkit image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + installDir: + default: /usr/local/nvidia + description: Toolkit install directory on the host + type: string + repository: + description: NVIDIA Container Toolkit image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA Container Toolkit image tag + type: string + type: object + validator: + description: Validator defines the spec for operator-validator daemonset + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + cuda: + description: CUDA validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + driver: + description: Driver validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable.
+ type: string + required: + - name + type: object + type: array + image: + description: Validator image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + plugin: + description: Plugin validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + repository: + description: Validator image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + toolkit: + description: Toolkit validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + version: + description: Validator image tag + type: string + vfioPCI: + description: VfioPCI validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + vgpuDevices: + description: VGPUDevices validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
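The validator block accepts both top-level image settings and per-check env overrides (cuda, driver, plugin, toolkit, and the sandbox-related checks that follow). An illustrative excerpt, with hypothetical tag and env values:

```yaml
spec:
  validator:
    repository: nvcr.io/nvidia/cloud-native  # illustrative repository path
    image: gpu-operator-validator
    version: v24.3.0                         # hypothetical tag
    cuda:
      env:
        - name: WITH_WORKLOAD                # hypothetical toggle
          value: "true"
```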
+ type: string + required: + - name + type: object + type: array + type: object + vgpuManager: + description: VGPUManager validator spec + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + type: object + type: object + vfioManager: + description: VFIOManager for configuration to deploy VFIO-PCI Manager + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Manager repository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag (version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of VFIO Manager is + enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: VFIO Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: VFIO Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits.
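A short vfioManager excerpt; the env var passed to the embedded driverManager is a hypothetical placeholder:

```yaml
spec:
  vfioManager:
    enabled: true
    driverManager:
      env:
        - name: ENABLE_GPU_POD_EVICTION  # hypothetical env var
          value: "true"
```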
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: VFIO Manager image tag + type: string + type: object + vgpuDeviceManager: + description: VGPUDeviceManager spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + config: + description: NVIDIA vGPU devices configuration for NVIDIA vGPU + Device Manager container + properties: + default: + default: default + description: Default config name within the ConfigMap + type: string + name: + description: ConfigMap name + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Device + Manager is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Device Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Device Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Device Manager image tag + type: string + type: object + vgpuManager: + description: VGPUManager component spec + properties: + args: + description: 'Optional: List of arguments' + items: + type: string + type: array + driverManager: + description: DriverManager represents configuration for NVIDIA + Driver Manager initContainer + properties: + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. 
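And for vgpuDeviceManager, whose config block defaults the selected profile name to default; the ConfigMap name is a hypothetical placeholder:

```yaml
spec:
  vgpuDeviceManager:
    enabled: true
    config:
      name: vgpu-devices-config  # hypothetical ConfigMap
      default: default           # schema default config name
```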
+ type: string + required: + - name + type: object + type: array + image: + description: Image represents NVIDIA Driver Manager image + name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: Repository represents Driver Manager repository + path + type: string + version: + description: Version represents NVIDIA Driver Manager image + tag (version) + type: string + type: object + enabled: + description: Enabled indicates if deployment of NVIDIA vGPU Manager + through operator is enabled + type: boolean + env: + description: 'Optional: List of environment variables' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. + type: string + value: + description: Value of the environment variable. + type: string + required: + - name + type: object + type: array + image: + description: NVIDIA vGPU Manager image name + pattern: '[a-zA-Z0-9\-]+' + type: string + imagePullPolicy: + description: Image pull policy + type: string + imagePullSecrets: + description: Image pull secrets + items: + type: string + type: array + repository: + description: NVIDIA vGPU Manager image repository + type: string + resources: + description: 'Optional: Define resources requests and limits for + each pod' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + description: NVIDIA vGPU Manager image tag + type: string + type: object + required: + - daemonsets + - dcgm + - dcgmExporter + - devicePlugin + - driver + - gfd + - nodeStatusExporter + - operator + - toolkit + type: object + status: + description: ClusterPolicyStatus defines the observed state of ClusterPolicy + properties: + conditions: + description: Conditions is a list of conditions representing the ClusterPolicy's + current state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition.
+ This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + namespace: + description: Namespace indicates a namespace in which the operator + is installed + type: string + state: + description: State indicates status of ClusterPolicy + enum: + - ignored + - ready + - notReady + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deployments/gpu-operator/crds/nvidia.com_clusterpolicies_crd.yaml b/deployments/gpu-operator/crds/nvidia.com_clusterpolicies_crd.yaml deleted file mode 100644 index 16e35bf4b..000000000 --- a/deployments/gpu-operator/crds/nvidia.com_clusterpolicies_crd.yaml +++ /dev/null @@ -1,2357 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.14.0 - name: clusterpolicies.nvidia.com -spec: - group: nvidia.com - names: - kind: ClusterPolicy - listKind: ClusterPolicyList - plural: clusterpolicies - singular: clusterpolicy - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.state - name: Status - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: string - name: v1 - schema: - openAPIV3Schema: - description: ClusterPolicy is the Schema for the clusterpolicies API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ClusterPolicySpec defines the desired state of ClusterPolicy - properties: - ccManager: - description: CCManager component spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - defaultMode: - description: Default CC mode setting for compatible GPUs on the - node - enum: - - "on" - - "off" - - devtools - type: string - enabled: - description: Enabled indicates if deployment of CC Manager is - enabled - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: CC Manager image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: CC Manager image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: CC Manager image tag - type: string - type: object - cdi: - description: CDI configures how the Container Device Interface is - used in the cluster - properties: - default: - default: false - description: Default indicates whether to use CDI as the default - mechanism for providing GPU access to containers. - type: boolean - enabled: - default: false - description: Enabled indicates whether CDI can be used to make - GPUs accessible to containers. - type: boolean - type: object - daemonsets: - description: Daemonset defines common configuration for all Daemonsets - properties: - annotations: - additionalProperties: - type: string - description: |- - Optional: Annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. 
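Even though these lines belong to the CRD file being removed, the ccManager and cdi shapes are worth seeing concretely, since the fields and enums shown here are what a ClusterPolicy consumes:

```yaml
spec:
  cdi:
    enabled: true      # allow CDI as a mechanism for GPU access
    default: false     # keep the legacy stack as the default (schema default)
  ccManager:
    enabled: true
    defaultMode: "on"  # enum: "on", "off", devtools
```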
- type: object - labels: - additionalProperties: - type: string - description: |- - Optional: Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - type: object - priorityClassName: - type: string - rollingUpdate: - description: 'Optional: Configuration for rolling update of all - DaemonSet pods' - properties: - maxUnavailable: - type: string - type: object - tolerations: - description: 'Optional: Set tolerations' - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple <key,value,effect> using the matching operator <operator>. - properties: - effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - updateStrategy: - default: RollingUpdate - enum: - - RollingUpdate - - OnDelete - type: string - type: object - dcgm: - description: DCGM component spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - enabled: - description: Enabled indicates if deployment of NVIDIA DCGM Hostengine - as a separate pod is enabled. - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable.
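A daemonsets excerpt matching this schema, with a hypothetical taint key:

```yaml
spec:
  daemonsets:
    priorityClassName: system-node-critical
    updateStrategy: RollingUpdate  # enum: RollingUpdate, OnDelete
    rollingUpdate:
      maxUnavailable: "1"
    tolerations:
      - key: nvidia.com/gpu        # hypothetical taint key
        operator: Exists
        effect: NoSchedule
```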
- type: string - required: - - name - type: object - type: array - hostPort: - description: 'HostPort represents host port that needs to be bound - for DCGM engine (Default: 5555)' - format: int32 - type: integer - image: - description: NVIDIA DCGM image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: NVIDIA DCGM image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: NVIDIA DCGM image tag - type: string - type: object - dcgmExporter: - description: DCGMExporter spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - config: - description: 'Optional: Custom metrics configuration for NVIDIA - DCGM Exporter' - properties: - name: - description: ConfigMap name with file dcgm-metrics.csv for - metrics to be collected by NVIDIA DCGM Exporter - type: string - type: object - enabled: - description: Enabled indicates if deployment of NVIDIA DCGM Exporter - through operator is enabled - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: NVIDIA DCGM Exporter image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: NVIDIA DCGM Exporter image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - serviceMonitor: - description: 'Optional: ServiceMonitor configuration for NVIDIA - DCGM Exporter' - properties: - additionalLabels: - additionalProperties: - type: string - description: AdditionalLabels to add to ServiceMonitor instance - for NVIDIA DCGM Exporter - type: object - enabled: - description: Enabled indicates if ServiceMonitor is deployed - for NVIDIA DCGM Exporter - type: boolean - honorLabels: - description: HonorLabels chooses the metric’s labels on collisions - with target labels. - type: boolean - interval: - description: |- - Interval which metrics should be scraped from NVIDIA DCGM Exporter. If not specified Prometheus’ global scrape interval is used. - Supported units: y, w, d, h, m, s, ms - pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ - type: string - relabelings: - description: Relabelings allows to rewrite labels on metric - sets for NVIDIA DCGM Exporter - items: - description: |- - RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. - It defines `<metric_relabel_configs>`-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs - properties: - action: - default: replace - description: |- - Action to perform based on regex matching. Default is 'replace'. - uppercase and lowercase actions require Prometheus >= 2.36. - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - description: Modulus to take of the hash of the source - label values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' - type: string - replacement: - description: |- - Replacement value against which a regex replace is performed if the - regular expression matches. Regex capture groups are available. Default is '$1' - type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. - type: string - sourceLabels: - description: |- - The source labels select values from existing labels. Their content is concatenated - using the configured separator and matched against the configured regular expression - for the replace, keep, and drop actions. - items: - description: LabelName is a valid Prometheus label - name which may only contain ASCII letters, numbers, - as well as underscores.
- pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - description: |- - Label to which the resulting value is written in a replace action. - It is mandatory for replace actions. Regex capture groups are available. - type: string - type: object - type: array - type: object - version: - description: NVIDIA DCGM Exporter image tag - type: string - type: object - devicePlugin: - description: DevicePlugin component spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - config: - description: 'Optional: Configuration for the NVIDIA Device Plugin - via the ConfigMap' - properties: - default: - description: Default config name within the ConfigMap for - the NVIDIA Device Plugin config - type: string - name: - description: ConfigMap name for NVIDIA Device Plugin config - including shared config between plugin and GFD - type: string - type: object - enabled: - description: Enabled indicates if deployment of NVIDIA Device - Plugin through operator is enabled - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: NVIDIA Device Plugin image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: NVIDIA Device Plugin image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. 
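The serviceMonitor block mirrors the Prometheus Operator's ServiceMonitor/RelabelConfig types; an excerpt with a hypothetical Prometheus selector label:

```yaml
spec:
  dcgmExporter:
    enabled: true
    serviceMonitor:
      enabled: true
      interval: 30s                     # must match the duration pattern above
      honorLabels: false
      additionalLabels:
        release: kube-prometheus-stack  # hypothetical selector label
      relabelings:
        - action: replace
          sourceLabels: [__meta_kubernetes_pod_node_name]
          targetLabel: node
```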
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: NVIDIA Device Plugin image tag - type: string - type: object - driver: - description: Driver component spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - certConfig: - description: 'Optional: Custom certificates configuration for - NVIDIA Driver container' - properties: - name: - type: string - type: object - enabled: - description: Enabled indicates if deployment of NVIDIA Driver - through operator is enabled - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: NVIDIA Driver image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - kernelModuleConfig: - description: 'Optional: Kernel module configuration parameters - for the NVIDIA Driver' - properties: - name: - type: string - type: object - licensingConfig: - description: 'Optional: Licensing configuration for NVIDIA vGPU - licensing' - properties: - configMapName: - type: string - nlsEnabled: - description: NLSEnabled indicates if NVIDIA Licensing System - is used for licensing. - type: boolean - type: object - livenessProbe: - description: NVIDIA Driver container liveness probe settings - properties: - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - minimum: 1 - type: integer - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - minimum: 1 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - minimum: 1 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - minimum: 1 - type: integer - type: object - manager: - description: Manager represents configuration for NVIDIA Driver - Manager initContainer - properties: - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. 
- type: string - required: - - name - type: object - type: array - image: - description: Image represents NVIDIA Driver Manager image - name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: Repository represents Driver Manager repository - path - type: string - version: - description: Version represents NVIDIA Driver Manager image - tag(version) - type: string - type: object - rdma: - description: GPUDirectRDMASpec defines the properties for nvidia-peermem - deployment - properties: - enabled: - description: Enabled indicates if GPUDirect RDMA is enabled - through GPU operator - type: boolean - useHostMofed: - description: UseHostMOFED indicates to use MOFED drivers directly - installed on the host to enable GPUDirect RDMA - type: boolean - type: object - readinessProbe: - description: NVIDIA Driver container readiness probe settings - properties: - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - minimum: 1 - type: integer - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Defaults to 10 seconds. Minimum value is 1. - format: int32 - minimum: 1 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - minimum: 1 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - minimum: 1 - type: integer - type: object - repoConfig: - description: 'Optional: Custom repo configuration for NVIDIA Driver - container' - properties: - configMapName: - type: string - type: object - repository: - description: NVIDIA Driver image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits.
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - startupProbe: - description: NVIDIA Driver container startup probe settings - properties: - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - minimum: 1 - type: integer - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Defaults to 10 seconds. Minimum value is 1. - format: int32 - minimum: 1 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - minimum: 1 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - minimum: 1 - type: integer - type: object - upgradePolicy: - description: Driver auto-upgrade settings - properties: - autoUpgrade: - default: false - description: |- - AutoUpgrade is a global switch for the automatic upgrade feature; - if set to false, all other options are ignored - type: boolean - drain: - description: DrainSpec describes configuration for node drain - during automatic upgrade - properties: - deleteEmptyDir: - default: false - description: |- - DeleteEmptyDir indicates if the drain should continue even if there are pods using emptyDir - (local data that will be deleted when the node is drained) - type: boolean - enable: - default: false - description: Enable indicates if node draining is allowed - during upgrade - type: boolean - force: - default: false - description: Force indicates if force draining is allowed - type: boolean - podSelector: - description: |- - PodSelector specifies a label selector to filter pods on the node that need to be drained - For more details on label selectors, see: - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - type: string - timeoutSeconds: - default: 300 - description: TimeoutSeconds specifies the length of time - in seconds to wait before giving up on the drain; zero means - infinite - minimum: 0 - type: integer - type: object - maxParallelUpgrades: - default: 1 - description: |- - MaxParallelUpgrades indicates how many nodes can be upgraded in parallel - 0 means no limit, all nodes will be upgraded in parallel - minimum: 0 - type: integer - maxUnavailable: - anyOf: - - type: integer - - type: string - default: 25% - description: |- - MaxUnavailable is the maximum number of nodes with the driver installed, that can be unavailable during the upgrade. - Value can be an absolute number (ex: 5) or a percentage of total nodes at the start of upgrade (ex: 10%). - Absolute number is calculated from percentage by rounding up. - By default, a fixed value of 25% is used.
- x-kubernetes-int-or-string: true - podDeletion: - description: PodDeletionSpec describes configuration for deletion - of pods using special resources during automatic upgrade - properties: - deleteEmptyDir: - default: false - description: |- - DeleteEmptyDir indicates if the deletion should continue even if there are pods using emptyDir - (local data that will be deleted when the pod is deleted) - type: boolean - force: - default: false - description: Force indicates if force deletion is allowed - type: boolean - timeoutSeconds: - default: 300 - description: TimeoutSeconds specifies the length of time - in seconds to wait before giving up on pod termination; - zero means infinite - minimum: 0 - type: integer - type: object - waitForCompletion: - description: WaitForCompletionSpec describes the configuration - for waiting on job completions - properties: - podSelector: - description: |- - PodSelector specifies a label selector for the pods to wait for completion - For more details on label selectors, see: - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - type: string - timeoutSeconds: - default: 0 - description: TimeoutSeconds specifies the length of time - in seconds to wait before giving up on pod termination; - zero means infinite - minimum: 0 - type: integer - type: object - type: object - useNvidiaDriverCRD: - description: UseNvidiaDriverCRD indicates if the deployment of - NVIDIA Driver is managed by the NVIDIADriver CRD type - type: boolean - useOpenKernelModules: - description: UseOpenKernelModules indicates if the open GPU kernel - modules should be used - type: boolean - usePrecompiled: - description: UsePrecompiled indicates if deployment of NVIDIA - Driver using pre-compiled modules is enabled - type: boolean - version: - description: NVIDIA Driver image tag - type: string - virtualTopology: - description: 'Optional: Virtual Topology Daemon configuration - for NVIDIA vGPU drivers' - properties: - config: - description: 'Optional: Config name representing virtual topology - daemon configuration file nvidia-topologyd.conf' - type: string - type: object - type: object - gdrcopy: - description: GDRCopy component spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - enabled: - description: Enabled indicates if GDRCopy is enabled through GPU - Operator - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable.
- type: string - required: - - name - type: object - type: array - image: - description: NVIDIA GDRCopy driver image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: NVIDIA GDRCopy driver image repository - type: string - version: - description: NVIDIA GDRCopy driver image tag - type: string - type: object - gds: - description: GPUDirectStorage defines the spec for GDS components (Experimental) - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - enabled: - description: Enabled indicates if GPUDirect Storage is enabled - through GPU operator - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: NVIDIA GPUDirect Storage Driver image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: NVIDIA GPUDirect Storage Driver image repository - type: string - version: - description: NVIDIA GPUDirect Storage Driver image tag - type: string - type: object - gfd: - description: GPUFeatureDiscovery spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - enabled: - description: Enabled indicates if deployment of GPU Feature Discovery - Plugin is enabled. - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: GFD image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: GFD image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required.
- If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: GFD image tag - type: string - type: object - kataManager: - description: KataManager component spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - config: - description: Kata Manager config - properties: - artifactsDir: - default: /opt/nvidia-gpu-operator/artifacts/runtimeclasses - description: |- - ArtifactsDir is the directory where kata artifacts (e.g. kernel / guest images, configuration, etc.) - are placed on the local filesystem. - type: string - runtimeClasses: - description: RuntimeClasses is a list of kata runtime classes - to configure. - items: - description: RuntimeClass defines the configuration for - a kata RuntimeClass - properties: - artifacts: - description: Artifacts are the kata artifacts associated - with the runtime class. - properties: - pullSecret: - description: PullSecret is the secret used to pull - the OCI artifact. - type: string - url: - description: |- - URL is the path to the OCI artifact (payload) containing all artifacts - associated with a kata runtime class. - type: string - required: - - url - type: object - name: - description: Name is the name of the kata runtime class. - type: string - nodeSelector: - additionalProperties: - type: string - description: |- - NodeSelector specifies the nodeSelector for the RuntimeClass object. - This ensures pods running with the RuntimeClass only get scheduled - onto nodes which support it. - type: object - required: - - artifacts - - name - type: object - type: array - type: object - enabled: - description: Enabled indicates if deployment of Kata Manager is - enabled - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: Kata Manager image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: Kata Manager image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. 
- If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: Kata Manager image tag - type: string - type: object - mig: - description: MIG spec - properties: - strategy: - description: 'Optional: MIGStrategy to apply for GFD and NVIDIA - Device Plugin' - enum: - - none - - single - - mixed - type: string - type: object - migManager: - description: MIGManager for configuration to deploy MIG Manager - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - config: - description: 'Optional: Custom mig-parted configuration for NVIDIA - MIG Manager container' - properties: - default: - default: all-disabled - description: Default MIG config to be applied on the node, - when there is no config specified with the node label nvidia.com/mig.config - enum: - - all-disabled - - "" - type: string - name: - default: default-mig-parted-config - description: ConfigMap name - type: string - type: object - enabled: - description: Enabled indicates if deployment of NVIDIA MIG Manager - is enabled - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - gpuClientsConfig: - description: 'Optional: Custom gpu-clients configuration for NVIDIA - MIG Manager container' - properties: - name: - description: ConfigMap name - type: string - type: object - image: - description: NVIDIA MIG Manager image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: NVIDIA MIG Manager image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: NVIDIA MIG Manager image tag - type: string - type: object - nodeStatusExporter: - description: NodeStatusExporter spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - enabled: - description: Enabled indicates if deployment of Node Status Exporter - is enabled. - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: Node Status Exporter image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: Node Status Exporter image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: Node Status Exporter image tag - type: string - type: object - operator: - description: Operator component spec - properties: - annotations: - additionalProperties: - type: string - description: |- - Optional: Annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects.
- type: object - defaultRuntime: - default: docker - description: Runtime defines container runtime type - enum: - - docker - - crio - - containerd - type: string - initContainer: - description: InitContainerSpec describes configuration for initContainer - image used with all components - properties: - image: - description: Image represents image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: Repository represents image repository path - type: string - version: - description: Version represents image tag(version) - type: string - type: object - labels: - additionalProperties: - type: string - description: |- - Optional: Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - type: object - runtimeClass: - default: nvidia - type: string - use_ocp_driver_toolkit: - description: UseOpenShiftDriverToolkit indicates if DriverToolkit - image should be used on OpenShift to build and install driver - modules - type: boolean - required: - - defaultRuntime - type: object - psa: - description: PSA defines spec for PodSecurityAdmission configuration - properties: - enabled: - description: Enabled indicates if PodSecurityAdmission configuration - needs to be enabled for all Pods - type: boolean - type: object - psp: - description: |- - Deprecated: Pod Security Policies are no longer supported. Please use PodSecurityAdmission instead - PSP defines spec for handling PodSecurityPolicies - properties: - enabled: - description: Enabled indicates if PodSecurityPolicies needs to - be enabled for all Pods - type: boolean - type: object - sandboxDevicePlugin: - description: SandboxDevicePlugin component spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - enabled: - description: Enabled indicates if deployment of NVIDIA Sandbox - Device Plugin through operator is enabled - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: NVIDIA Sandbox Device Plugin image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: NVIDIA Sandbox Device Plugin image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: NVIDIA Sandbox Device Plugin image tag - type: string - type: object - sandboxWorkloads: - description: SandboxWorkloads defines the spec for handling sandbox - workloads (i.e. Virtual Machines) - properties: - defaultWorkload: - default: container - description: |- - DefaultWorkload indicates the default GPU workload type to configure - worker nodes in the cluster for - enum: - - container - - vm-passthrough - - vm-vgpu - type: string - enabled: - description: |- - Enabled indicates if the GPU Operator should manage additional operands required - for sandbox workloads (i.e. VFIO Manager, vGPU Manager, and additional device plugins) - type: boolean - type: object - toolkit: - description: Toolkit component spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - enabled: - description: Enabled indicates if deployment of NVIDIA Container - Toolkit through operator is enabled - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: NVIDIA Container Toolkit image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - installDir: - default: /usr/local/nvidia - description: Toolkit install directory on the host - type: string - repository: - description: NVIDIA Container Toolkit image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. 
Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: NVIDIA Container Toolkit image tag - type: string - type: object - validator: - description: Validator defines the spec for operator-validator daemonset - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - cuda: - description: CUDA validator spec - properties: - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - type: object - driver: - description: Driver validator spec - properties: - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - type: object - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: Validator image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - plugin: - description: Plugin validator spec - properties: - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - type: object - repository: - description: Validator image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits.
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - toolkit: - description: Toolkit validator spec - properties: - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - type: object - version: - description: Validator image tag - type: string - vfioPCI: - description: VfioPCI validator spec - properties: - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - type: object - vgpuDevices: - description: VGPUDevices validator spec - properties: - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - type: object - vgpuManager: - description: VGPUManager validator spec - properties: - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - type: object - type: object - vfioManager: - description: VFIOManager for configuration to deploy VFIO-PCI Manager - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - driverManager: - description: DriverManager represents configuration for NVIDIA - Driver Manager - properties: - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: Image represents NVIDIA Driver Manager image - name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: Repository represents Driver Manager repository - path - type: string - version: - description: Version represents NVIDIA Driver Manager image - tag(version) - type: string - type: object - enabled: - description: Enabled indicates if deployment of VFIO Manager is - enabled - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable.
- type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: VFIO Manager image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: VFIO Manager image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: VFIO Manager image tag - type: string - type: object - vgpuDeviceManager: - description: VGPUDeviceManager spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - config: - description: NVIDIA vGPU devices configuration for NVIDIA vGPU - Device Manager container - properties: - default: - default: default - description: Default config name within the ConfigMap - type: string - name: - description: ConfigMap name - type: string - type: object - enabled: - description: Enabled indicates if deployment of NVIDIA vGPU Device - Manager is enabled - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: NVIDIA vGPU Device Manager image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: NVIDIA vGPU Device Manager image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: NVIDIA vGPU Device Manager image tag - type: string - type: object - vgpuManager: - description: VGPUManager component spec - properties: - args: - description: 'Optional: List of arguments' - items: - type: string - type: array - driverManager: - description: DriverManager represents configuration for NVIDIA - Driver Manager initContainer - properties: - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: Image represents NVIDIA Driver Manager image - name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: Repository represents Driver Manager repository - path - type: string - version: - description: Version represents NVIDIA Driver Manager image - tag(version) - type: string - type: object - enabled: - description: Enabled indicates if deployment of NVIDIA vGPU Manager - through operator is enabled - type: boolean - env: - description: 'Optional: List of environment variables' - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. - type: string - value: - description: Value of the environment variable. - type: string - required: - - name - type: object - type: array - image: - description: NVIDIA vGPU Manager image name - pattern: '[a-zA-Z0-9\-]+' - type: string - imagePullPolicy: - description: Image pull policy - type: string - imagePullSecrets: - description: Image pull secrets - items: - type: string - type: array - repository: - description: NVIDIA vGPU Manager image repository - type: string - resources: - description: 'Optional: Define resources requests and limits for - each pod' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed.
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - description: NVIDIA vGPU Manager image tag - type: string - type: object - required: - - daemonsets - - dcgm - - dcgmExporter - - devicePlugin - - driver - - gfd - - nodeStatusExporter - - operator - - toolkit - type: object - status: - description: ClusterPolicyStatus defines the observed state of ClusterPolicy - properties: - conditions: - description: Conditions is a list of conditions representing the ClusterPolicy's - current state. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - namespace: - description: Namespace indicates a namespace in which the operator - is installed - type: string - state: - description: State indicates status of ClusterPolicy - enum: - - ignored - - ready - - notReady - type: string - required: - - state - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/deployments/gpu-operator/crds/nvidia.com_nvidiadrivers.yaml b/deployments/gpu-operator/crds/nvidia.com_nvidiadrivers.yaml index 317972fd2..c49059a38 100644 --- a/deployments/gpu-operator/crds/nvidia.com_nvidiadrivers.yaml +++ b/deployments/gpu-operator/crds/nvidia.com_nvidiadrivers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: nvidiadrivers.nvidia.com spec: group: nvidia.com @@ -357,11 +357,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -389,11 +391,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -406,6 +410,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -450,11 +455,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -482,14 +489,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -709,16 +719,8 @@ spec: description: Conditions is a list of conditions representing the NVIDIADriver's current state. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -759,12 +761,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/deployments/gpu-operator/templates/_helpers.tpl b/deployments/gpu-operator/templates/_helpers.tpl index 305c9d1fe..8969d66e1 100644 --- a/deployments/gpu-operator/templates/_helpers.tpl +++ b/deployments/gpu-operator/templates/_helpers.tpl @@ -76,5 +76,5 @@ Full image name with tag Full image name with tag */}} {{- define "driver-manager.fullimage" -}} -{{- .Values.driver.manager.repository -}}/{{- .Values.driver.manager.image -}}:{{- .Values.driver.manager.version -}} +{{- .Values.driverManager.repository -}}/{{- .Values.driverManager.image -}}:{{- .Values.driverManager.version -}} {{- end }} diff --git a/deployments/gpu-operator/templates/cleanup_crd.yaml b/deployments/gpu-operator/templates/cleanup_crd.yaml index 550525f00..fd0c1b799 100644 --- a/deployments/gpu-operator/templates/cleanup_crd.yaml +++ b/deployments/gpu-operator/templates/cleanup_crd.yaml @@ -26,6 +26,10 @@ spec: - name: {{ . }} {{- end }} {{- end }} + {{- with .Values.operator.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} containers: - name: cleanup-crd image: {{ include "gpu-operator.fullimage" . 
}} diff --git a/deployments/gpu-operator/templates/clusterpolicy.yaml b/deployments/gpu-operator/templates/clusterpolicy.yaml index f036c936a..b6d825759 100644 --- a/deployments/gpu-operator/templates/clusterpolicy.yaml +++ b/deployments/gpu-operator/templates/clusterpolicy.yaml @@ -12,6 +12,9 @@ metadata: "helm.sh/resource-policy": keep {{- end }} spec: + hostPaths: + rootFS: {{ .Values.hostPaths.rootFS }} + driverInstallDir: {{ .Values.hostPaths.driverInstallDir }} operator: {{- if .Values.operator.defaultRuntime }} defaultRuntime: {{ .Values.operator.defaultRuntime }} @@ -170,17 +173,17 @@ spec: enabled: {{ .Values.driver.rdma.enabled }} useHostMofed: {{ .Values.driver.rdma.useHostMofed }} manager: - {{- if .Values.driver.manager.repository }} - repository: {{ .Values.driver.manager.repository }} + {{- if .Values.driverManager.repository }} + repository: {{ .Values.driverManager.repository }} {{- end }} - {{- if .Values.driver.manager.image }} - image: {{ .Values.driver.manager.image }} + {{- if .Values.driverManager.image }} + image: {{ .Values.driverManager.image }} {{- end }} - {{- if .Values.driver.manager.version }} - version: {{ .Values.driver.manager.version | quote }} + {{- if .Values.driverManager.version }} + version: {{ .Values.driverManager.version | quote }} {{- end }} - {{- if .Values.driver.manager.imagePullPolicy }} - imagePullPolicy: {{ .Values.driver.manager.imagePullPolicy }} + {{- if .Values.driverManager.imagePullPolicy }} + imagePullPolicy: {{ .Values.driverManager.imagePullPolicy }} {{- end }} {{- if .Values.driver.manager.env }} env: {{ toYaml .Values.driver.manager.env | nindent 8 }} @@ -259,17 +262,17 @@ spec: args: {{ toYaml .Values.vgpuManager.args | nindent 6 }} {{- end }} driverManager: - {{- if .Values.vgpuManager.driverManager.repository }} - repository: {{ .Values.vgpuManager.driverManager.repository }} + {{- if .Values.driverManager.repository }} + repository: {{ .Values.driverManager.repository }} {{- end }} - {{- if .Values.vgpuManager.driverManager.image }} - image: {{ .Values.vgpuManager.driverManager.image }} + {{- if .Values.driverManager.image }} + image: {{ .Values.driverManager.image }} {{- end }} - {{- if .Values.vgpuManager.driverManager.version }} - version: {{ .Values.vgpuManager.driverManager.version | quote }} + {{- if .Values.driverManager.version }} + version: {{ .Values.driverManager.version | quote }} {{- end }} - {{- if .Values.vgpuManager.driverManager.imagePullPolicy }} - imagePullPolicy: {{ .Values.vgpuManager.driverManager.imagePullPolicy }} + {{- if .Values.driverManager.imagePullPolicy }} + imagePullPolicy: {{ .Values.driverManager.imagePullPolicy }} {{- end }} {{- if .Values.vgpuManager.driverManager.env }} env: {{ toYaml .Values.vgpuManager.driverManager.env | nindent 8 }} @@ -328,17 +331,17 @@ spec: args: {{ toYaml .Values.vfioManager.args | nindent 6 }} {{- end }} driverManager: - {{- if .Values.vfioManager.driverManager.repository }} - repository: {{ .Values.vfioManager.driverManager.repository }} + {{- if .Values.driverManager.repository }} + repository: {{ .Values.driverManager.repository }} {{- end }} - {{- if .Values.vfioManager.driverManager.image }} - image: {{ .Values.vfioManager.driverManager.image }} + {{- if .Values.driverManager.image }} + image: {{ .Values.driverManager.image }} {{- end }} - {{- if .Values.vfioManager.driverManager.version }} - version: {{ .Values.vfioManager.driverManager.version | quote }} + {{- if .Values.driverManager.version }} + version: {{ .Values.driverManager.version | quote }} {{- end 
}} - {{- if .Values.vfioManager.driverManager.imagePullPolicy }} - imagePullPolicy: {{ .Values.vfioManager.driverManager.imagePullPolicy }} + {{- if .Values.driverManager.imagePullPolicy }} + imagePullPolicy: {{ .Values.driverManager.imagePullPolicy }} {{- end }} {{- if .Values.vfioManager.driverManager.env }} env: {{ toYaml .Values.vfioManager.driverManager.env | nindent 8 }} @@ -482,9 +485,6 @@ spec: {{- if .Values.dcgm.args }} args: {{ toYaml .Values.dcgm.args | nindent 6 }} {{- end }} - {{- if .Values.dcgm.hostPort }} - hostPort: {{ .Values.dcgm.hostPort }} - {{- end }} dcgmExporter: enabled: {{ .Values.dcgmExporter.enabled }} {{- if .Values.dcgmExporter.repository }} @@ -511,8 +511,9 @@ spec: {{- if .Values.dcgmExporter.args }} args: {{ toYaml .Values.dcgmExporter.args | nindent 6 }} {{- end }} - {{- if .Values.dcgmExporter.config }} - config: {{ toYaml .Values.dcgmExporter.config | nindent 6 }} + {{- if and (.Values.dcgmExporter.config) (.Values.dcgmExporter.config.name) }} + config: + name: {{ .Values.dcgmExporter.config.name }} {{- end }} {{- if .Values.dcgmExporter.serviceMonitor }} serviceMonitor: {{ toYaml .Values.dcgmExporter.serviceMonitor | nindent 6 }} @@ -570,7 +571,9 @@ spec: args: {{ toYaml .Values.migManager.args | nindent 6 }} {{- end }} {{- if .Values.migManager.config }} - config: {{ toYaml .Values.migManager.config | nindent 6 }} + config: + name: {{ .Values.migManager.config.name }} + default: {{ .Values.migManager.config.default }} {{- end }} {{- if .Values.migManager.gpuClientsConfig }} gpuClientsConfig: {{ toYaml .Values.migManager.gpuClientsConfig | nindent 6 }} diff --git a/deployments/gpu-operator/templates/clusterrole.yaml b/deployments/gpu-operator/templates/clusterrole.yaml new file mode 100644 index 000000000..4acbcf29c --- /dev/null +++ b/deployments/gpu-operator/templates/clusterrole.yaml @@ -0,0 +1,146 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: gpu-operator + labels: + {{- include "gpu-operator.labels" . 
| nindent 4 }} + app.kubernetes.io/component: "gpu-operator" +rules: +- apiGroups: + - config.openshift.io + resources: + - clusterversions + - proxies + verbs: + - get + - list + - watch +- apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - get + - list + - watch +- apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - use +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - create + - watch + - update + - patch +- apiGroups: + - "" + resources: + - events + - pods + - pods/eviction + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - list + - watch +- apiGroups: + - nvidia.com + resources: + - clusterpolicies + - clusterpolicies/finalizers + - clusterpolicies/status + - nvidiadrivers + - nvidiadrivers/finalizers + - nvidiadrivers/status + verbs: + - create + - get + - list + - watch + - update + - patch + - delete + - deletecollection +- apiGroups: + - scheduling.k8s.io + resources: + - priorityclasses + verbs: + - get + - list + - watch + - create +- apiGroups: + - node.k8s.io + resources: + - runtimeclasses + verbs: + - get + - list + - create + - update + - watch + - delete +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - update + - patch + - create +{{- if .Values.operator.cleanupCRD }} + - delete +{{- end }} diff --git a/deployments/gpu-operator/templates/clusterrolebinding.yaml b/deployments/gpu-operator/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..08b87fbce --- /dev/null +++ b/deployments/gpu-operator/templates/clusterrolebinding.yaml @@ -0,0 +1,18 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: gpu-operator + labels: + {{- include "gpu-operator.labels" . | nindent 4 }} + app.kubernetes.io/component: "gpu-operator" +subjects: +- kind: ServiceAccount + name: gpu-operator + namespace: {{ $.Release.Namespace }} +- kind: ServiceAccount + name: node-feature-discovery + namespace: {{ $.Release.Namespace }} +roleRef: + kind: ClusterRole + name: gpu-operator + apiGroup: rbac.authorization.k8s.io diff --git a/deployments/gpu-operator/templates/dcgm_exporter_config.yaml b/deployments/gpu-operator/templates/dcgm_exporter_config.yaml new file mode 100644 index 000000000..c4bf6dcc8 --- /dev/null +++ b/deployments/gpu-operator/templates/dcgm_exporter_config.yaml @@ -0,0 +1,14 @@ +{{- if .Values.dcgmExporter.config }} +{{- if and (.Values.dcgmExporter.config.create) (not (empty .Values.dcgmExporter.config.data)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.dcgmExporter.config.name }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "gpu-operator.labels" . 
| nindent 4 }} +data: + dcgm-metrics.csv: | +{{- .Values.dcgmExporter.config.data | nindent 4 }} +{{- end }} +{{- end }} diff --git a/deployments/gpu-operator/templates/mig_config.yaml b/deployments/gpu-operator/templates/mig_config.yaml new file mode 100644 index 000000000..2ceb04779 --- /dev/null +++ b/deployments/gpu-operator/templates/mig_config.yaml @@ -0,0 +1,10 @@ +{{- if and (.Values.migManager.config.create) (not (empty .Values.migManager.config.data)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.migManager.config.name }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "gpu-operator.labels" . | nindent 4 }} +data: {{ toYaml .Values.migManager.config.data | nindent 2 }} +{{- end }} diff --git a/deployments/gpu-operator/templates/role.yaml b/deployments/gpu-operator/templates/role.yaml index ef65b1af6..9e5bcede3 100644 --- a/deployments/gpu-operator/templates/role.yaml +++ b/deployments/gpu-operator/templates/role.yaml @@ -1,159 +1,67 @@ apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole +kind: Role metadata: name: gpu-operator labels: {{- include "gpu-operator.labels" . | nindent 4 }} app.kubernetes.io/component: "gpu-operator" rules: -- apiGroups: - - config.openshift.io - resources: - - proxies - verbs: - - get - apiGroups: - rbac.authorization.k8s.io resources: - roles - rolebindings - - clusterroles - - clusterrolebindings - verbs: - - '*' -- apiGroups: - - "" - resources: - - pods - - services - - endpoints - - persistentvolumeclaims - - events - - configmaps - - secrets - - serviceaccounts - - nodes - verbs: - - '*' -- apiGroups: - - "" - resources: - - namespaces verbs: + - create - get - list - - create - watch - update - patch -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' + - delete - apiGroups: - apps resources: - controllerrevisions verbs: - - 'get' - - 'list' - - 'watch' -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - - prometheusrules - verbs: - get - list - - create - watch - - update - - delete - apiGroups: - - nvidia.com - resources: - - '*' - verbs: - - '*' -- apiGroups: - - scheduling.k8s.io + - apps resources: - - priorityclasses + - daemonsets verbs: - - get - - list - - watch - create -- apiGroups: - - security.openshift.io - resources: - - securitycontextconstraints - verbs: - - '*' -- apiGroups: - - config.openshift.io - resources: - - clusterversions - verbs: - - get - - list - - watch -- apiGroups: - - "" - - coordination.k8s.io - resources: - - configmaps - - leases - verbs: - get - list - watch - - create - update - patch - delete -- apiGroups: - - node.k8s.io - resources: - - runtimeclasses - verbs: - - get - - list - - create - - update - - watch - - delete -- apiGroups: - - image.openshift.io - resources: - - imagestreams - verbs: - - get - - list - - watch - apiGroups: - "" resources: + - configmaps + - endpoints - pods - pods/eviction + - secrets + - services + - services/finalizers + - serviceaccounts verbs: + - create - get - list - watch - - create - - delete - update - patch + - delete - apiGroups: - - "" + - coordination.k8s.io resources: - - nodes + - leases verbs: - get - list @@ -161,17 +69,16 @@ rules: - create - update - patch + - delete - apiGroups: - - apiextensions.k8s.io + - monitoring.coreos.com resources: - - customresourcedefinitions + - servicemonitors + - prometheusrules verbs: - get - list + - create - watch - update - - patch - - create -{{- if .Values.operator.cleanupCRD }} - delete -{{- 
end }} diff --git a/deployments/gpu-operator/templates/rolebinding.yaml b/deployments/gpu-operator/templates/rolebinding.yaml index 08b87fbce..c915a4659 100644 --- a/deployments/gpu-operator/templates/rolebinding.yaml +++ b/deployments/gpu-operator/templates/rolebinding.yaml @@ -1,4 +1,4 @@ -kind: ClusterRoleBinding +kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: gpu-operator @@ -9,10 +9,7 @@ subjects: - kind: ServiceAccount name: gpu-operator namespace: {{ $.Release.Namespace }} -- kind: ServiceAccount - name: node-feature-discovery - namespace: {{ $.Release.Namespace }} roleRef: - kind: ClusterRole + kind: Role name: gpu-operator apiGroup: rbac.authorization.k8s.io diff --git a/deployments/gpu-operator/templates/upgrade_crd.yaml b/deployments/gpu-operator/templates/upgrade_crd.yaml index 4fbb34847..6552558af 100644 --- a/deployments/gpu-operator/templates/upgrade_crd.yaml +++ b/deployments/gpu-operator/templates/upgrade_crd.yaml @@ -74,6 +74,10 @@ spec: - name: {{ . }} {{- end }} {{- end }} + {{- with .Values.operator.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} containers: - name: upgrade-crd image: {{ include "gpu-operator.fullimage" . }} @@ -82,7 +86,7 @@ spec: - /bin/sh - -c - > - kubectl apply -f /opt/gpu-operator/nvidia.com_clusterpolicies_crd.yaml; + kubectl apply -f /opt/gpu-operator/nvidia.com_clusterpolicies.yaml; kubectl apply -f /opt/gpu-operator/nvidia.com_nvidiadrivers.yaml; {{- if .Values.nfd.enabled }} kubectl apply -f /opt/gpu-operator/nfd-api-crds.yaml; diff --git a/deployments/gpu-operator/values.yaml b/deployments/gpu-operator/values.yaml index f7c776fdf..90e2efd9d 100644 --- a/deployments/gpu-operator/values.yaml +++ b/deployments/gpu-operator/values.yaml @@ -20,6 +20,18 @@ sandboxWorkloads: enabled: false defaultWorkload: "container" +hostPaths: + # rootFS represents the path to the root filesystem of the host. + # This is used by components that need to interact with the host filesystem + # and as such this must be a chroot-able filesystem. + # Examples include the MIG Manager and Toolkit Container, which may need to + # stop, start, or restart systemd services. + rootFS: "/" + + # driverInstallDir represents the root at which driver files, including libraries, + # config files, and executables, can be found. + driverInstallDir: "/run/nvidia/driver" + daemonsets: labels: {} annotations: {} @@ -67,11 +79,11 @@ operator: cleanupCRD: false # Upgrade the CRD on chart upgrade; requires the --disable-openapi-validation flag # to be passed during helm upgrade. 
- upgradeCRD: false + upgradeCRD: true initContainer: image: cuda repository: nvcr.io/nvidia - version: 12.3.2-base-ubi8 + version: 12.6.2-base-ubi9 imagePullPolicy: IfNotPresent tolerations: - key: "node-role.kubernetes.io/master" @@ -131,7 +143,7 @@ driver: usePrecompiled: false repository: nvcr.io/nvidia image: driver - version: "550.54.14" + version: "550.127.08" imagePullPolicy: IfNotPresent imagePullSecrets: [] startupProbe: @@ -177,12 +189,6 @@ driver: timeoutSeconds: 300 deleteEmptyDir: false manager: - image: k8s-driver-manager - repository: nvcr.io/nvidia/cloud-native - # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4 - # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.5 - imagePullPolicy: IfNotPresent env: - name: ENABLE_GPU_POD_EVICTION value: "true" @@ -219,7 +225,7 @@ toolkit: enabled: true repository: nvcr.io/nvidia/k8s image: container-toolkit - version: v1.14.6-ubuntu20.04 + version: v1.17.2-ubuntu20.04 imagePullPolicy: IfNotPresent imagePullSecrets: [] env: [] @@ -230,7 +236,7 @@ devicePlugin: enabled: true repository: nvcr.io/nvidia image: k8s-device-plugin - version: v0.15.0-rc.1-ubi8 + version: v0.17.0 imagePullPolicy: IfNotPresent imagePullSecrets: [] args: [] @@ -271,12 +277,16 @@ devicePlugin: config: # Create a ConfigMap (default: false) create: false - # ConfigMap name (either exiting or to create a new one with create=true above) + # ConfigMap name (either existing or to create a new one with create=true above) name: "" # Default config name within the ConfigMap default: "" # Data section for the ConfigMap to create (i.e. only applies when create=true) data: {} + # MPS-related configuration for the plugin + mps: + # MPS root path on the host + root: "/run/nvidia/mps" # standalone dcgm hostengine dcgm: @@ -284,9 +294,8 @@ dcgm: enabled: false repository: nvcr.io/nvidia/cloud-native image: dcgm - version: 3.3.0-1-ubuntu22.04 + version: 3.3.9-1-ubuntu22.04 imagePullPolicy: IfNotPresent - hostPort: 5555 args: [] env: [] resources: {} @@ -295,7 +304,7 @@ dcgmExporter: enabled: true repository: nvcr.io/nvidia/k8s image: dcgm-exporter - version: 3.3.0-3.2.0-ubuntu22.04 + version: 3.3.9-3.6.1-ubuntu22.04 imagePullPolicy: IfNotPresent env: - name: DCGM_EXPORTER_LISTEN @@ -317,12 +326,31 @@ dcgmExporter: # target_label: instance # replacement: $1 # action: replace + # DCGM Exporter configuration + # This block is used to configure DCGM Exporter to emit a customized list of metrics. + # Use "name" to either point to an existing ConfigMap or to create a new one with a + # list of configurations (i.e. with create=true). + # When pointing to an existing ConfigMap, the ConfigMap must exist in the same namespace as the release. + # The metrics are expected to be listed under a key called `dcgm-metrics.csv`. + # Use "data" to build an integrated ConfigMap from a set of custom metrics as + # part of the chart. An example with some custom metrics is shown below. Note that + # the contents of "data" must be in CSV format and be valid DCGM Exporter metric configurations. + # config: + # name: custom-dcgm-exporter-metrics + # create: true + # data: |- + # Format + # If line starts with a '#' it is considered a comment + # DCGM FIELD, Prometheus metric type, help message + # Clocks + # DCGM_FI_DEV_SM_CLOCK, gauge, SM clock frequency (in MHz). + # DCGM_FI_DEV_MEM_CLOCK, gauge, Memory clock frequency (in MHz). 
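A minimal sketch of consuming the dcgmExporter.config hook documented above (the release name, namespace, and ConfigMap name are illustrative, not chart defaults):

# Create the metrics list out-of-band in the release namespace, under the
# key the chart expects (dcgm-metrics.csv).
kubectl create configmap custom-dcgm-exporter-metrics \
  -n gpu-operator \
  --from-file=dcgm-metrics.csv=./dcgm-metrics.csv

# Reference the existing ConfigMap by name; dcgmExporter.config.create
# stays false because the chart does not need to render anything.
helm upgrade gpu-operator nvidia/gpu-operator \
  -n gpu-operator \
  --reuse-values \
  --set dcgmExporter.config.name=custom-dcgm-exporter-metrics

Setting create=true with inline data instead lets the new dcgm_exporter_config.yaml template render the ConfigMap as part of the release.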
gfd: enabled: true repository: nvcr.io/nvidia image: k8s-device-plugin - version: v0.15.0-rc.1-ubi8 + version: v0.17.0 imagePullPolicy: IfNotPresent imagePullSecrets: [] env: @@ -336,16 +364,52 @@ migManager: enabled: true repository: nvcr.io/nvidia/cloud-native image: k8s-mig-manager - version: v0.6.0-ubuntu20.04 + version: v0.10.0-ubuntu20.04 imagePullPolicy: IfNotPresent imagePullSecrets: [] env: - name: WITH_REBOOT value: "false" resources: {} + # MIG configuration + # Use "name" to either point to an existing ConfigMap or to create a new one with a list of configurations (i.e. with create=true). + # Use "data" to build an integrated ConfigMap from a set of configurations as + # part of this helm chart. An example of setting "data" might be: + # config: + # name: custom-mig-parted-configs + # create: true + # data: |- + # config.yaml: |- + # version: v1 + # mig-configs: + # all-disabled: + # - devices: all + # mig-enabled: false + # custom-mig: + # - devices: [0] + # mig-enabled: false + # - devices: [1] + # mig-enabled: true + # mig-devices: + # "1g.10gb": 7 + # - devices: [2] + # mig-enabled: true + # mig-devices: + # "2g.20gb": 2 + # "3g.40gb": 1 + # - devices: [3] + # mig-enabled: true + # mig-devices: + # "3g.40gb": 1 + # "4g.40gb": 1 config: - name: "default-mig-parted-config" default: "all-disabled" + # Create a ConfigMap (default: false) + create: false + # ConfigMap name (either existing or to create a new one with create=true above) + name: "" + # Data section for the ConfigMap to create (i.e. only applies when create=true) + data: {} gpuClientsConfig: name: "" @@ -363,7 +427,7 @@ gds: enabled: false repository: nvcr.io/nvidia/cloud-native image: nvidia-fs - version: "2.17.5" + version: "2.20.5" imagePullPolicy: IfNotPresent imagePullSecrets: [] env: [] @@ -373,7 +437,7 @@ gdrcopy: enabled: false repository: nvcr.io/nvidia/cloud-native image: gdrdrv - version: "v2.4.1" + version: "v2.4.1-2" imagePullPolicy: IfNotPresent imagePullSecrets: [] env: [] @@ -389,12 +453,6 @@ vgpuManager: env: [] resources: {} driverManager: - image: k8s-driver-manager - repository: nvcr.io/nvidia/cloud-native - # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4 - # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.4 - imagePullPolicy: IfNotPresent env: - name: ENABLE_GPU_POD_EVICTION value: "false" @@ -405,7 +463,7 @@ vgpuDeviceManager: enabled: true repository: nvcr.io/nvidia/cloud-native image: vgpu-device-manager - version: "v0.2.4" + version: v0.2.8 imagePullPolicy: IfNotPresent imagePullSecrets: [] env: [] @@ -417,18 +475,12 @@ vfioManager: enabled: true repository: nvcr.io/nvidia image: cuda - version: 12.3.2-base-ubi8 + version: 12.6.2-base-ubi9 imagePullPolicy: IfNotPresent imagePullSecrets: [] env: [] resources: {} driverManager: - image: k8s-driver-manager - repository: nvcr.io/nvidia/cloud-native - # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4 - # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.5 - imagePullPolicy: IfNotPresent env: - name: ENABLE_GPU_POD_EVICTION value: "false" @@ -440,12 +492,12 @@ kataManager: config: artifactsDir: "/opt/nvidia-gpu-operator/artifacts/runtimeclasses" runtimeClasses: - - name: kata-qemu-nvidia-gpu + - name: kata-nvidia-gpu nodeSelector: {} artifacts: url: nvcr.io/nvidia/cloud-native/kata-gpu-artifacts:ubuntu22.04-535.54.03 pullSecret: "" 
- - name: kata-qemu-nvidia-gpu-snp + - name: kata-nvidia-gpu-snp nodeSelector: "nvidia.com/cc.capable": "true" artifacts: @@ -453,7 +505,7 @@ kataManager: pullSecret: "" repository: nvcr.io/nvidia/cloud-native image: k8s-kata-manager - version: v0.1.2 + version: v0.2.2 imagePullPolicy: IfNotPresent imagePullSecrets: [] env: [] @@ -463,7 +515,7 @@ sandboxDevicePlugin: enabled: true repository: nvcr.io/nvidia image: kubevirt-gpu-device-plugin - version: v1.2.4 + version: v1.2.10 imagePullPolicy: IfNotPresent imagePullSecrets: [] args: [] @@ -485,6 +537,7 @@ ccManager: node-feature-discovery: enableNodeFeatureApi: true + priorityClassName: system-node-critical gc: enable: true replicaCount: 1 @@ -529,3 +582,10 @@ node-feature-discovery: # resourceLabels: ["nvidia.com/feature-1","nvidia.com/feature-2"] # enableTaints: false # labelWhiteList: "nvidia.com/gpu" + +# All components use the same driver manager image version defined below +driverManager: + image: k8s-driver-manager + repository: nvcr.io/nvidia/cloud-native + version: v0.7.0 + imagePullPolicy: IfNotPresent diff --git a/docker/Dockerfile b/docker/Dockerfile index 471285567..9678b5a2a 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -12,15 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG CUDA_IMAGE=nvcr.io/nvidia/cuda -ARG CUDA_VERSION=undefined - -ARG BASE_DIST=ubi8 ARG GOLANG_VERSION=x.x.x -FROM ${CUDA_IMAGE}:${CUDA_VERSION}-base-${BASE_DIST} as builder +FROM nvcr.io/nvidia/cuda:12.6.2-base-ubi9 as builder -RUN yum install -y wget make git gcc +RUN dnf install -y wget make git gcc ARG GOLANG_VERSION=0.0.0 RUN set -eux; \ @@ -29,7 +25,7 @@ RUN set -eux; \ case "${arch##*-}" in \ x86_64 | amd64) ARCH='amd64' ;; \ ppc64el | ppc64le) ARCH='ppc64le' ;; \ - aarch64) ARCH='arm64' ;; \ + aarch64 | arm64) ARCH='arm64' ;; \ *) echo "unsupported architecture" ; exit 1 ;; \ esac; \ wget -nv -O - https://storage.googleapis.com/golang/go${GOLANG_VERSION}.linux-${ARCH}.tar.gz \ @@ -59,7 +55,7 @@ ARG VERSION="unknown" ARG GIT_COMMIT="unknown" RUN make gpu-operator -FROM ${CUDA_IMAGE}:${CUDA_VERSION}-base-${BASE_DIST} +FROM nvcr.io/nvidia/cuda:12.6.2-base-ubi9 # Remove CUDA libs (compat etc.) in favor of libs installed by the NVIDIA driver RUN dnf remove -y cuda-* @@ -94,14 +90,14 @@ RUN chmod +x ./kubectl RUN mv ./kubectl /usr/local/bin # Add CRD resource into the image for helm upgrades -COPY deployments/gpu-operator/crds/nvidia.com_clusterpolicies_crd.yaml /opt/gpu-operator/nvidia.com_clusterpolicies_crd.yaml +COPY deployments/gpu-operator/crds/nvidia.com_clusterpolicies.yaml /opt/gpu-operator/nvidia.com_clusterpolicies.yaml COPY deployments/gpu-operator/crds/nvidia.com_nvidiadrivers.yaml /opt/gpu-operator/nvidia.com_nvidiadrivers.yaml COPY deployments/gpu-operator/charts/node-feature-discovery/crds/nfd-api-crds.yaml /opt/gpu-operator/nfd-api-crds.yaml # Install / upgrade packages here that are required to resolve CVEs ARG CVE_UPDATES RUN if [ -n "${CVE_UPDATES}" ]; then \ - yum update -y ${CVE_UPDATES} && \ + dnf update -y ${CVE_UPDATES} && \ rm -rf /var/cache/yum/*; \ fi diff --git a/docker/Dockerfile.devel b/docker/Dockerfile.devel index 5deb5be52..33d0abc15 100644 --- a/docker/Dockerfile.devel +++ b/docker/Dockerfile.devel @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
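With the per-component driver manager stanzas removed from values.yaml in favor of the shared top-level driverManager block above, one override now retargets every consumer; a hedged sketch, assuming the release is named gpu-operator (the chart reference is illustrative):

# Bump the shared k8s-driver-manager image once; the driver, vgpuManager,
# and vfioManager sections all inherit it from the top-level block.
helm upgrade gpu-operator nvidia/gpu-operator \
  -n gpu-operator \
  --reuse-values \
  --set driverManager.version=v0.7.0

Before this change, the same bump meant setting driver.manager.version, vgpuManager.driverManager.version, and vfioManager.driverManager.version individually.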
ARG GOLANG_VERSION=x.x.x -ARG GOLANGCI_LINT_VERSION=1.55.2 +ARG GOLANGCI_LINT_VERSION=1.60.3 FROM golang:${GOLANG_VERSION} diff --git a/go.mod b/go.mod index 75a9fbfe2..c306ecd65 100644 --- a/go.mod +++ b/go.mod @@ -1,91 +1,92 @@ module github.com/NVIDIA/gpu-operator -go 1.21 +go 1.23 + +toolchain go1.23.3 require ( - github.com/Masterminds/sprig/v3 v3.2.3 - github.com/NVIDIA/go-nvlib v0.1.0 - github.com/NVIDIA/k8s-kata-manager v0.0.0-20230620232711-08b57feb9b5a - github.com/NVIDIA/k8s-operator-libs v0.0.0-20240214071211-ea58a3ada15c - github.com/NVIDIA/nvidia-container-toolkit v1.14.6 - github.com/davecgh/go-spew v1.1.1 - github.com/go-logr/logr v1.4.1 - github.com/mitchellh/hashstructure v1.1.0 - github.com/mittwald/go-helm-client v0.12.7 - github.com/onsi/ginkgo/v2 v2.14.0 - github.com/onsi/gomega v1.30.0 - github.com/openshift/api v0.0.0-20240306072808-610cbc77dbab - github.com/openshift/client-go v0.0.0-20240215090359-b71f6f2731f5 - github.com/operator-framework/api v0.17.6 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.65.2 - github.com/prometheus/client_golang v1.18.0 - github.com/regclient/regclient v0.4.8 + github.com/Masterminds/sprig/v3 v3.3.0 + github.com/NVIDIA/go-nvlib v0.7.0 + github.com/NVIDIA/k8s-kata-manager v0.2.2 + github.com/NVIDIA/k8s-operator-libs v0.0.0-20240826221728-249ba446fa35 + github.com/NVIDIA/nvidia-container-toolkit v1.17.2 + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc + github.com/go-logr/logr v1.4.2 + github.com/mittwald/go-helm-client v0.12.14 + github.com/onsi/ginkgo/v2 v2.22.0 + github.com/onsi/gomega v1.35.1 + github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f + github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f + github.com/operator-framework/api v0.27.0 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 + github.com/prometheus/client_golang v1.20.5 + github.com/regclient/regclient v0.7.2 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 - github.com/urfave/cli/v2 v2.27.1 - go.uber.org/zap v1.26.0 - golang.org/x/mod v0.15.0 - k8s.io/api v0.29.1 - k8s.io/apiextensions-apiserver v0.29.1 - k8s.io/apimachinery v0.29.1 - k8s.io/client-go v0.29.1 - k8s.io/klog/v2 v2.110.1 - sigs.k8s.io/controller-runtime v0.17.1 - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd + github.com/urfave/cli/v2 v2.27.5 + go.uber.org/zap v1.27.0 + golang.org/x/mod v0.22.0 + k8s.io/api v0.31.2 + k8s.io/apiextensions-apiserver v0.31.2 + k8s.io/apimachinery v0.31.2 + k8s.io/client-go v0.31.2 + k8s.io/klog/v2 v2.130.1 + sigs.k8s.io/controller-runtime v0.19.1 sigs.k8s.io/yaml v1.4.0 ) require ( + dario.cat/mergo v1.0.1 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/BurntSushi/toml v1.3.2 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/Masterminds/squirrel v1.5.4 // indirect github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect 
github.com/chai2010/gettext-go v1.0.2 // indirect - github.com/containerd/containerd v1.7.11 // indirect + github.com/containerd/containerd v1.7.12 // indirect github.com/containerd/log v0.1.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect + github.com/cyphar/filepath-securejoin v0.3.1 // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/docker/cli v24.0.7+incompatible // indirect + github.com/docker/cli v25.0.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v24.0.7+incompatible // indirect + github.com/docker/docker v25.0.6+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.0 // indirect - github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/docker/go-units v0.5.0 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect - github.com/emicklei/go-restful/v3 v3.11.1 // indirect - github.com/evanphx/json-patch v5.7.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.2 // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect - github.com/go-openapi/swag v0.22.7 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect @@ -94,13 +95,13 @@ require ( github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/huandu/xstrings v1.4.0 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jmoiron/sqlx v1.3.5 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect 
github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.4 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.10.9 // indirect @@ -109,74 +110,73 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/locker v1.0.1 // indirect - github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/spdystream v0.4.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect - github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/rubenv/sql-migrate v1.6.0 // indirect + github.com/rubenv/sql-migrate v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/shopspring/decimal v1.3.1 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect - github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230818092907-09424fdc8884 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect + github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + 
go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect go.starlark.net v0.0.0-20231121155337-90ade8b19d09 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sync v0.5.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.15.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/term v0.25.0 // indirect + golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/grpc v1.60.1 // indirect - google.golang.org/protobuf v1.32.0 // indirect - gopkg.in/evanphx/json-patch.v5 v5.7.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.35.1 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - helm.sh/helm/v3 v3.13.3 // indirect - k8s.io/apiserver v0.29.1 // indirect - k8s.io/cli-runtime v0.29.1 // indirect - k8s.io/component-base v0.29.1 // indirect - k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432 // indirect - k8s.io/kubectl v0.29.1 // indirect - k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect - oras.land/oras-go v1.2.4 // indirect - sigs.k8s.io/kustomize/api v0.16.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect + helm.sh/helm/v3 v3.16.1 // indirect + k8s.io/apiserver v0.31.2 // indirect + k8s.io/cli-runtime v0.31.1 // indirect + k8s.io/component-base v0.31.2 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/kubectl v0.31.0 // indirect + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect + oras.land/oras-go v1.2.5 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.17.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index aa60d3945..3c2dda11a 100644 --- a/go.sum +++ b/go.sum @@ -1,34 +1,37 @@ +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= 
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= -github.com/NVIDIA/go-nvlib v0.1.0 h1:VYNqzGRaE5zrku1ysS9J+hSkuuwTpYuSLqDF1BaCNUs= -github.com/NVIDIA/go-nvlib v0.1.0/go.mod h1:lDrLM77CNdwfCN5ySYpuyzBQLQR6pGC+rHri1T4l+l4= -github.com/NVIDIA/k8s-kata-manager v0.0.0-20230620232711-08b57feb9b5a h1:3nyTp1cXzZMHoUuhMwHdz9QDzl100ECvIDYFxdjWk6o= -github.com/NVIDIA/k8s-kata-manager v0.0.0-20230620232711-08b57feb9b5a/go.mod h1:K7HCLTndSwBEZwBu6sU7daVeryV1Qt/DtKH8nONJj4o= -github.com/NVIDIA/k8s-operator-libs v0.0.0-20240214071211-ea58a3ada15c h1:nt9jPM6K7DCYydMKhlfMrZ9aFasdNU4WKUZvO4cN2us= -github.com/NVIDIA/k8s-operator-libs v0.0.0-20240214071211-ea58a3ada15c/go.mod h1:m9Xr+fGiGWTxyCYnbby7a91cDF1GpMH4PSiDwoDp5FA= -github.com/NVIDIA/nvidia-container-toolkit v1.14.6 h1:42PccGiwrz2K5KTEOOO3X023ToBqNdd0xnnJVCh+Mqs= -github.com/NVIDIA/nvidia-container-toolkit v1.14.6/go.mod h1:SD4zQVx3nyNeh1JKewquKoGd5i+nzJwjRCTu9Xmh5H4= +github.com/NVIDIA/go-nvlib v0.7.0 h1:Z/J7skMdLbTiHvomKVsGYsttfQMZj5FwNYIFXhZ4i/c= +github.com/NVIDIA/go-nvlib v0.7.0/go.mod 
h1:9UrsLGx/q1OrENygXjOuM5Ey5KCtiZhbvBlbUIxtGWY= +github.com/NVIDIA/k8s-kata-manager v0.2.2 h1:+xVIp4yLfCjZ31Dfrm9LOKo4T47b4g+DV6XkwAqalns= +github.com/NVIDIA/k8s-kata-manager v0.2.2/go.mod h1:UGjGQUcpXTegwyOc5IwcyLTzPKwO9lOIkqw/qUzk8Q0= +github.com/NVIDIA/k8s-operator-libs v0.0.0-20240826221728-249ba446fa35 h1:w9DXPTJCq9k2PVpdBQJrWE4vAmZcFaSHKLpM/xos9WI= +github.com/NVIDIA/k8s-operator-libs v0.0.0-20240826221728-249ba446fa35/go.mod h1:sw6XRI5wq0Q+nSgaWa1Pyo/ZKxQebc70x6VIznDAxtM= +github.com/NVIDIA/nvidia-container-toolkit v1.17.2 h1:iE6PK9SQH3HyDrOolu27xn3CJgURR3bDtnbfFrxdML8= +github.com/NVIDIA/nvidia-container-toolkit v1.17.2/go.mod h1:R6bNf6ca0IjjACa0ncKGvsrx6zSjsgz8QkFyBDk5szU= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -51,70 +54,69 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembj github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw= -github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE= +github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= +github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 
+github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= +github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.3.1 h1:1V7cHiaW+C+39wEfpH6XlLBQo3j/PciWFrgfCLS8XrE= +github.com/cyphar/filepath-securejoin v0.3.1/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= -github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU= +github.com/docker/cli v25.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= -github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg= +github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= 
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/emicklei/go-restful/v3 v3.11.1 h1:S+9bSbua1z3FgCnV0KKOSSZ3mDthb5NyEPL5gEpCvyk= -github.com/emicklei/go-restful/v3 v3.11.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= -github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= -github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= +github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= -github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= @@ -123,9 +125,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -134,13 +135,13 @@ github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbX github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= -github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8= -github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -151,10 +152,8 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4er github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= @@ -162,25 +161,22 @@ github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl76 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= @@ -194,17 +190,14 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= -github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= 
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= -github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -214,19 +207,20 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de 
h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -240,30 +234,23 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= -github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= -github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= -github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= -github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mittwald/go-helm-client v0.12.7 h1:+rhDcLP96d+IavqTORDsY8tc6BNzukwzka85HX5hBzw= -github.com/mittwald/go-helm-client v0.12.7/go.mod h1:ipUXxirGzXl7hfMV4lY3SYXhwudYYb6T/dnNfKHDQiM= +github.com/mittwald/go-helm-client v0.12.14 h1:az3GJ4kRmFK609Ic3iHXveNtg92n9jWG0YpKKTIK4oo= +github.com/mittwald/go-helm-client v0.12.14/go.mod h1:2VogAupgnV7FiuoPqtpCYKS/RrMh9fFA3/pD/OmTaLc= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= 
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= @@ -277,27 +264,27 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/olareg/olareg v0.1.1 h1:Ui7q93zjcoF+U9U71sgqgZWByDoZOpqHitUXEu2xV+g= +github.com/olareg/olareg v0.1.1/go.mod h1:w8NP4SWrHHtxsFaUiv1lnCnYPm4sN1seCd2h7FK/dc0= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= -github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/openshift/api v0.0.0-20240306072808-610cbc77dbab h1:L3k198pZJhluvJZzD/ySpkKqJzQh5MgWlsT4U6NOYUY= -github.com/openshift/api v0.0.0-20240306072808-610cbc77dbab/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= -github.com/openshift/client-go v0.0.0-20240215090359-b71f6f2731f5 h1:iQ2Y1LUX7FbBm6ddaSVz/KeWXUMkqrBP/C5yt0DvBgI= -github.com/openshift/client-go v0.0.0-20240215090359-b71f6f2731f5/go.mod h1:Y5Hp789dTrF6Fq8cA5YQlpwffmlLy8mc2un/CY0cg7Q= -github.com/operator-framework/api v0.17.6 h1:E6+vlvYUKafvoXYtCuHlDZrXX4vl8AT+r93OxNlzjpU= -github.com/operator-framework/api v0.17.6/go.mod h1:l/cuwtPxkVUY7fzYgdust2m9tlmb8I4pOvbsUufRb24= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f h1:ya1OmyZm3LIIxI3U9VE9Nyx3ehCHgBwxyFUPflYPWls= +github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f/go.mod 
h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo= +github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f h1:FRc0bVNWprihWS0GqQWzb3dY4dkCwpOP3mDw5NwSoR4= +github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f/go.mod h1:KiZi2mJRH1TOJ3FtBDYS6YvUL30s/iIXaGSUrSa36mo= +github.com/operator-framework/api v0.27.0 h1:OrVaGKZJvbZo58HTv2guz7aURkhVKYhFqZ/6VpifiXI= +github.com/operator-framework/api v0.27.0/go.mod h1:lg2Xx+S8NQWGYlEOvFwQvH46E5EK5IrAIL7HWfAhciM= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= @@ -305,54 +292,53 @@ github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rK github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.65.2 h1:DZzMjhqxx3+kAPpwWdng3ktO6NErh1wGuW5tXJamak8= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.65.2/go.mod h1:xcfWyzl4BpEe5jnVkw7D1yCHU7GHjfjCERJsEfGbpSU= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 h1:Fm9Z+FabnB+6EoGq15j+pyLmaK6hYrYOpBlTzOLTQ+E= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/regclient/regclient v0.4.8 h1:h4uZRR4OT4oO+50qWu4bj+rzqRs/JwD3erb6lHIkYK4= -github.com/regclient/regclient v0.4.8/go.mod h1:UC6i29I09h9KHyABGLGvsvGi7KYRY8ZKLyt7fzvW4oE= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/regclient/regclient v0.7.2 h1:vcldDAwBMLtighYVMeb6qNt5+0hKg3AN2IkCc0JIJNM= +github.com/regclient/regclient v0.7.2/go.mod h1:QlA7W9/pvmbblOXM4d49JgfuOTwVXcUMKt3bFuOSVIQ= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rubenv/sql-migrate v1.6.0 h1:IZpcTlAx/VKXphWEpwWJ7BaMq05tYtE80zYz+8a5Il8= -github.com/rubenv/sql-migrate v1.6.0/go.mod h1:m3ilnKP7sNb4eYkLsp6cGdPOl4OBcXM6rcbzU+Oqc5k= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI= +github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -361,13 +347,16 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= -github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= +github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -377,140 +366,111 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= 
-github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230818092907-09424fdc8884 h1:V0LUbfm4kVA1CPG8FgG9AGZqa3ykE5U12Gd3PZgoItA= -gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230818092907-09424fdc8884/go.mod h1:/x5Ky1ZJNyCjDkgSL1atII0EFKQF5WaIHKeP5nkaQfk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.starlark.net v0.0.0-20231121155337-90ade8b19d09 h1:hzy3LFnSN8kuQK8h9tHl4ndF6UruMj47OqwqsS+/Ai4= go.starlark.net v0.0.0-20231121155337-90ade8b19d09/go.mod h1:LcLNIzVOMp4oV+uusnpk+VU+SzXaJakUuBjoCSWH5dM= go.uber.org/goleak v1.3.0 
h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= -golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.19.0 
h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= -google.golang.org/protobuf 
v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v5 v5.7.0 h1:dGKGylPlZ/jus2g1YqhhyzfH0gPy2R8/MYUpW/OslTY= -gopkg.in/evanphx/json-patch.v5 v5.7.0/go.mod h1:/kvTRh1TVm5wuM6OkHxqXtE/1nUZZpihg29RtuIyfvk= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -518,40 +478,40 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.13.3 h1:0zPEdGqHcubehJHP9emCtzRmu8oYsJFRrlVF3TFj8xY= -helm.sh/helm/v3 v3.13.3/go.mod h1:3OKO33yI3p4YEXtTITN2+4oScsHeQe71KuzhlZ+aPfg= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= -k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= -k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/apiserver v0.29.1 h1:e2wwHUfEmMsa8+cuft8MT56+16EONIEK8A/gpBSco+g= -k8s.io/apiserver v0.29.1/go.mod h1:V0EpkTRrJymyVT3M49we8uh2RvXf7fWC5XLB0P3SwRw= 
-k8s.io/cli-runtime v0.29.1 h1:By3WVOlEWYfyxhGko0f/IuAOLQcbBSMzwSaDren2JUs= -k8s.io/cli-runtime v0.29.1/go.mod h1:vjEY9slFp8j8UoMhV5AlO8uulX9xk6ogfIesHobyBDU= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= -k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw= -k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432 h1:+XYBQU3ZKUu60H6fEnkitTTabGoKfIG8zczhZBENu9o= -k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw= -k8s.io/kubectl v0.29.1 h1:rWnW3hi/rEUvvg7jp4iYB68qW5un/urKbv7fu3Vj0/s= -k8s.io/kubectl v0.29.1/go.mod h1:SZzvLqtuOJYSvZzPZR9weSuP0wDQ+N37CENJf0FhDF4= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= -oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= -sigs.k8s.io/controller-runtime v0.17.1 h1:V1dQELMGVk46YVXXQUbTFujU7u4DQj6YUj9Rb6cuzz8= -sigs.k8s.io/controller-runtime v0.17.1/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +helm.sh/helm/v3 v3.16.1 h1:cER6tI/8PgUAsaJaQCVBUg3VI9KN4oVaZJgY60RIc0c= +helm.sh/helm/v3 v3.16.1/go.mod h1:r+xBHHP20qJeEqtvBXMf7W35QDJnzY/eiEBzt+TfHps= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= +k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4= +k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE= +k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk= +k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= +k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA= +k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kubectl v0.31.0 h1:kANwAAPVY02r4U4jARP/C+Q1sssCcN/1p9Nk+7BQKVg= +k8s.io/kubectl v0.31.0/go.mod h1:pB47hhFypGsaHAPjlwrNbvhXgmuAr01ZBvAIIUaI8d4= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go 
v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= +oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= +sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk= +sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.16.0 h1:/zAR4FOQDCkgSDmVzV2uiFbuy9bhu3jEzthrHCuvm1g= -sigs.k8s.io/kustomize/api v0.16.0/go.mod h1:MnFZ7IP2YqVyVwMWoRxPtgl/5hpA+eCCrQR/866cm5c= -sigs.k8s.io/kustomize/kyaml v0.16.0 h1:6J33uKSoATlKZH16unr2XOhDI+otoe2sR3M8PDzW3K0= -sigs.k8s.io/kustomize/kyaml v0.16.0/go.mod h1:xOK/7i+vmE14N2FdFyugIshB8eF6ALpy7jI87Q2nRh4= +sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= +sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= +sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= +sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/hack/must-gather.sh b/hack/must-gather.sh index cb9e0b3e5..c66b172dc 100755 --- a/hack/must-gather.sh +++ b/hack/must-gather.sh @@ -110,7 +110,7 @@ for node in $(echo "$gpu_pci_nodes"); do done echo "Get the GPU nodes (status)" -$K get nodes -l nvidia.com/gpu.present=true > $ARTIFACT_DIR/gpu_nodes.status +$K get nodes -l nvidia.com/gpu.present=true -o wide > $ARTIFACT_DIR/gpu_nodes.status echo "Get the GPU nodes (description)" $K describe nodes -l nvidia.com/gpu.present=true > $ARTIFACT_DIR/gpu_nodes.descr diff --git a/internal/conditions/clusterpolicy.go b/internal/conditions/clusterpolicy.go index 4782a634d..b1d89ba13 100644 --- a/internal/conditions/clusterpolicy.go +++ b/internal/conditions/clusterpolicy.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - nvidiav1 "github.com/NVIDIA/gpu-operator/api/v1" + nvidiav1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" ) // Specific implementation of the Updater interface for one of our controllers diff --git a/internal/conditions/conditions_test.go b/internal/conditions/conditions_test.go index 515683e53..6d811d09e 100644 --- a/internal/conditions/conditions_test.go +++ b/internal/conditions/conditions_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client/fake" - nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/v1alpha1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" ) func TestConditionsUpdater_SetConditionsReady(t *testing.T) { diff --git a/internal/conditions/nvidiadriver.go b/internal/conditions/nvidiadriver.go index 749a202a5..c3ff538fd 100644 --- a/internal/conditions/nvidiadriver.go +++ b/internal/conditions/nvidiadriver.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/v1alpha1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" ) const ( diff --git a/internal/consts/consts.go 
b/internal/consts/consts.go index a65c3027d..c2850f419 100644 --- a/internal/consts/consts.go +++ b/internal/consts/consts.go @@ -39,6 +39,9 @@ const ( // Containerd runtime Containerd = "containerd" + // OpenshiftNamespace indicates the main namespace of an OpenShift cluster + OpenshiftNamespace = "openshift" + OcpDriverToolkitVersionLabel = "openshift.driver-toolkit.rhcos" OcpDriverToolkitIdentificationLabel = "openshift.driver-toolkit" NfdOSTreeVersionLabelKey = "feature.node.kubernetes.io/system-os_release.OSTREE_VERSION" diff --git a/internal/state/driver.go b/internal/state/driver.go index 8a00216d3..34e19ef71 100644 --- a/internal/state/driver.go +++ b/internal/state/driver.go @@ -29,15 +29,20 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" - nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/v1alpha1" + gpuv1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" "github.com/NVIDIA/gpu-operator/controllers/clusterinfo" "github.com/NVIDIA/gpu-operator/internal/consts" "github.com/NVIDIA/gpu-operator/internal/image" @@ -48,6 +53,11 @@ import ( const ( nfdOSReleaseIDLabelKey = "feature.node.kubernetes.io/system-os_release.ID" nfdOSVersionIDLabelKey = "feature.node.kubernetes.io/system-os_release.VERSION_ID" + + // AppComponentLabelKey indicates the label key of the component + AppComponentLabelKey = "app.kubernetes.io/component" + // AppComponentLabelValue indicates the label value of the nvidia-gpu-driver component + AppComponentLabelValue = "nvidia-driver" ) type stateDriver struct { @@ -90,6 +100,7 @@ type driverRenderData struct { Openshift *openshiftSpec Precompiled *precompiledSpec AdditionalConfigs *additionalConfigs + HostRoot string } func NewStateDriver( @@ -121,23 +132,12 @@ func (s *stateDriver) Sync(ctx context.Context, customResource interface{}, info return SyncStateError, fmt.Errorf("NVIDIADriver CR not provided as input to Sync()") } - info := infoCatalog.Get(InfoTypeClusterPolicyCR) - if info == nil { - return SyncStateError, fmt.Errorf("failed to get ClusterPolicy CR from info catalog") - } - - info = infoCatalog.Get(InfoTypeClusterInfo) - if info == nil { - return SyncStateNotReady, fmt.Errorf("failed to get cluster info from info catalog") - } - clusterInfo := info.(clusterinfo.Interface) - err := s.cleanupStaleDriverDaemonsets(ctx, cr) if err != nil { return SyncStateNotReady, fmt.Errorf("failed to cleanup stale driver DaemonSets: %w", err) } - objs, err := s.getManifestObjects(ctx, cr, clusterInfo) + objs, err := s.getManifestObjects(ctx, cr, infoCatalog) if err != nil { return SyncStateNotReady, fmt.Errorf("failed to create k8s objects from manifests: %v", err) } @@ -163,9 +163,17 @@ func (s *stateDriver) Sync(ctx context.Context, customResource interface{}, info func (s *stateDriver) GetWatchSources(mgr ctrlManager) map[string]SyncingSource { wr := make(map[string]SyncingSource) + nvDriverPredicate := predicate.NewTypedPredicateFuncs(func(ds *appsv1.DaemonSet) bool { + ls := metav1.LabelSelector{MatchLabels: map[string]string{AppComponentLabelKey: AppComponentLabelValue}} + selector, _ := metav1.LabelSelectorAsSelector(&ls) + return selector.Matches(labels.Set(ds.GetLabels())) + }) wr["DaemonSet"] = source.Kind( mgr.GetCache(), &appsv1.DaemonSet{}, + handler.TypedEnqueueRequestForOwner[*appsv1.DaemonSet](mgr.GetScheme(), mgr.GetRESTMapper(), + &nvidiav1alpha1.NVIDIADriver{}, handler.OnlyControllerOwner()), + nvDriverPredicate, ) return wr } @@ -183,7 +191,10 @@ func (s *stateDriver) cleanupStaleDriverDaemonsets(ctx context.Context, cr *nvid for _, ds := range list.Items { ds := ds - if ds.Status.DesiredNumberScheduled == 0 { + // We consider a DaemonSet stale only if it has zero desired pods and no currently mis-scheduled pods. + // Per the Kubernetes docs, a DaemonSet pod is mis-scheduled when an already scheduled pod no longer satisfies + // node affinity constraints or has un-tolerated taints, e.g. "node.kubernetes.io/unreachable:NoSchedule". + if ds.Status.DesiredNumberScheduled == 0 && ds.Status.NumberMisscheduled == 0 { logger.V(consts.LogLevelInfo).Info("Deleting inactive driver DaemonSet", "Name", ds.Name) err = s.client.Delete(ctx, &ds) if err != nil { @@ -197,9 +208,21 @@ func (s *stateDriver) cleanupStaleDriverDaemonsets(ctx context.Context, cr *nvid return nil } -func (s *stateDriver) getManifestObjects(ctx context.Context, cr *nvidiav1alpha1.NVIDIADriver, clusterInfo clusterinfo.Interface) ([]*unstructured.Unstructured, error) { +func (s *stateDriver) getManifestObjects(ctx context.Context, cr *nvidiav1alpha1.NVIDIADriver, infoCatalog InfoCatalog) ([]*unstructured.Unstructured, error) { logger := log.FromContext(ctx) + info := infoCatalog.Get(InfoTypeClusterPolicyCR) + if info == nil { + return nil, fmt.Errorf("failed to get ClusterPolicy CR from info catalog") + } + clusterPolicy := info.(gpuv1.ClusterPolicy) + + info = infoCatalog.Get(InfoTypeClusterInfo) + if info == nil { + return nil, fmt.Errorf("failed to get cluster info from info catalog") + } + clusterInfo := info.(clusterinfo.Interface) + runtimeSpec, err := getRuntimeSpec(ctx, s.client, clusterInfo, &cr.Spec) if err != nil { return nil, fmt.Errorf("failed to construct cluster runtime spec: %w", err) } @@ -210,6 +233,7 @@ func (s *stateDriver) getManifestObjects(ctx context.Context, cr *nvidiav1alpha1 renderData := &driverRenderData{ GPUDirectRDMA: gpuDirectRDMASpec, Runtime: runtimeSpec, + HostRoot: clusterPolicy.Spec.HostPaths.RootFS, } if len(runtimeSpec.NodePools) == 0 { diff --git a/internal/state/driver_test.go b/internal/state/driver_test.go index bb1b354d1..a591fa9d3 100644 --- a/internal/state/driver_test.go +++ b/internal/state/driver_test.go @@ -18,7 +18,6 @@ package state import ( "bytes" - "fmt" "os" "path/filepath" "strings" @@ -27,17 +26,15 @@ import ( configv1 "github.com/openshift/api/config/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer/json" apitypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" - nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/v1alpha1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" "github.com/NVIDIA/gpu-operator/internal/render"
"github.com/NVIDIA/gpu-operator/internal/utils" ) @@ -105,6 +102,8 @@ func TestDriverRenderRDMA(t *testing.T) { renderData := getMinimalDriverRenderData() + renderData.AdditionalConfigs = getSampleAdditionalConfigs() + renderData.GPUDirectRDMA = &nvidiav1alpha1.GPUDirectRDMASpec{ Enabled: utils.BoolPtr(true), } @@ -116,50 +115,6 @@ func TestDriverRenderRDMA(t *testing.T) { require.Nil(t, err) require.NotEmpty(t, objs) - ds, err := getDaemonSetObj(objs) - require.Nil(t, err) - require.NotNil(t, ds) - - nvidiaDriverCtr, err := getContainerObj(ds.Spec.Template.Spec.Containers, "nvidia-driver-ctr") - require.Nil(t, err, "nvidia-driver-ctr should be in the list of containers") - - driverEnvars := []corev1.EnvVar{ - { - Name: "NVIDIA_VISIBLE_DEVICES", - Value: "void", - }, - { - Name: "GPU_DIRECT_RDMA_ENABLED", - Value: "true", - }, - } - checkEnv(t, driverEnvars, nvidiaDriverCtr.Env) - - nvidiaPeermemCtr, err := getContainerObj(ds.Spec.Template.Spec.Containers, "nvidia-peermem-ctr") - require.Nil(t, err, "nvidia-peermem-ctr should be in the list of containers") - - peermemEnvars := []corev1.EnvVar{ - { - Name: "NVIDIA_VISIBLE_DEVICES", - Value: "void", - }, - } - - checkEnv(t, peermemEnvars, nvidiaPeermemCtr.Env) - - expectedVolumes := getDriverVolumes() - expectedVolumes = append(expectedVolumes, corev1.Volume{ - Name: "mlnx-ofed-usr-src", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/run/mellanox/drivers/usr/src", - Type: newHostPathType(corev1.HostPathDirectoryOrCreate), - }, - }, - }) - - checkVolumes(t, expectedVolumes, ds.Spec.Template.Spec.Volumes) - actual, err := getYAMLString(objs) require.Nil(t, err) @@ -180,6 +135,8 @@ func TestDriverRDMAHostMOFED(t *testing.T) { renderData := getMinimalDriverRenderData() + renderData.AdditionalConfigs = getSampleAdditionalConfigs() + renderData.GPUDirectRDMA = &nvidiav1alpha1.GPUDirectRDMASpec{ Enabled: utils.BoolPtr(true), UseHostMOFED: utils.BoolPtr(true), @@ -300,6 +257,8 @@ func TestDriverGDS(t *testing.T) { renderData := getMinimalDriverRenderData() + renderData.AdditionalConfigs = getSampleAdditionalConfigs() + renderData.GDS = &gdsDriverSpec{ ImagePath: "nvcr.io/nvidia/cloud-native/nvidia-fs:2.16.1", Spec: &nvidiav1alpha1.GPUDirectStorageSpec{ @@ -336,6 +295,8 @@ func TestDriverGDRCopy(t *testing.T) { renderData := getMinimalDriverRenderData() + renderData.AdditionalConfigs = getSampleAdditionalConfigs() + renderData.GDRCopy = &gdrcopyDriverSpec{ ImagePath: "nvcr.io/nvidia/cloud-native/gdrdrv:v2.4.1", Spec: &nvidiav1alpha1.GDRCopySpec{ @@ -373,6 +334,7 @@ func TestDriverGDRCopyOpenShift(t *testing.T) { require.True(t, ok) renderData := getMinimalDriverRenderData() + renderData.AdditionalConfigs = getSampleAdditionalConfigs() renderData.Driver.Name = "nvidia-gpu-driver-openshift" renderData.Driver.AppName = "nvidia-gpu-driver-openshift-79d6bd954f" renderData.Driver.ImagePath = "nvcr.io/nvidia/driver:525.85.03-rhel8.0" @@ -428,61 +390,7 @@ func TestDriverAdditionalConfigs(t *testing.T) { renderData := getMinimalDriverRenderData() - renderData.AdditionalConfigs = &additionalConfigs{ - VolumeMounts: []corev1.VolumeMount{ - { - Name: "test-cm", - ReadOnly: true, - MountPath: "/opt/config/test-file", - SubPath: "test-file", - }, - { - Name: "test-host-path", - MountPath: "/opt/config/test-host-path", - }, - { - Name: "test-host-path-ro", - MountPath: "/opt/config/test-host-path-ro", - ReadOnly: true, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "test-cm", - VolumeSource: corev1.VolumeSource{ - 
ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "test-cm", - }, - Items: []corev1.KeyToPath{ - { - Key: "test-file", - Path: "test-file", - }, - }, - }, - }, - }, - { - Name: "test-host-path", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/opt/config/test-host-path", - Type: newHostPathType(corev1.HostPathDirectoryOrCreate), - }, - }, - }, - { - Name: "test-host-path-ro", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/opt/config/test-host-path-ro", - Type: newHostPathType(corev1.HostPathDirectoryOrCreate), - }, - }, - }, - }, - } + renderData.AdditionalConfigs = getSampleAdditionalConfigs() objs, err := stateDriver.renderer.RenderObjects( &render.TemplatingData{ @@ -668,29 +576,43 @@ func TestVGPUHostManagerDaemonset(t *testing.T) { require.Equal(t, string(o), actual) } -func getDaemonSetObj(objs []*unstructured.Unstructured) (*appsv1.DaemonSet, error) { - ds := &appsv1.DaemonSet{} +func TestVGPUHostManagerDaemonsetOpenShift(t *testing.T) { + const ( + testName = "driver-vgpu-host-manager-openshift" + rhcosVersion = "413.92.202304252344-0" + toolkitImage = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7fecaebc1d51b28bc3548171907e4d91823a031d7a6a694ab686999be2b4d867" + ) + state, err := NewStateDriver(nil, nil, manifestDir) + require.Nil(t, err) + stateDriver, ok := state.(*stateDriver) + require.True(t, ok) - for _, obj := range objs { - if obj.GetKind() == "DaemonSet" { - err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, ds) - if err != nil { - return nil, err - } - return ds, nil - } + renderData := getMinimalDriverRenderData() + renderData.Driver.Spec.DriverType = nvidiav1alpha1.VGPUHostManager + renderData.Driver.Name = "nvidia-vgpu-manager-openshift" + renderData.Driver.AppName = "nvidia-vgpu-manager-openshift-7c6d7bd86b" + renderData.Driver.ImagePath = "nvcr.io/nvidia/vgpu-manager:525.85.03-rhel8.0" + renderData.Driver.OSVersion = "rhel8.0" + renderData.Openshift = &openshiftSpec{ + ToolkitImage: toolkitImage, + RHCOSVersion: rhcosVersion, } + renderData.Runtime.OpenshiftDriverToolkitEnabled = true + renderData.Runtime.OpenshiftVersion = "4.13" - return nil, fmt.Errorf("could not find object of kind 'DaemonSet'") -} + objs, err := stateDriver.renderer.RenderObjects( + &render.TemplatingData{ + Data: renderData, + }) + require.Nil(t, err) -func getContainerObj(containers []corev1.Container, name string) (corev1.Container, error) { - for _, c := range containers { - if c.Name == name { - return c, nil - } - } - return corev1.Container{}, fmt.Errorf("failed to find container with name '%s'", name) + actual, err := getYAMLString(objs) + require.Nil(t, err) + + o, err := os.ReadFile(filepath.Join(manifestResultDir, testName+".yaml")) + require.Nil(t, err) + + require.Equal(t, string(o), actual) } func getMinimalDriverRenderData() *driverRenderData { @@ -712,6 +634,7 @@ func getMinimalDriverRenderData() *driverRenderData { Namespace: "test-operator", KubernetesVersion: "1.28.0", }, + HostRoot: "", } } @@ -725,150 +648,58 @@ func getDefaultContainerProbeSpec() *nvidiav1alpha1.ContainerProbeSpec { } } -func checkEnv(t *testing.T, input []corev1.EnvVar, output []corev1.EnvVar) { - inputMap := map[string]string{} - for _, env := range input { - inputMap[env.Name] = env.Value - } - - outputMap := map[string]string{} - for _, env := range output { - outputMap[env.Name] = env.Value - } - - for key, value := range inputMap { - 
outputValue, exists := outputMap[key] - require.True(t, exists) - require.Equal(t, value, outputValue) - } -} - -func checkVolumes(t *testing.T, expected []corev1.Volume, actual []corev1.Volume) { - expectedMap := volumeSliceToMap(expected) - actualMap := volumeSliceToMap(actual) - - require.Equal(t, len(expectedMap), len(actualMap)) - - for k, vol := range expectedMap { - expectedVol, exists := actualMap[k] - require.True(t, exists) - require.Equal(t, expectedVol.HostPath.Path, vol.HostPath.Path, - "Mismatch in Host Path value for volume %s", vol.Name) - require.Equal(t, expectedVol.HostPath.Type, vol.HostPath.Type, - "Mismatch in Host Path type for volume %s", vol.Name) - } -} - -func volumeSliceToMap(volumes []corev1.Volume) map[string]corev1.Volume { - volumeMap := map[string]corev1.Volume{} - for _, v := range volumes { - volumeMap[v.Name] = v - } - - return volumeMap -} - -func getDriverVolumes() []corev1.Volume { - return []corev1.Volume{ - { - Name: "run-nvidia", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/run/nvidia", - Type: newHostPathType(corev1.HostPathDirectoryOrCreate), - }, - }, - }, - { - Name: "var-log", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/log", - }, - }, - }, - { - Name: "dev-log", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/dev/log", - }, - }, - }, - { - Name: "host-os-release", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/etc/os-release", - }, - }, - }, - { - Name: "run-nvidia-topologyd", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/run/nvidia-topologyd", - Type: newHostPathType(corev1.HostPathDirectoryOrCreate), - }, - }, - }, - { - Name: "run-mellanox-drivers", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/run/mellanox/drivers", - Type: newHostPathType(corev1.HostPathDirectoryOrCreate), - }, - }, - }, - { - Name: "run-nvidia-validations", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/run/nvidia/validations", - Type: newHostPathType(corev1.HostPathDirectoryOrCreate), - }, +func getSampleAdditionalConfigs() *additionalConfigs { + return &additionalConfigs{ + VolumeMounts: []corev1.VolumeMount{ + { + Name: "test-cm", + ReadOnly: true, + MountPath: "/opt/config/test-file", + SubPath: "test-file", }, - }, - { - Name: "host-root", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/", - }, + { + Name: "test-host-path", + MountPath: "/opt/config/test-host-path", }, - }, - { - Name: "host-sys", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/sys", - Type: newHostPathType(corev1.HostPathDirectory), - }, + { + Name: "test-host-path-ro", + MountPath: "/opt/config/test-host-path-ro", + ReadOnly: true, }, }, - { - Name: "firmware-search-path", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/sys/module/firmware_class/parameters/path", + Volumes: []corev1.Volume{ + { + Name: "test-cm", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-cm", + }, + Items: []corev1.KeyToPath{ + { + Key: "test-file", + Path: "test-file", + }, + }, + }, }, }, - }, - { - Name: "sysfs-memory-online", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ 
- Path: "/sys/devices/system/memory/auto_online_blocks", + { + Name: "test-host-path", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/opt/config/test-host-path", + Type: newHostPathType(corev1.HostPathDirectoryOrCreate), + }, }, }, - }, - { - Name: "nv-firmware", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/run/nvidia/driver/lib/firmware", - Type: newHostPathType(corev1.HostPathDirectoryOrCreate), + { + Name: "test-host-path-ro", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/opt/config/test-host-path-ro", + Type: newHostPathType(corev1.HostPathDirectoryOrCreate), + }, }, }, }, diff --git a/internal/state/driver_volumes.go b/internal/state/driver_volumes.go index 66ccf56a9..b11ad512b 100644 --- a/internal/state/driver_volumes.go +++ b/internal/state/driver_volumes.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/log" - "github.com/NVIDIA/gpu-operator/api/v1alpha1" + "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" "github.com/NVIDIA/gpu-operator/controllers/clusterinfo" "github.com/NVIDIA/gpu-operator/internal/consts" ) diff --git a/internal/state/manager.go b/internal/state/manager.go index 89acc5799..b61eb77b5 100644 --- a/internal/state/manager.go +++ b/internal/state/manager.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/v1alpha1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" "github.com/NVIDIA/gpu-operator/internal/consts" ) diff --git a/internal/state/testdata/golden/driver-additional-configs.yaml b/internal/state/testdata/golden/driver-additional-configs.yaml index f4eb1a5d4..2200df4f4 100644 --- a/internal/state/testdata/golden/driver-additional-configs.yaml +++ b/internal/state/testdata/golden/driver-additional-configs.yaml @@ -18,23 +18,6 @@ rules: - securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -152,6 +132,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP image: nvcr.io/nvidia/driver:525.85.03-ubuntu22.04 imagePullPolicy: IfNotPresent lifecycle: diff --git a/internal/state/testdata/golden/driver-full-spec.yaml b/internal/state/testdata/golden/driver-full-spec.yaml index c3df3c898..60065333e 100644 --- a/internal/state/testdata/golden/driver-full-spec.yaml +++ b/internal/state/testdata/golden/driver-full-spec.yaml @@ -18,23 +18,6 @@ rules: - securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - 
statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -158,6 +138,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: OPEN_KERNEL_MODULES_ENABLED value: "true" - name: FOO diff --git a/internal/state/testdata/golden/driver-gdrcopy-openshift.yaml b/internal/state/testdata/golden/driver-gdrcopy-openshift.yaml index 67e1f93fe..0324944d0 100644 --- a/internal/state/testdata/golden/driver-gdrcopy-openshift.yaml +++ b/internal/state/testdata/golden/driver-gdrcopy-openshift.yaml @@ -18,23 +18,6 @@ rules: - securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -208,6 +188,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: OPENSHIFT_VERSION value: "4.13" - name: HTTP_PROXY @@ -272,6 +260,15 @@ spec: name: sysfs-memory-online - mountPath: /lib/firmware name: nv-firmware + - mountPath: /opt/config/test-file + name: test-cm + readOnly: true + subPath: test-file + - mountPath: /opt/config/test-host-path + name: test-host-path + - mountPath: /opt/config/test-host-path-ro + name: test-host-path-ro + readOnly: true - mountPath: /mnt/shared-nvidia-driver-toolkit name: shared-nvidia-driver-toolkit - mountPath: /etc/pki/ca-trust/extracted/pem @@ -321,6 +318,15 @@ spec: readOnly: true - mountPath: /mnt/shared-nvidia-driver-toolkit name: shared-nvidia-driver-toolkit + - mountPath: /opt/config/test-file + name: test-cm + readOnly: true + subPath: test-file + - mountPath: /opt/config/test-host-path + name: test-host-path + - mountPath: /opt/config/test-host-path-ro + name: test-host-path-ro + readOnly: true - args: - until [ -f /mnt/shared-nvidia-driver-toolkit/dir_prepared ]; do echo Waiting for nvidia-driver-ctr container to prepare the shared directory ...; sleep @@ -353,6 +359,10 @@ spec: - mountPath: /host-etc/os-release name: host-os-release readOnly: true + - mountPath: /sys/module/firmware_class/parameters/path + name: firmware-search-path + - mountPath: /lib/firmware + name: nv-firmware hostPID: true imagePullSecrets: - name: ngc-secret @@ -458,6 +468,20 @@ spec: path: /run/nvidia/driver/lib/firmware type: DirectoryOrCreate name: nv-firmware + - configMap: + items: + - key: test-file + path: test-file + name: test-cm + name: test-cm + - hostPath: + path: /opt/config/test-host-path + type: DirectoryOrCreate + name: test-host-path + - hostPath: + path: /opt/config/test-host-path-ro + type: DirectoryOrCreate + name: test-host-path-ro - emptyDir: {} name: shared-nvidia-driver-toolkit - configMap: diff --git a/internal/state/testdata/golden/driver-gdrcopy.yaml b/internal/state/testdata/golden/driver-gdrcopy.yaml index ae5f98395..eedde2670 100644 --- a/internal/state/testdata/golden/driver-gdrcopy.yaml +++ b/internal/state/testdata/golden/driver-gdrcopy.yaml @@ -18,23 +18,6 @@ rules: - 
securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -152,6 +132,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP image: nvcr.io/nvidia/driver:525.85.03-ubuntu22.04 imagePullPolicy: IfNotPresent lifecycle: @@ -202,6 +190,15 @@ spec: name: sysfs-memory-online - mountPath: /lib/firmware name: nv-firmware + - mountPath: /opt/config/test-file + name: test-cm + readOnly: true + subPath: test-file + - mountPath: /opt/config/test-host-path + name: test-host-path + - mountPath: /opt/config/test-host-path-ro + name: test-host-path-ro + readOnly: true - args: - until [ -d /run/nvidia/driver/usr/src ] && lsmod | grep nvidia; do echo Waiting for nvidia-driver to be installed...; sleep 10; done; exec nvidia-gdrcopy-driver @@ -247,6 +244,15 @@ spec: - mountPath: /dev/log name: dev-log readOnly: true + - mountPath: /opt/config/test-file + name: test-cm + readOnly: true + subPath: test-file + - mountPath: /opt/config/test-host-path + name: test-host-path + - mountPath: /opt/config/test-host-path-ro + name: test-host-path-ro + readOnly: true hostPID: true imagePullSecrets: - name: ngc-secrets @@ -351,6 +357,20 @@ spec: path: /run/nvidia/driver/lib/firmware type: DirectoryOrCreate name: nv-firmware + - configMap: + items: + - key: test-file + path: test-file + name: test-cm + name: test-cm + - hostPath: + path: /opt/config/test-host-path + type: DirectoryOrCreate + name: test-host-path + - hostPath: + path: /opt/config/test-host-path-ro + type: DirectoryOrCreate + name: test-host-path-ro updateStrategy: type: OnDelete --- diff --git a/internal/state/testdata/golden/driver-gds.yaml b/internal/state/testdata/golden/driver-gds.yaml index 934c7d095..1ebf80c70 100644 --- a/internal/state/testdata/golden/driver-gds.yaml +++ b/internal/state/testdata/golden/driver-gds.yaml @@ -18,23 +18,6 @@ rules: - securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -152,6 +132,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP image: nvcr.io/nvidia/driver:525.85.03-ubuntu22.04 imagePullPolicy: IfNotPresent lifecycle: @@ -202,6 +190,15 @@ spec: name: sysfs-memory-online - mountPath: /lib/firmware name: nv-firmware + - mountPath: /opt/config/test-file + name: test-cm + readOnly: true + subPath: test-file + - mountPath: /opt/config/test-host-path + name: test-host-path + - mountPath: /opt/config/test-host-path-ro + name: 
test-host-path-ro + readOnly: true - args: - until [ -d /run/nvidia/driver/usr/src ] && lsmod | grep nvidia; do echo Waiting for nvidia-driver to be installed...; sleep 10; done; exec nvidia-gds-driver @@ -247,6 +244,15 @@ spec: - mountPath: /dev/log name: dev-log readOnly: true + - mountPath: /opt/config/test-file + name: test-cm + readOnly: true + subPath: test-file + - mountPath: /opt/config/test-host-path + name: test-host-path + - mountPath: /opt/config/test-host-path-ro + name: test-host-path-ro + readOnly: true hostPID: true imagePullSecrets: - name: ngc-secrets @@ -351,6 +357,20 @@ spec: path: /run/nvidia/driver/lib/firmware type: DirectoryOrCreate name: nv-firmware + - configMap: + items: + - key: test-file + path: test-file + name: test-cm + name: test-cm + - hostPath: + path: /opt/config/test-host-path + type: DirectoryOrCreate + name: test-host-path + - hostPath: + path: /opt/config/test-host-path-ro + type: DirectoryOrCreate + name: test-host-path-ro updateStrategy: type: OnDelete --- diff --git a/internal/state/testdata/golden/driver-minimal.yaml b/internal/state/testdata/golden/driver-minimal.yaml index 4feebe5ef..f834e2474 100644 --- a/internal/state/testdata/golden/driver-minimal.yaml +++ b/internal/state/testdata/golden/driver-minimal.yaml @@ -18,23 +18,6 @@ rules: - securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -152,6 +132,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP image: nvcr.io/nvidia/driver:525.85.03-ubuntu22.04 imagePullPolicy: IfNotPresent lifecycle: diff --git a/internal/state/testdata/golden/driver-openshift-drivertoolkit.yaml b/internal/state/testdata/golden/driver-openshift-drivertoolkit.yaml index ab29eca7d..ad63e44a9 100644 --- a/internal/state/testdata/golden/driver-openshift-drivertoolkit.yaml +++ b/internal/state/testdata/golden/driver-openshift-drivertoolkit.yaml @@ -18,23 +18,6 @@ rules: - securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -208,6 +188,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: OPENSHIFT_VERSION value: "4.13" - name: HTTP_PROXY @@ -307,6 +295,10 @@ spec: - mountPath: /host-etc/os-release name: host-os-release readOnly: true + - mountPath: /sys/module/firmware_class/parameters/path + name: firmware-search-path + - mountPath: /lib/firmware + name: nv-firmware hostPID: true initContainers: - args: diff 
--git a/internal/state/testdata/golden/driver-precompiled.yaml b/internal/state/testdata/golden/driver-precompiled.yaml index ed9f6b6ff..2b0728407 100644 --- a/internal/state/testdata/golden/driver-precompiled.yaml +++ b/internal/state/testdata/golden/driver-precompiled.yaml @@ -18,23 +18,6 @@ rules: - securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -154,6 +134,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP image: nvcr.io/nvidia/driver:535-5.4.0-150-generic-ubuntu22.04 imagePullPolicy: IfNotPresent lifecycle: diff --git a/internal/state/testdata/golden/driver-rdma-hostmofed.yaml b/internal/state/testdata/golden/driver-rdma-hostmofed.yaml index 679d8cca3..29a712342 100644 --- a/internal/state/testdata/golden/driver-rdma-hostmofed.yaml +++ b/internal/state/testdata/golden/driver-rdma-hostmofed.yaml @@ -18,23 +18,6 @@ rules: - securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -152,6 +132,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: GPU_DIRECT_RDMA_ENABLED value: "true" - name: USE_HOST_MOFED @@ -206,6 +194,15 @@ spec: name: sysfs-memory-online - mountPath: /lib/firmware name: nv-firmware + - mountPath: /opt/config/test-file + name: test-cm + readOnly: true + subPath: test-file + - mountPath: /opt/config/test-host-path + name: test-host-path + - mountPath: /opt/config/test-host-path-ro + name: test-host-path-ro + readOnly: true - args: - reload_nvidia_peermem command: @@ -256,6 +253,15 @@ spec: - mountPath: /run/mellanox/drivers mountPropagation: HostToContainer name: run-mellanox-drivers + - mountPath: /opt/config/test-file + name: test-cm + readOnly: true + subPath: test-file + - mountPath: /opt/config/test-host-path + name: test-host-path + - mountPath: /opt/config/test-host-path-ro + name: test-host-path-ro + readOnly: true hostPID: true initContainers: - args: @@ -362,6 +368,20 @@ spec: path: /run/nvidia/driver/lib/firmware type: DirectoryOrCreate name: nv-firmware + - configMap: + items: + - key: test-file + path: test-file + name: test-cm + name: test-cm + - hostPath: + path: /opt/config/test-host-path + type: DirectoryOrCreate + name: test-host-path + - hostPath: + path: /opt/config/test-host-path-ro + type: DirectoryOrCreate + name: test-host-path-ro updateStrategy: type: OnDelete --- diff --git a/internal/state/testdata/golden/driver-rdma.yaml 
b/internal/state/testdata/golden/driver-rdma.yaml index b33e779f7..b43d212c5 100644 --- a/internal/state/testdata/golden/driver-rdma.yaml +++ b/internal/state/testdata/golden/driver-rdma.yaml @@ -18,23 +18,6 @@ rules: - securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -152,6 +132,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP - name: GPU_DIRECT_RDMA_ENABLED value: "true" image: nvcr.io/nvidia/driver:525.85.03-ubuntu22.04 @@ -204,6 +192,15 @@ spec: name: sysfs-memory-online - mountPath: /lib/firmware name: nv-firmware + - mountPath: /opt/config/test-file + name: test-cm + readOnly: true + subPath: test-file + - mountPath: /opt/config/test-host-path + name: test-host-path + - mountPath: /opt/config/test-host-path-ro + name: test-host-path-ro + readOnly: true - args: - reload_nvidia_peermem command: @@ -252,6 +249,15 @@ spec: - mountPath: /run/mellanox/drivers mountPropagation: HostToContainer name: run-mellanox-drivers + - mountPath: /opt/config/test-file + name: test-cm + readOnly: true + subPath: test-file + - mountPath: /opt/config/test-host-path + name: test-host-path + - mountPath: /opt/config/test-host-path-ro + name: test-host-path-ro + readOnly: true hostPID: true initContainers: - args: @@ -356,6 +362,20 @@ spec: path: /run/nvidia/driver/lib/firmware type: DirectoryOrCreate name: nv-firmware + - configMap: + items: + - key: test-file + path: test-file + name: test-cm + name: test-cm + - hostPath: + path: /opt/config/test-host-path + type: DirectoryOrCreate + name: test-host-path + - hostPath: + path: /opt/config/test-host-path-ro + type: DirectoryOrCreate + name: test-host-path-ro updateStrategy: type: OnDelete --- diff --git a/internal/state/testdata/golden/driver-vgpu-host-manager-openshift.yaml b/internal/state/testdata/golden/driver-vgpu-host-manager-openshift.yaml new file mode 100644 index 000000000..f21a74423 --- /dev/null +++ b/internal/state/testdata/golden/driver-vgpu-host-manager-openshift.yaml @@ -0,0 +1,376 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nvidia-vgpu-manager-openshift + namespace: test-operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: nvidia-vgpu-manager-openshift + namespace: test-operator +rules: +- apiGroups: + - security.openshift.io + resourceNames: + - privileged + resources: + - securitycontextconstraints + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: nvidia-vgpu-manager-openshift +rules: +- apiGroups: + - config.openshift.io + resources: + - clusterversions + verbs: + - get + - list +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + - pods/eviction + verbs: + - create + - delete + - get + - list + - patch + - update +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
RoleBinding +metadata: + name: nvidia-vgpu-manager-openshift + namespace: test-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nvidia-vgpu-manager-openshift +subjects: +- kind: ServiceAccount + name: nvidia-vgpu-manager-openshift + namespace: test-operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: nvidia-vgpu-manager-openshift +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nvidia-vgpu-manager-openshift +subjects: +- kind: ServiceAccount + name: nvidia-vgpu-manager-openshift + namespace: test-operator +--- +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: false +allowHostPID: true +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: +- '*' +allowedUnsafeSysctls: +- '*' +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: null +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +- system:nodes +- system:masters +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: 'privileged allows access to all privileged and host + features and the ability to run as any user, any group, any fsGroup, and with + any SELinux context. WARNING: this is the most relaxed SCC and should be used + only for cluster administration. Grant with caution.' + name: nvidia-vgpu-manager-openshift +priority: null +readOnlyRootFilesystem: false +requiredDropCapabilities: null +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +seccompProfiles: +- '*' +supplementalGroups: + type: RunAsAny +users: +- system:serviceaccount:test-operator:nvidia-vgpu-manager-openshift +volumes: +- '*' +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + openshift.io/scc: nvidia-vgpu-manager-openshift + labels: + app: nvidia-vgpu-manager-openshift-7c6d7bd86b + app.kubernetes.io/component: nvidia-vgpu-host-manager + nvidia.com/node.os-version: rhel8.0 + nvidia.com/precompiled: "false" + openshift.driver-toolkit: "true" + openshift.driver-toolkit.rhcos: 413.92.202304252344-0 + name: nvidia-vgpu-manager-openshift-7c6d7bd86b + namespace: test-operator +spec: + selector: + matchLabels: + app: nvidia-vgpu-manager-openshift-7c6d7bd86b + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: nvidia-driver-ctr + labels: + app: nvidia-vgpu-manager-openshift-7c6d7bd86b + app.kubernetes.io/component: nvidia-vgpu-host-manager + nvidia.com/node.os-version: rhel8.0 + nvidia.com/precompiled: "false" + openshift.driver-toolkit: "true" + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - nvidia-driver + - nvidia-vgpu-manager + topologyKey: kubernetes.io/hostname + containers: + - args: + - nv-ctr-run-with-dtk + command: + - ocp_dtk_entrypoint + env: + - name: NVIDIA_VISIBLE_DEVICES + value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: OPENSHIFT_VERSION + value: "4.13" + image: nvcr.io/nvidia/vgpu-manager:525.85.03-rhel8.0 + imagePullPolicy: IfNotPresent + name: nvidia-driver-ctr + securityContext: + privileged: true + seLinuxOptions: + level: s0 + volumeMounts: + - mountPath: /run/nvidia + mountPropagation: Bidirectional + name: run-nvidia + - mountPath: /run/nvidia-topologyd + name: run-nvidia-topologyd + - mountPath: 
/var/log + name: var-log + - mountPath: /dev/log + name: dev-log + - mountPath: /host-etc/os-release + name: host-os-release + readOnly: true + - mountPath: /sys/fs/cgroup + name: cgroup + - mountPath: /dev/vfio + name: vfio + - mountPath: /run/mellanox/drivers/usr/src + mountPropagation: HostToContainer + name: mlnx-ofed-usr-src + - mountPath: /run/mellanox/drivers + mountPropagation: HostToContainer + name: run-mellanox-drivers + - mountPath: /sys/module/firmware_class/parameters/path + name: firmware-search-path + - mountPath: /sys/devices/system/memory/auto_online_blocks + name: sysfs-memory-online + - mountPath: /lib/firmware + name: nv-firmware + - mountPath: /mnt/shared-nvidia-driver-toolkit + name: shared-nvidia-driver-toolkit + - args: + - until [ -f /mnt/shared-nvidia-driver-toolkit/dir_prepared ]; do echo Waiting + for nvidia-driver-ctr container to prepare the shared directory ...; sleep + 10; done; exec /mnt/shared-nvidia-driver-toolkit/ocp_dtk_entrypoint dtk-build-driver + command: + - bash + - -xc + env: + - name: RHCOS_VERSION + value: 413.92.202304252344-0 + - name: NVIDIA_VISIBLE_DEVICES + value: void + image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7fecaebc1d51b28bc3548171907e4d91823a031d7a6a694ab686999be2b4d867 + imagePullPolicy: IfNotPresent + name: openshift-driver-toolkit-ctr + securityContext: + privileged: true + seLinuxOptions: + level: s0 + volumeMounts: + - mountPath: /mnt/shared-nvidia-driver-toolkit + name: shared-nvidia-driver-toolkit + - mountPath: /var/log + name: var-log + - mountPath: /run/mellanox/drivers/usr/src + mountPropagation: HostToContainer + name: mlnx-ofed-usr-src + - mountPath: /host-etc/os-release + name: host-os-release + readOnly: true + - mountPath: /sys/module/firmware_class/parameters/path + name: firmware-search-path + - mountPath: /lib/firmware + name: nv-firmware + hostPID: true + initContainers: + - args: + - uninstall_driver + command: + - driver-manager + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NVIDIA_VISIBLE_DEVICES + value: void + - name: ENABLE_GPU_POD_EVICTION + value: "true" + - name: ENABLE_AUTO_DRAIN + value: "false" + - name: DRAIN_USE_FORCE + value: "false" + - name: DRAIN_POD_SELECTOR_LABEL + value: "" + - name: DRAIN_TIMEOUT_SECONDS + value: 0s + - name: DRAIN_DELETE_EMPTYDIR_DATA + value: "false" + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: nvcr.io/nvidia/cloud-native/k8s-driver-manager:devel + imagePullPolicy: IfNotPresent + name: k8s-driver-manager + securityContext: + privileged: true + volumeMounts: + - mountPath: /run/nvidia + mountPropagation: Bidirectional + name: run-nvidia + - mountPath: /host + mountPropagation: HostToContainer + name: host-root + readOnly: true + - mountPath: /sys + name: host-sys + - mountPath: /run/mellanox/drivers + mountPropagation: HostToContainer + name: run-mellanox-drivers + nodeSelector: + feature.node.kubernetes.io/system-os_release.OSTREE_VERSION: 413.92.202304252344-0 + nvidia.com/gpu.deploy.vgpu-manager: "true" + priorityClassName: system-node-critical + serviceAccountName: nvidia-vgpu-manager-openshift + tolerations: + - effect: NoSchedule + key: nvidia.com/gpu + operator: Exists + volumes: + - hostPath: + path: /run/nvidia + type: DirectoryOrCreate + name: run-nvidia + - hostPath: + path: /var/log + name: var-log + - hostPath: + path: /dev/log + name: dev-log + - hostPath: + path: /etc/os-release + name: host-os-release + - hostPath: + path: /sys/fs/cgroup + name: 
cgroup + - hostPath: + path: /dev/vfio + name: vfio + - hostPath: + path: /run/nvidia-topologyd + type: DirectoryOrCreate + name: run-nvidia-topologyd + - hostPath: + path: /run/mellanox/drivers/usr/src + type: DirectoryOrCreate + name: mlnx-ofed-usr-src + - hostPath: + path: /run/mellanox/drivers + type: DirectoryOrCreate + name: run-mellanox-drivers + - hostPath: + path: /run/nvidia/validations + type: DirectoryOrCreate + name: run-nvidia-validations + - hostPath: + path: / + name: host-root + - hostPath: + path: /sys + type: Directory + name: host-sys + - hostPath: + path: /sys/module/firmware_class/parameters/path + name: firmware-search-path + - hostPath: + path: /sys/devices/system/memory/auto_online_blocks + name: sysfs-memory-online + - hostPath: + path: /run/nvidia/driver/lib/firmware + type: DirectoryOrCreate + name: nv-firmware + - emptyDir: {} + name: shared-nvidia-driver-toolkit + updateStrategy: + type: OnDelete +--- diff --git a/internal/state/testdata/golden/driver-vgpu-host-manager.yaml b/internal/state/testdata/golden/driver-vgpu-host-manager.yaml index 10498a256..c2145ee41 100644 --- a/internal/state/testdata/golden/driver-vgpu-host-manager.yaml +++ b/internal/state/testdata/golden/driver-vgpu-host-manager.yaml @@ -18,23 +18,6 @@ rules: - securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -152,6 +132,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP image: nvcr.io/nvidia/vgpu-manager:525.85.03-ubuntu22.04 imagePullPolicy: IfNotPresent name: nvidia-driver-ctr diff --git a/internal/state/testdata/golden/driver-vgpu-licensing.yaml b/internal/state/testdata/golden/driver-vgpu-licensing.yaml index 90d29b7be..96e505895 100644 --- a/internal/state/testdata/golden/driver-vgpu-licensing.yaml +++ b/internal/state/testdata/golden/driver-vgpu-licensing.yaml @@ -18,23 +18,6 @@ rules: - securitycontextconstraints verbs: - use -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -73,12 +56,9 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -152,6 +132,14 @@ spec: env: - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP image: nvcr.io/nvidia/driver:525.85.03-ubuntu22.04 imagePullPolicy: IfNotPresent lifecycle: diff --git a/internal/state/types.go b/internal/state/types.go index d4af979f5..000eb10f2 100644 --- a/internal/state/types.go +++ b/internal/state/types.go @@ -20,7 +20,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/source" - nvidiav1alpha1 
"github.com/NVIDIA/gpu-operator/api/v1alpha1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" ) type ctrlManager ctrl.Manager diff --git a/internal/utils/utils.go b/internal/utils/utils.go index ae51d1468..87cc70bce 100644 --- a/internal/utils/utils.go +++ b/internal/utils/utils.go @@ -24,7 +24,6 @@ import ( "strings" "github.com/davecgh/go-spew/spew" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/rand" ) @@ -64,7 +63,7 @@ func BoolPtr(v bool) *bool { } // GetObjectHash invokes Sum32 Hash function to return hash value of an unstructured Object -func GetObjectHash(obj *unstructured.Unstructured) string { +func GetObjectHash(obj interface{}) string { hasher := fnv.New32a() printer := spew.ConfigState{ Indent: " ", diff --git a/internal/validator/validator.go b/internal/validator/validator.go index ca3a19ded..e43c8127c 100644 --- a/internal/validator/validator.go +++ b/internal/validator/validator.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/client" - nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/v1alpha1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" ) // Validator provides interface to validate NVIDIADriver fields @@ -51,21 +51,21 @@ func (nsv *nodeSelectorValidator) Validate(ctx context.Context, cr *nvidiav1alph return err } - names := []string{} - for _, driver := range drivers.Items { - driver := driver - nodeList, err := nsv.getNVIDIADriverSelectedNodes(ctx, &driver) + names := map[string]struct{}{} + for di := range drivers.Items { + nodeList, err := nsv.getNVIDIADriverSelectedNodes(ctx, &drivers.Items[di]) if err != nil { return err } - for _, n := range nodeList.Items { - names = append(names, n.Name) + for ni := range nodeList.Items { + if _, ok := names[nodeList.Items[ni].Name]; ok { + return fmt.Errorf("conflicting NVIDIADriver NodeSelectors found for resource: %s, nodeSelector: %q", cr.Name, cr.Spec.NodeSelector) + } + + names[nodeList.Items[ni].Name] = struct{}{} } - } - if containsDuplicates(names) { - return fmt.Errorf("conflicting NVIDIADriver NodeSelectors found for resource: %s, nodeSelector: %q", cr.Name, cr.Spec.NodeSelector) } return nil @@ -88,14 +88,3 @@ func (nsv *nodeSelectorValidator) getNVIDIADriverSelectedNodes(ctx context.Conte return nodeList, err } - -func containsDuplicates(arr []string) bool { - visited := make(map[string]bool, 0) - for _, e := range arr { - if _, exists := visited[e]; exists { - return true - } - visited[e] = true - } - return false -} diff --git a/internal/validator/validator_test.go b/internal/validator/validator_test.go index 5d926bf3c..8171f6cf5 100644 --- a/internal/validator/validator_test.go +++ b/internal/validator/validator_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client/fake" - nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/v1alpha1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" ) const ( @@ -51,7 +51,7 @@ func makeTestDriver(opts ...driverOptions) *nvidiav1alpha1.NVIDIADriver { c.Kind = reflect.TypeOf(nvidiav1alpha1.NVIDIADriver{}).Name() - gvk := nvidiav1alpha1.GroupVersion.WithKind(c.Kind) + gvk := nvidiav1alpha1.SchemeGroupVersion.WithKind(c.Kind) c.APIVersion = gvk.GroupVersion().String() @@ -128,17 +128,3 @@ func TestCheckNodeSelector(t *testing.T) { } } } - -func TestContainsDuplicates(t *testing.T) { - tests := []struct { - arr []string - shouldReturnTrue bool - }{ - {arr: []string{"foo", 
"bar"}, shouldReturnTrue: false}, - {arr: []string{"foo", "foo"}, shouldReturnTrue: true}, - } - - for _, tc := range tests { - assert.Equal(t, tc.shouldReturnTrue, containsDuplicates(tc.arr)) - } -} diff --git a/manifests/state-driver/0200_role.yaml b/manifests/state-driver/0200_role.yaml index ec2d6434f..e3a8a3287 100644 --- a/manifests/state-driver/0200_role.yaml +++ b/manifests/state-driver/0200_role.yaml @@ -12,20 +12,3 @@ rules: - use resourceNames: - privileged -- apiGroups: - - "" - resources: - - pods - - pods/eviction - - nodes - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' diff --git a/manifests/state-driver/0210_clusterrole.yaml b/manifests/state-driver/0210_clusterrole.yaml index 47c56af59..c0db68d62 100644 --- a/manifests/state-driver/0210_clusterrole.yaml +++ b/manifests/state-driver/0210_clusterrole.yaml @@ -35,9 +35,6 @@ rules: - apiGroups: - apps resources: - - deployments - daemonsets - - replicasets - - statefulsets verbs: - - '*' + - get diff --git a/manifests/state-driver/0500_daemonset.yaml b/manifests/state-driver/0500_daemonset.yaml index d553681d9..26dfecf15 100644 --- a/manifests/state-driver/0500_daemonset.yaml +++ b/manifests/state-driver/0500_daemonset.yaml @@ -205,6 +205,14 @@ spec: # always use runc for driver containers - name: NVIDIA_VISIBLE_DEVICES value: void + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP {{- if deref .Driver.Spec.UseOpenKernelModules }} - name: OPEN_KERNEL_MODULES_ENABLED value: "true" @@ -427,8 +435,12 @@ spec: {{- range .AdditionalConfigs.VolumeMounts }} - name: {{ .Name }} mountPath: {{ .MountPath }} + {{- if .SubPath }} subPath: {{ .SubPath }} + {{- end }} + {{- if .ReadOnly }} readOnly: {{ .ReadOnly }} + {{- end}} {{- end }} {{- end }} startupProbe: @@ -490,10 +502,14 @@ spec: {{- end}} {{- if and .AdditionalConfigs .AdditionalConfigs.VolumeMounts }} {{- range .AdditionalConfigs.VolumeMounts }} - - name: {{ .Name }} - mountPath: {{ .MountPath }} - subPath: {{ .SubPath }} - readOnly: {{ .ReadOnly }} + - name: {{ .Name }} + mountPath: {{ .MountPath }} + {{- if .SubPath }} + subPath: {{ .SubPath }} + {{- end }} + {{- if .ReadOnly }} + readOnly: {{ .ReadOnly }} + {{- end }} {{- end }} {{- end }} startupProbe: @@ -564,6 +580,10 @@ spec: - name: host-os-release mountPath: /host-etc/os-release readOnly: true + - name: firmware-search-path + mountPath: /sys/module/firmware_class/parameters/path + - name: nv-firmware + mountPath: /lib/firmware {{- end }} volumes: - name: run-nvidia @@ -609,7 +629,7 @@ spec: type: DirectoryOrCreate - name: host-root hostPath: - path: "/" + path: {{ .HostRoot | default "/" }} - name: host-sys hostPath: path: /sys diff --git a/multi-arch.mk b/multi-arch.mk index 4f199ed5d..d0f4f06ac 100644 --- a/multi-arch.mk +++ b/multi-arch.mk @@ -13,7 +13,8 @@ # limitations under the License. 
PUSH_ON_BUILD ?= false -DOCKER_BUILD_OPTIONS = --output=type=image,push=$(PUSH_ON_BUILD) +ATTACH_ATTESTATIONS ?= false +DOCKER_BUILD_OPTIONS = --output=type=image,push=$(PUSH_ON_BUILD) --provenance=$(ATTACH_ATTESTATIONS) --sbom=$(ATTACH_ATTESTATIONS) DOCKER_BUILD_PLATFORM_OPTIONS = --platform=linux/amd64,linux/arm64 REGCTL ?= regctl @@ -21,8 +22,3 @@ $(PUSH_TARGETS): push-%: $(REGCTL) \ image copy \ $(IMAGE) $(OUT_IMAGE) - -push-short: - $(REGCTL) \ - image copy \ - $(IMAGE) $(OUT_IMAGE_NAME):$(OUT_IMAGE_VERSION) \ No newline at end of file diff --git a/native-only.mk b/native-only.mk index d541b8186..19dd4bd04 100644 --- a/native-only.mk +++ b/native-only.mk @@ -18,8 +18,3 @@ $(PUSH_TARGETS): OUT_IMAGE ?= $(IMAGE_NAME):$(IMAGE_TAG) $(PUSH_TARGETS): push-%: $(DOCKER) tag "$(IMAGE_NAME):$(VERSION)-$(DEFAULT_PUSH_TARGET)" "$(OUT_IMAGE)" $(DOCKER) push "$(OUT_IMAGE)" - -push-short: OUT_IMAGE ?= $(IMAGE_NAME):$(VERSION) -push-short: - $(DOCKER) tag "$(IMAGE_NAME):$(VERSION)-$(DEFAULT_PUSH_TARGET)" "$(OUT_IMAGE)" - $(DOCKER) push "$(OUT_IMAGE)" diff --git a/tests/e2e/framework/framework.go b/tests/e2e/framework/framework.go index 2e19eea32..969e674bb 100644 --- a/tests/e2e/framework/framework.go +++ b/tests/e2e/framework/framework.go @@ -190,7 +190,7 @@ func (f *Framework) AfterEach(ctx context.Context) { for namespaceKey, namespaceErr := range nsDeletionErrors { messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr)) } - e2elog.Failf(strings.Join(messages, ",")) + ginkgo.Fail(strings.Join(messages, ",")) } }() diff --git a/tests/holodeck.yaml b/tests/holodeck.yaml new file mode 100644 index 000000000..47efd2dd7 --- /dev/null +++ b/tests/holodeck.yaml @@ -0,0 +1,34 @@ +apiVersion: holodeck.nvidia.com/v1alpha1 +kind: Environment +metadata: + name: HOLODECK_NAME + description: "end-to-end test infrastructure" +spec: + provider: aws + auth: + keyName: cnt-ci + privateKey: HOLODECK_PRIVATE_KEY + instance: + type: g4dn.xlarge + region: us-west-1 + ingressIpRanges: + - 18.190.12.32/32 + - 3.143.46.93/32 + - 52.15.119.136/32 + - 35.155.108.162/32 + - 35.162.190.51/32 + - 54.201.61.24/32 + - 52.24.205.48/32 + - 44.235.4.62/32 + - 44.230.241.223/32 + image: + architecture: amd64 + imageId: ami-0ce2cb35386fc22e9 + containerRuntime: + install: true + name: containerd + kubernetes: + install: true + installer: kubeadm + version: v1.31.0 + crictlVersion: v1.31.1 diff --git a/tests/scripts/.definitions.sh b/tests/scripts/.definitions.sh index 5655c8a3d..5843232dd 100644 --- a/tests/scripts/.definitions.sh +++ b/tests/scripts/.definitions.sh @@ -17,7 +17,7 @@ TERRAFORM="terraform -chdir=${TERRAFORM_DIR}" : ${LOG_DIR:="/tmp/logs"} : ${PROJECT:="$(basename "${PROJECT_DIR}")"} : ${TEST_NAMESPACE:="test-operator"} -: ${TARGET_DRIVER_VERSION:="535.104.05"} +: ${TARGET_DRIVER_VERSION:="565.57.01"} : ${OPERATOR_IMAGE:="nvcr.io/nvidia/gpu-operator"} diff --git a/tests/scripts/.rsync-excludes b/tests/scripts/.rsync-excludes index 962cde65f..3a945297f 100644 --- a/tests/scripts/.rsync-excludes +++ b/tests/scripts/.rsync-excludes @@ -2,3 +2,4 @@ vendor/ .git aws-kube-ci cnt-ci +key.pem diff --git a/tests/scripts/update-clusterpolicy.sh b/tests/scripts/update-clusterpolicy.sh index 0b8b14cc9..220cb363d 100755 --- a/tests/scripts/update-clusterpolicy.sh +++ b/tests/scripts/update-clusterpolicy.sh @@ -112,6 +112,13 @@ test_enable_dcgm() { # Verify that standalone nvidia-dcgm and exporter pods are running successfully after update check_pod_ready "nvidia-dcgm" 
  check_pod_ready "nvidia-dcgm-exporter"
+
+  # Test that the nvidia-dcgm service is created with internalTrafficPolicy set to "Local"
+  trafficPolicy=$(kubectl get service nvidia-dcgm -n $TEST_NAMESPACE -o json | jq -r '.spec.internalTrafficPolicy')
+  if [ "$trafficPolicy" != "Local" ]; then
+    echo "service nvidia-dcgm is missing or internalTrafficPolicy is not set to Local"
+    exit 1
+  fi
 }
 
 test_gpu_sharing() {
diff --git a/tools/go.mod b/tools/go.mod
new file mode 100644
index 000000000..bf6e6ece1
--- /dev/null
+++ b/tools/go.mod
@@ -0,0 +1,68 @@
+module github.com/NVIDIA/gpu-operator/tools
+
+go 1.22.7
+
+toolchain go1.23.2
+
+require (
+	k8s.io/code-generator v0.31.2
+	sigs.k8s.io/controller-tools v0.16.5
+	sigs.k8s.io/kustomize/kustomize/v5 v5.5.0
+)
+
+require (
+	github.com/blang/semver/v4 v4.0.0 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/fatih/color v1.18.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/go-errors/errors v1.4.2 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-openapi/jsonpointer v0.19.6 // indirect
+	github.com/go-openapi/jsonreference v0.20.2 // indirect
+	github.com/go-openapi/swag v0.22.4 // indirect
+	github.com/gobuffalo/flect v1.0.3 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/sergi/go-diff v1.2.0 // indirect
+	github.com/spf13/cobra v1.8.1 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
+	github.com/xlab/treeprint v1.2.0 // indirect
+	golang.org/x/mod v0.21.0 // indirect
+	golang.org/x/net v0.30.0 // indirect
+	golang.org/x/sync v0.8.0 // indirect
+	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/text v0.19.0 // indirect
+	golang.org/x/tools v0.26.0 // indirect
+	google.golang.org/protobuf v1.34.2 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/api v0.31.2 // indirect
+	k8s.io/apiextensions-apiserver v0.31.2 // indirect
+	k8s.io/apimachinery v0.31.2 // indirect
+	k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+	sigs.k8s.io/kustomize/api v0.18.0 // indirect
+	sigs.k8s.io/kustomize/cmd/config v0.15.0 // indirect
+	sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
+)
diff --git a/tools/go.sum b/tools/go.sum
new file mode 100644
index 000000000..6e947638e --- /dev/null +++ b/tools/go.sum @@ -0,0 +1,203 @@ +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= +github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod 
h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod 
h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= +k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/code-generator v0.31.2 h1:xLWxG0HEpMSHfcM//3u3Ro2Hmc6AyyLINQS//Z2GEOI= +k8s.io/code-generator v0.31.2/go.mod h1:eEQHXgBU/m7LDaToDoiz3t97dUUVyOblQdwOr8rivqc= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-tools v0.16.5 h1:5k9FNRqziBPwqr17AMEPPV/En39ZBplLAdOwwQHruP4= +sigs.k8s.io/controller-tools v0.16.5/go.mod h1:8vztuRVzs8IuuJqKqbXCSlXcw+lkAv/M2sTpg55qjMY= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.18.0 
h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= +sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= +sigs.k8s.io/kustomize/cmd/config v0.15.0 h1:WkdY8V2+8J+W00YbImXa2ke9oegfrHH79e+kywW7EdU= +sigs.k8s.io/kustomize/cmd/config v0.15.0/go.mod h1:Jq57b0nPaoYUlOqg//0JtAh6iibboqMcfbtCYoWPM00= +sigs.k8s.io/kustomize/kustomize/v5 v5.5.0 h1:o1mtt6vpxsxDYaZKrw3BnEtc+pAjLz7UffnIvHNbvW0= +sigs.k8s.io/kustomize/kustomize/v5 v5.5.0/go.mod h1:AeFCmgCrXzmvjWWaeZCyBp6XzG1Y0w1svYus8GhJEOE= +sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= +sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 000000000..f16e2e217 --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,26 @@ +//go:build tools +// +build tools + +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package tools + +import ( + _ "k8s.io/code-generator/cmd/client-gen" + _ "sigs.k8s.io/controller-tools/cmd/controller-gen" + _ "sigs.k8s.io/kustomize/kustomize/v5" +) diff --git a/validator/Dockerfile b/validator/Dockerfile index f35c96c82..877928861 100644 --- a/validator/Dockerfile +++ b/validator/Dockerfile @@ -12,16 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-ARG CUDA_IMAGE=nvcr.io/nvidia/cuda -ARG CUDA_VERSION=undefined ARG CUDA_SAMPLE_IMAGE=undefined - -ARG BASE_DIST=ubi8 ARG GOLANG_VERSION=x.x.x -FROM ${CUDA_IMAGE}:${CUDA_VERSION}-base-${BASE_DIST} as build +FROM nvcr.io/nvidia/cuda:12.6.2-base-ubi9 as build -RUN yum install -y wget make git gcc +RUN dnf install -y wget make git gcc ARG GOLANG_VERSION=0.0.0 RUN set -eux; \ @@ -30,7 +26,7 @@ RUN set -eux; \ case "${arch##*-}" in \ x86_64 | amd64) ARCH='amd64' ;; \ ppc64el | ppc64le) ARCH='ppc64le' ;; \ - aarch64) ARCH='arm64' ;; \ + aarch64 | arm64) ARCH='arm64' ;; \ *) echo "unsupported architecture" ; exit 1 ;; \ esac; \ wget -nv -O - https://storage.googleapis.com/golang/go${GOLANG_VERSION}.linux-${ARCH}.tar.gz \ @@ -55,7 +51,7 @@ FROM ${CUDA_SAMPLE_IMAGE} AS sample-builder RUN mkdir /artifacts RUN cp /cuda-samples/vectorAdd /artifacts/vectorAdd -FROM ${CUDA_IMAGE}:${CUDA_VERSION}-base-${BASE_DIST} +FROM nvcr.io/nvidia/cuda:12.6.2-base-ubi9 # Remove CUDA libs(compat etc) in favor of libs installed by the NVIDIA driver RUN dnf remove -y cuda-* @@ -91,7 +87,7 @@ LABEL vsc-ref=${GIT_COMMIT} # Install / upgrade packages here that are required to resolve CVEs ARG CVE_UPDATES RUN if [ -n "${CVE_UPDATES}" ]; then \ - yum update -y ${CVE_UPDATES} && \ + dnf update -y ${CVE_UPDATES} && \ rm -rf /var/cache/yum/*; \ fi diff --git a/validator/Makefile b/validator/Makefile index 381bff12b..4d7474ed4 100644 --- a/validator/Makefile +++ b/validator/Makefile @@ -22,7 +22,6 @@ endif ##### Global variables ##### include $(CURDIR)/versions.mk -CUDA_IMAGE ?= nvcr.io/nvidia/cuda BUILDER_IMAGE ?= golang:$(GOLANG_VERSION) ifeq ($(IMAGE_NAME),) @@ -32,18 +31,16 @@ endif BUILD_DIR ?= ../ -IMAGE_VERSION := $(VERSION) -IMAGE_TAG ?= $(IMAGE_VERSION)-$(DIST) +IMAGE_TAG ?= $(VERSION) IMAGE = $(IMAGE_NAME):$(IMAGE_TAG) OUT_IMAGE_NAME ?= $(IMAGE_NAME) -OUT_IMAGE_VERSION ?= $(VERSION) -OUT_IMAGE_TAG = $(OUT_IMAGE_VERSION)-$(DIST) +OUT_IMAGE_TAG = $(VERSION) OUT_IMAGE = $(OUT_IMAGE_NAME):$(OUT_IMAGE_TAG) ##### Public rules ##### -DISTRIBUTIONS := ubi8 ubuntu20.04 -DEFAULT_PUSH_TARGET := ubi8 +DISTRIBUTIONS := ubi9 +DEFAULT_PUSH_TARGET := ubi9 PUSH_TARGETS := $(patsubst %,push-%, $(DISTRIBUTIONS)) BUILD_TARGETS := $(patsubst %,build-%, $(DISTRIBUTIONS)) @@ -65,17 +62,6 @@ validator: CGO_ENABLED=0 GOOS=$(GOOS) \ go build -ldflags "-s -w -X $(VERSION_PKG).gitCommit=$(GIT_COMMIT) -X $(VERSION_PKG).version=$(VERSION)" -o validator . -# For the default push target we also push a short tag equal to the version. 
-# We skip this for the development release -DEVEL_RELEASE_IMAGE_VERSION ?= devel -ifneq ($(strip $(VERSION)),$(DEVEL_RELEASE_IMAGE_VERSION)) -push-$(DEFAULT_PUSH_TARGET): push-short -endif - -push-%: DIST = $(*) -push-short: DIST = $(DEFAULT_PUSH_TARGET) - -build-%: DIST = $(*) build-%: DOCKERFILE = $(CURDIR)/Dockerfile $(DISTRIBUTIONS): %: build-%: @@ -85,13 +71,10 @@ $(BUILD_TARGETS): build-%: $(DOCKER_BUILD_OPTIONS) \ $(DOCKER_BUILD_PLATFORM_OPTIONS) \ --tag $(IMAGE) \ - --build-arg BASE_DIST="$(DIST)" \ - --build-arg CUDA_IMAGE="$(CUDA_IMAGE)" \ - --build-arg CUDA_VERSION="$(CUDA_VERSION)" \ --build-arg VERSION="$(VERSION)" \ --build-arg GIT_COMMIT="$(GIT_COMMIT)" \ --build-arg BUILDER_IMAGE="$(BUILDER_IMAGE)" \ - --build-arg CUDA_SAMPLE_IMAGE=nvcr.io/nvidia/k8s/cuda-sample:vectoradd-cuda$(CUDA_SAMPLES_VERSION)-$(*) \ + --build-arg CUDA_SAMPLE_IMAGE=nvcr.io/nvidia/k8s/cuda-sample:vectoradd-cuda$(CUDA_SAMPLES_VERSION)-ubi8 \ --build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \ --build-arg CVE_UPDATES="$(CVE_UPDATES)" \ --file $(DOCKERFILE) $(BUILD_DIR) @@ -101,4 +84,3 @@ $(BUILD_TARGETS): build-%: docker-image: OUT_IMAGE ?= $(IMAGE_NAME):$(IMAGE_TAG) docker-image: $(DEFAULT_PUSH_TARGET) $(DOCKER) tag $(IMAGE_NAME):$(IMAGE_TAG) $(OUT_IMAGE) - diff --git a/validator/driver.go b/validator/driver.go new file mode 100644 index 000000000..6c25fada3 --- /dev/null +++ b/validator/driver.go @@ -0,0 +1,73 @@ +/* +# Copyright 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +*/ + +package main + +// driverInfo contains information about an NVIDIA driver installation. +// +// isHostDriver indicates whether the driver is installed directly on +// the host at the host's root filesystem. +// +// hostRoot represents the host's root filesystem (typically '/'). +// +// driverRoot and devRoot represent the absolute paths of the driver install +// and NVIDIA device nodes on the host. +// +// driverRootCtrPath and devRootCtrPath represent the paths of the driver install +// and NVIDIA device nodes in the management containers that require them, like +// the Toolkit Container, the Device Plugin, and MIG Manager. 
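+//
+// As an illustrative sketch (the paths below are assumptions taken from the
+// defaults used elsewhere in this change, not part of the patch itself):
+//
+//	// Driver pre-installed on the host:
+//	getDriverInfo(true, "/", "/", "/host")
+//	// => driverRoot "/", driverRootCtrPath "/host", devRoot "/", devRootCtrPath "/host"
+//
+//	// Driver installed by the driver container:
+//	getDriverInfo(false, "/", "/run/nvidia/driver", "/run/nvidia/driver")
+//	// => driverRoot "/run/nvidia/driver", driverRootCtrPath "/driver-root";
+//	// devRoot is the install dir when it contains a /dev folder, else the host root.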
+type driverInfo struct { + isHostDriver bool + hostRoot string + driverRoot string + driverRootCtrPath string + devRoot string + devRootCtrPath string +} + +func getDriverInfo(isHostDriver bool, hostRoot string, driverInstallDir string, driverInstallDirCtrPath string) driverInfo { + if isHostDriver { + return driverInfo{ + isHostDriver: true, + hostRoot: hostRoot, + driverRoot: hostRoot, + driverRootCtrPath: "/host", + devRoot: hostRoot, + devRootCtrPath: "/host", + } + } + + // For drivers not installed directly on the host, devRoot can either be + // hostRoot or driverInstallDir + var devRoot, devRootCtrPath string + devRoot = root(driverInstallDirCtrPath).getDevRoot() + if devRoot == "/" { + devRoot = hostRoot + devRootCtrPath = "/host" + } else { + devRoot = driverInstallDir + devRootCtrPath = "/driver-root" + } + + return driverInfo{ + isHostDriver: false, + hostRoot: hostRoot, + driverRoot: driverInstallDir, + driverRootCtrPath: "/driver-root", + devRoot: devRoot, + devRootCtrPath: devRootCtrPath, + } +} diff --git a/validator/find.go b/validator/find.go new file mode 100644 index 000000000..0d0d39697 --- /dev/null +++ b/validator/find.go @@ -0,0 +1,109 @@ +/* +# Copyright 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +*/ + +package main + +import ( + "fmt" + "os" + "path/filepath" +) + +type root string + +// getDriverLibraryPath returns path to `libnvidia-ml.so.1` in the driver root. +// The folder for this file is also expected to be the location of other driver files. +func (r root) getDriverLibraryPath() (string, error) { + librarySearchPaths := []string{ + "/usr/lib64", + "/usr/lib/x86_64-linux-gnu", + "/usr/lib/aarch64-linux-gnu", + "/lib64", + "/lib/x86_64-linux-gnu", + "/lib/aarch64-linux-gnu", + } + + libraryPath, err := r.findFile("libnvidia-ml.so.1", librarySearchPaths...) + if err != nil { + return "", err + } + + return libraryPath, nil +} + +// getNvidiaSMIPath returns path to the `nvidia-smi` executable in the driver root. +func (r root) getNvidiaSMIPath() (string, error) { + binarySearchPaths := []string{ + "/usr/bin", + "/usr/sbin", + "/bin", + "/sbin", + } + + binaryPath, err := r.findFile("nvidia-smi", binarySearchPaths...) + if err != nil { + return "", err + } + + return binaryPath, nil +} + +// isDevRoot checks whether the specified root is a dev root. +// A dev root is defined as a root containing a /dev folder. +func (r root) isDevRoot() bool { + stat, err := os.Stat(filepath.Join(string(r), "dev")) + if err != nil { + return false + } + return stat.IsDir() +} + +// getDevRoot returns the dev root associated with the root. +// If the root is not a dev root, this defaults to "/". +func (r root) getDevRoot() string { + if r.isDevRoot() { + return string(r) + } + return "/" +} + +// findFile searches the root for a specified file. +// A number of folders can be specified to search in addition to the root itself. +// If the file represents a symlink, this is resolved and the final path is returned. 
+func (r root) findFile(name string, searchIn ...string) (string, error) { + + for _, d := range append([]string{"/"}, searchIn...) { + l := filepath.Join(string(r), d, name) + candidate, err := resolveLink(l) + if err != nil { + continue + } + return candidate, nil + } + + return "", fmt.Errorf("error locating %q", name) +} + +// resolveLink finds the target of a symlink or the file itself in the +// case of a regular file. +// This is equivalent to running `readlink -f ${l}`. +func resolveLink(l string) (string, error) { + resolved, err := filepath.EvalSymlinks(l) + if err != nil { + return "", fmt.Errorf("error resolving link '%s': %w", l, err) + } + return resolved, nil +} diff --git a/validator/main.go b/validator/main.go index c95894803..df92cd4d4 100644 --- a/validator/main.go +++ b/validator/main.go @@ -33,7 +33,6 @@ import ( devchar "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks" log "github.com/sirupsen/logrus" cli "github.com/urfave/cli/v2" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,6 +43,8 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + nvidiav1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1" + nvidiav1alpha1 "github.com/NVIDIA/gpu-operator/api/nvidia/v1alpha1" "github.com/NVIDIA/gpu-operator/internal/info" ) @@ -55,7 +56,9 @@ type Component interface { } // Driver component -type Driver struct{} +type Driver struct { + ctx context.Context +} // NvidiaFs GDS Driver component type NvidiaFs struct{} @@ -121,6 +124,9 @@ var ( metricsPort int defaultGPUWorkloadConfigFlag string disableDevCharSymlinkCreation bool + hostRootFlag string + driverInstallDirFlag string + driverInstallDirCtrPathFlag string ) // defaultGPUWorkloadConfig is "vm-passthrough" unless @@ -136,12 +142,12 @@ const ( defaultMetricsPort = 0 // hostDevCharPath indicates the path in the container where the host '/dev/char' directory is mounted to hostDevCharPath = "/host-dev-char" - // driverContainerRoot indicates the path on the host where driver container mounts it's root filesystem - driverContainerRoot = "/run/nvidia/driver" + // defaultDriverInstallDir indicates the default path on the host where the driver container installation is made available + defaultDriverInstallDir = "/run/nvidia/driver" + // defaultDriverInstallDirCtrPath indicates the default path where the NVIDIA driver install dir is mounted in the container + defaultDriverInstallDirCtrPath = "/run/nvidia/driver" // driverStatusFile indicates status file for containerizeddriver readiness driverStatusFile = "driver-ready" - // hostDriverStatusFile indicates status file for host driver readiness - hostDriverStatusFile = "host-driver-ready" // nvidiaFsStatusFile indicates status file for nvidia-fs driver readiness nvidiaFsStatusFile = "nvidia-fs-ready" // toolkitStatusFile indicates status file for toolkit readiness @@ -207,6 +213,8 @@ const ( gpuWorkloadConfigVMVgpu = "vm-vgpu" // CCCapableLabelKey represents NFD label name to indicate if the node is capable to run CC workloads CCCapableLabelKey = "nvidia.com/cc.capable" + // appComponentLabelKey indicates the label key of the component + appComponentLabelKey = "app.kubernetes.io/component" ) func main() { @@ -318,6 +326,27 @@ func main() { Destination: &disableDevCharSymlinkCreation, EnvVars: []string{"DISABLE_DEV_CHAR_SYMLINK_CREATION"}, }, + &cli.StringFlag{ + Name: "host-root", + Value: "/", + Usage: "root path of the underlying host", + Destination: 
&hostRootFlag, + EnvVars: []string{"HOST_ROOT"}, + }, + &cli.StringFlag{ + Name: "driver-install-dir", + Value: defaultDriverInstallDir, + Usage: "the path on the host where a containerized NVIDIA driver installation is made available", + Destination: &driverInstallDirFlag, + EnvVars: []string{"DRIVER_INSTALL_DIR"}, + }, + &cli.StringFlag{ + Name: "driver-install-dir-ctr-path", + Value: defaultDriverInstallDirCtrPath, + Usage: "the path where the NVIDIA driver install dir is mounted in the container", + Destination: &driverInstallDirCtrPathFlag, + EnvVars: []string{"DRIVER_INSTALL_DIR_CTR_PATH"}, + }, } // Log version info @@ -426,12 +455,12 @@ func getWorkloadConfig(ctx context.Context) (string, error) { kubeClient, err := kubernetes.NewForConfig(kubeConfig) if err != nil { - return "", fmt.Errorf("Error getting k8s client - %s", err.Error()) + return "", fmt.Errorf("error getting k8s client - %w", err) } node, err := getNode(ctx, kubeClient) if err != nil { - return "", fmt.Errorf("Error getting node labels - %s", err.Error()) + return "", fmt.Errorf("error getting node labels - %w", err) } labels := node.GetLabels() @@ -467,24 +496,26 @@ func start(c *cli.Context) error { switch componentFlag { case "driver": - driver := &Driver{} + driver := &Driver{ + ctx: c.Context, + } err := driver.validate() if err != nil { - return fmt.Errorf("error validating driver installation: %s", err) + return fmt.Errorf("error validating driver installation: %w", err) } return nil case "nvidia-fs": nvidiaFs := &NvidiaFs{} err := nvidiaFs.validate() if err != nil { - return fmt.Errorf("error validating nvidia-fs driver installation: %s", err) + return fmt.Errorf("error validating nvidia-fs driver installation: %w", err) } return nil case "toolkit": toolkit := &Toolkit{} err := toolkit.validate() if err != nil { - return fmt.Errorf("error validating toolkit installation: %s", err) + return fmt.Errorf("error validating toolkit installation: %w", err) } return nil case "cuda": @@ -493,7 +524,7 @@ func start(c *cli.Context) error { } err := cuda.validate() if err != nil { - return fmt.Errorf("error validating cuda workload: %s", err) + return fmt.Errorf("error validating cuda workload: %w", err) } return nil case "plugin": @@ -502,7 +533,7 @@ func start(c *cli.Context) error { } err := plugin.validate() if err != nil { - return fmt.Errorf("error validating plugin installation: %s", err) + return fmt.Errorf("error validating plugin installation: %w", err) } return nil case "mofed": @@ -529,7 +560,7 @@ func start(c *cli.Context) error { } err := vfioPCI.validate() if err != nil { - return fmt.Errorf("error validating vfio-pci driver installation: %s", err) + return fmt.Errorf("error validating vfio-pci driver installation: %w", err) } return nil case "vgpu-manager": @@ -538,7 +569,7 @@ func start(c *cli.Context) error { } err := vGPUManager.validate() if err != nil { - return fmt.Errorf("error validating vGPU Manager installation: %s", err) + return fmt.Errorf("error validating vGPU Manager installation: %w", err) } return nil case "vgpu-devices": @@ -556,7 +587,7 @@ func start(c *cli.Context) error { } err := CCManager.validate() if err != nil { - return fmt.Errorf("error validating CC Manager installation: %s", err) + return fmt.Errorf("error validating CC Manager installation: %w", err) } return nil default: @@ -591,19 +622,31 @@ func runCommandWithWait(command string, args []string, sleepSeconds int, silent } } -func getDriverRoot() (string, bool) { - // check if driver is pre-installed on the host and use 
host path for validation - if fileInfo, err := os.Lstat("/host/usr/bin/nvidia-smi"); err == nil && fileInfo.Size() != 0 { - log.Infof("Detected pre-installed driver on the host") - return "/host", true +// prependPathListEnvvar prepends a specified list of strings to a specified envvar and returns its value. +func prependPathListEnvvar(envvar string, prepend ...string) string { + if len(prepend) == 0 { + return os.Getenv(envvar) } + current := filepath.SplitList(os.Getenv(envvar)) + return strings.Join(append(prepend, current...), string(filepath.ListSeparator)) +} - return driverContainerRoot, false +// setEnvVar adds or updates an envvar in the specified list of envvars and returns the updated list. +func setEnvVar(envvars []string, key, value string) []string { + var updated []string + for _, envvar := range envvars { + pair := strings.SplitN(envvar, "=", 2) + if pair[0] == key { + continue + } + updated = append(updated, envvar) + } + return append(updated, fmt.Sprintf("%s=%s", key, value)) } // For driver container installs, check existence of .driver-ctr-ready to confirm running driver // container has completed and is in Ready state. -func assertDriverContainerReady(silent, withWaitFlag bool) error { +func assertDriverContainerReady(silent bool) error { command := "bash" args := []string{"-c", "stat /run/nvidia/validations/.driver-ctr-ready"} @@ -614,24 +657,117 @@ return runCommand(command, args, silent) } -func (d *Driver) runValidation(silent bool) (string, bool, error) { - driverRoot, isHostDriver := getDriverRoot() - if !isHostDriver { - log.Infof("Driver is not pre-installed on the host. Checking driver container status.") - if err := assertDriverContainerReady(silent, withWaitFlag); err != nil { - return "", false, fmt.Errorf("error checking driver container status: %v", err) +// isDriverManagedByOperator determines if the NVIDIA driver is managed by the GPU Operator. +// We check if at least one driver DaemonSet exists in the operator namespace that is +// owned by the ClusterPolicy or NVIDIADriver controllers.
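+//
+// For illustration (a sketch of the expected value, given the
+// appComponentLabelKey constant defined earlier): the label selector built
+// below renders as "app.kubernetes.io/component=nvidia-driver".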
+func isDriverManagedByOperator(ctx context.Context) (bool, error) { + kubeConfig, err := rest.InClusterConfig() + if err != nil { + return false, fmt.Errorf("error getting cluster config: %w", err) + } + + kubeClient, err := kubernetes.NewForConfig(kubeConfig) + if err != nil { + return false, fmt.Errorf("error getting k8s client: %w", err) + } + + opts := meta_v1.ListOptions{LabelSelector: labels.Set{appComponentLabelKey: "nvidia-driver"}.AsSelector().String()} + dsList, err := kubeClient.AppsV1().DaemonSets(namespaceFlag).List(ctx, opts) + if err != nil { + return false, fmt.Errorf("error listing daemonsets: %w", err) + } + + for i := range dsList.Items { + ds := dsList.Items[i] + owner := meta_v1.GetControllerOf(&ds) + if owner == nil { + continue + } + if strings.HasPrefix(owner.APIVersion, "nvidia.com/") && (owner.Kind == nvidiav1.ClusterPolicyCRDName || owner.Kind == nvidiav1alpha1.NVIDIADriverCRDName) { + return true, nil } } - // invoke validation command + return false, nil +} + +func validateHostDriver(silent bool) error { + log.Info("Attempting to validate a pre-installed driver on the host") + fileInfo, err := os.Lstat("/host/usr/bin/nvidia-smi") + if err != nil { + return fmt.Errorf("no 'nvidia-smi' file present on the host: %w", err) + } + if fileInfo.Size() == 0 { + return fmt.Errorf("empty 'nvidia-smi' file found on the host") + } command := "chroot" - args := []string{driverRoot, "nvidia-smi"} + args := []string{"/host", "nvidia-smi"} - if withWaitFlag { - return driverRoot, isHostDriver, runCommandWithWait(command, args, sleepIntervalSecondsFlag, silent) + return runCommand(command, args, silent) +} + +func validateDriverContainer(silent bool, ctx context.Context) error { + driverManagedByOperator, err := isDriverManagedByOperator(ctx) + if err != nil { + return fmt.Errorf("error checking if driver is managed by GPU Operator: %w", err) + } + + if driverManagedByOperator { + log.Infof("Driver is not pre-installed on the host and is managed by GPU Operator. Checking driver container status.") + if err := assertDriverContainerReady(silent); err != nil { + return fmt.Errorf("error checking driver container status: %w", err) + } + } + + driverRoot := root(driverInstallDirCtrPathFlag) + + validateDriver := func(silent bool) error { + driverLibraryPath, err := driverRoot.getDriverLibraryPath() + if err != nil { + return fmt.Errorf("failed to locate driver libraries: %w", err) + } + + nvidiaSMIPath, err := driverRoot.getNvidiaSMIPath() + if err != nil { + return fmt.Errorf("failed to locate nvidia-smi: %w", err) + } + cmd := exec.Command(nvidiaSMIPath) + // In order for nvidia-smi to run, we need to update LD_PRELOAD to include the path to libnvidia-ml.so.1. 
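+		// For example (assumed, illustrative path): with LD_PRELOAD unset and
+		// driverLibraryPath "/driver-root/usr/lib64/libnvidia-ml.so.1", the child
+		// process runs with LD_PRELOAD set to exactly that library path.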
+ cmd.Env = setEnvVar(os.Environ(), "LD_PRELOAD", prependPathListEnvvar("LD_PRELOAD", driverLibraryPath)) + if !silent { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + } + return cmd.Run() + } + + for { + log.Info("Attempting to validate a driver container installation") + err := validateDriver(silent) + if err != nil { + if !withWaitFlag { + return fmt.Errorf("error validating driver: %w", err) + } + log.Warningf("failed to validate the driver, retrying after %d seconds\n", sleepIntervalSecondsFlag) + time.Sleep(time.Duration(sleepIntervalSecondsFlag) * time.Second) + continue + } + return nil + } +} + +func (d *Driver) runValidation(silent bool) (driverInfo, error) { + err := validateHostDriver(silent) + if err == nil { + log.Info("Detected a pre-installed driver on the host") + return getDriverInfo(true, hostRootFlag, hostRootFlag, "/host"), nil } - return driverRoot, isHostDriver, runCommand(command, args, silent) + err = validateDriverContainer(silent, d.ctx) + if err != nil { + return driverInfo{}, err + } + return getDriverInfo(false, hostRootFlag, driverInstallDirFlag, driverInstallDirCtrPathFlag), nil } func (d *Driver) validate() error { @@ -641,77 +777,86 @@ func (d *Driver) validate() error { return err } - // delete host driver status file is already present - err = deleteStatusFile(outputDirFlag + "/" + hostDriverStatusFile) + driverInfo, err := d.runValidation(false) if err != nil { + log.Errorf("driver is not ready: %v", err) return err } - driverRoot, isHostDriver, err := d.runValidation(false) + err = createDevCharSymlinks(driverInfo, disableDevCharSymlinkCreation) if err != nil { - log.Error("driver is not ready") - return err + msg := strings.Join([]string{ + "Failed to create symlinks under /dev/char that point to all possible NVIDIA character devices.", + "The existence of these symlinks is required to address the following bug:", + "", + " https://github.com/NVIDIA/gpu-operator/issues/430", + "", + "This bug impacts container runtimes configured with systemd cgroup management enabled.", + "To disable the symlink creation, set the following envvar in ClusterPolicy:", + "", + " validator:", + " driver:", + " env:", + " - name: DISABLE_DEV_CHAR_SYMLINK_CREATION", + " value: \"true\""}, "\n") + return fmt.Errorf("%w\n\n%s", err, msg) } - if !disableDevCharSymlinkCreation { - log.Info("creating symlinks under /dev/char that correspond to NVIDIA character devices") - err = createDevCharSymlinks(driverRoot, isHostDriver) - if err != nil { - msg := strings.Join([]string{ - "Failed to create symlinks under /dev/char that point to all possible NVIDIA character devices.", - "The existence of these symlinks is required to address the following bug:", - "", - " https://github.com/NVIDIA/gpu-operator/issues/430", - "", - "This bug impacts container runtimes configured with systemd cgroup management enabled.", - "To disable the symlink creation, set the following envvar in ClusterPolicy:", - "", - " validator:", - " driver:", - " env:", - " - name: DISABLE_DEV_CHAR_SYMLINK_CREATION", - " value: \"true\""}, "\n") - return fmt.Errorf("%v\n\n%s", err, msg) - } - } + return d.createStatusFile(driverInfo) +} - statusFile := driverStatusFile - if isHostDriver { - statusFile = hostDriverStatusFile - } +func (d *Driver) createStatusFile(driverInfo driverInfo) error { + statusFileContent := strings.Join([]string{ + fmt.Sprintf("IS_HOST_DRIVER=%t", driverInfo.isHostDriver), + fmt.Sprintf("NVIDIA_DRIVER_ROOT=%s", driverInfo.driverRoot), + fmt.Sprintf("DRIVER_ROOT_CTR_PATH=%s", 
driverInfo.driverRootCtrPath), + fmt.Sprintf("NVIDIA_DEV_ROOT=%s", driverInfo.devRoot), + fmt.Sprintf("DEV_ROOT_CTR_PATH=%s", driverInfo.devRootCtrPath), + }, "\n") + "\n" // create driver status file - err = createStatusFile(outputDirFlag + "/" + statusFile) - if err != nil { - return err - } - return nil + return createStatusFileWithContent(outputDirFlag+"/"+driverStatusFile, statusFileContent) } // createDevCharSymlinks creates symlinks in /host-dev-char that point to all possible NVIDIA devices nodes. -func createDevCharSymlinks(driverRoot string, isHostDriver bool) error { - // If the host driver is being used, we rely on the fact that we are running a privileged container and as such - // have access to /dev - devRoot := driverRoot - if isHostDriver { - devRoot = "/" +func createDevCharSymlinks(driverInfo driverInfo, disableDevCharSymlinkCreation bool) error { + if disableDevCharSymlinkCreation { + log.WithField("disableDevCharSymlinkCreation", true). + Info("skipping the creation of symlinks under /dev/char that correspond to NVIDIA character devices") + return nil + } + + log.Info("creating symlinks under /dev/char that correspond to NVIDIA character devices") + + // Only attempt to load NVIDIA kernel modules when we can chroot into driverRoot + loadKernelModules := driverInfo.isHostDriver || (driverInfo.devRoot == driverInfo.driverRoot) + + // driverRootCtrPath is the path of the driver install dir in the container. This will either be + // driverInstallDirCtrPathFlag or '/host'. + // Note, if we always mounted the driver install dir to '/driver-root' in the validation container + // instead, then we could simplify to always use driverInfo.driverRootCtrPath -- which would be + // either '/host' or '/driver-root', both paths would exist in the validation container. + driverRootCtrPath := driverInstallDirCtrPathFlag + if driverInfo.isHostDriver { + driverRootCtrPath = "/host" } + // We now create the symlinks in /dev/char. 
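+	// For illustration (an assumed example; actual major/minor numbers vary by
+	// node): the creator produces entries such as /dev/char/195:0 -> ../nvidia0
+	// and /dev/char/195:255 -> ../nvidiactl, which systemd's device filtering
+	// can then resolve.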
creator, err := devchar.NewSymlinkCreator( - devchar.WithDriverRoot(driverRoot), - devchar.WithDevRoot(devRoot), + devchar.WithDriverRoot(driverRootCtrPath), + devchar.WithDevRoot(driverInfo.devRoot), devchar.WithDevCharPath(hostDevCharPath), devchar.WithCreateAll(true), devchar.WithCreateDeviceNodes(true), - devchar.WithLoadKernelModules(true), + devchar.WithLoadKernelModules(loadKernelModules), ) if err != nil { - return fmt.Errorf("error creating symlink creator: %v", err) + return fmt.Errorf("error creating symlink creator: %w", err) } err = creator.CreateLinks() if err != nil { - return fmt.Errorf("error creating symlinks: %v", err) + return fmt.Errorf("error creating symlinks: %w", err) } return nil @@ -726,16 +871,23 @@ func createStatusFile(statusFile string) error { } func createStatusFileWithContent(statusFile string, content string) error { - f, err := os.Create(statusFile) + dir := filepath.Dir(statusFile) + tmpFile, err := os.CreateTemp(dir, filepath.Base(statusFile)+".*.tmp") if err != nil { - return fmt.Errorf("unable to create status file %s: %s", statusFile, err) + return fmt.Errorf("failed to create temporary status file: %w", err) } + defer func() { + _ = os.Remove(tmpFile.Name()) + }() - - _, err = f.WriteString(content) + _, err = tmpFile.WriteString(content) + tmpFile.Close() if err != nil { - return fmt.Errorf("unable to write contents of status file %s: %s", statusFile, err) + return fmt.Errorf("failed to write temporary status file: %w", err) } + if err := os.Rename(tmpFile.Name(), statusFile); err != nil { + return fmt.Errorf("error moving temporary file to '%s': %w", statusFile, err) + } return nil } @@ -743,7 +895,7 @@ func deleteStatusFile(statusFile string) error { err := os.Remove(statusFile) if err != nil { if !os.IsNotExist(err) { - return fmt.Errorf("unable to remove driver status file %s: %s", statusFile, err) + return fmt.Errorf("unable to remove driver status file %s: %w", statusFile, err) } // status file already removed } @@ -879,7 +1031,7 @@ func (m *MOFED) validate() error { present, err := m.isMellanoxDevicePresent() if err != nil { - log.Errorf(err.Error()) + log.Errorf("Error trying to retrieve Mellanox device - %s\n", err.Error()) return err } if !present { @@ -968,19 +1120,16 @@ func (p *Plugin) runWorkload() error { pod.Spec.RuntimeClassName = &runtimeClass } - // update owner reference - err = setOwnerReference(ctx, p.kubeClient, pod) + validatorDaemonset, err := p.kubeClient.AppsV1().DaemonSets(namespaceFlag).Get(ctx, "nvidia-operator-validator", meta_v1.GetOptions{}) if err != nil { - return fmt.Errorf("unable to set ownerReference for validator pod: %s", err) + return fmt.Errorf("unable to retrieve the operator validator daemonset: %w", err) } + // update owner reference + pod.SetOwnerReferences(validatorDaemonset.ObjectMeta.OwnerReferences) // set pod tolerations - err = setTolerations(ctx, p.kubeClient, pod) - if err != nil { - return fmt.Errorf("unable to set tolerations for validator pod: %s", err) - } - - // update podSpec with node name so it will just run on current node + pod.Spec.Tolerations = validatorDaemonset.Spec.Template.Spec.Tolerations + // update podSpec with node name, so it will just run on current node pod.Spec.NodeName = nodeNameFlag resourceName, err := p.getGPUResourceName() @@ -1000,7 +1149,7 @@ func (p *Plugin) runWorkload() error { // check if plugin validation pod is already running and cleanup.
podList, err := p.kubeClient.CoreV1().Pods(namespaceFlag).List(ctx, opts) if err != nil { - return fmt.Errorf("cannot list existing validation pods: %s", err) + return fmt.Errorf("cannot list existing validation pods: %w", err) } if podList != nil && len(podList.Items) > 0 { @@ -1009,14 +1158,14 @@ func (p *Plugin) runWorkload() error { options := meta_v1.DeleteOptions{PropagationPolicy: &propagation, GracePeriodSeconds: &gracePeriod} err = p.kubeClient.CoreV1().Pods(namespaceFlag).Delete(ctx, podList.Items[0].ObjectMeta.Name, options) if err != nil { - return fmt.Errorf("cannot delete previous validation pod: %s", err) + return fmt.Errorf("cannot delete previous validation pod: %w", err) } } // wait for plugin validation pod to be ready. newPod, err := p.kubeClient.CoreV1().Pods(namespaceFlag).Create(ctx, pod, meta_v1.CreateOptions{}) if err != nil { - return fmt.Errorf("failed to create plugin validation pod %s, err %+v", pod.ObjectMeta.Name, err) + return fmt.Errorf("failed to create plugin validation pod %s, err %w", pod.ObjectMeta.Name, err) } // make sure its available @@ -1027,37 +1176,13 @@ func (p *Plugin) runWorkload() error { return nil } -func setOwnerReference(ctx context.Context, kubeClient kubernetes.Interface, pod *corev1.Pod) error { - // get owner of validator daemonset (which is ClusterPolicy) - validatorDaemonset, err := kubeClient.AppsV1().DaemonSets(namespaceFlag).Get(ctx, "nvidia-operator-validator", meta_v1.GetOptions{}) - if err != nil { - return err - } - - // update owner reference of plugin workload validation pod as ClusterPolicy for cleanup - pod.SetOwnerReferences(validatorDaemonset.ObjectMeta.OwnerReferences) - return nil -} - -func setTolerations(ctx context.Context, kubeClient kubernetes.Interface, pod *corev1.Pod) error { - // get tolerations of validator daemonset - validatorDaemonset, err := kubeClient.AppsV1().DaemonSets(namespaceFlag).Get(ctx, "nvidia-operator-validator", meta_v1.GetOptions{}) - if err != nil { - return err - } - - // set same tolerations for individual validator pods - pod.Spec.Tolerations = validatorDaemonset.Spec.Template.Spec.Tolerations - return nil -} - // waits for the pod to be created func waitForPod(ctx context.Context, kubeClient kubernetes.Interface, name string, namespace string) error { for i := 0; i < podCreationWaitRetries; i++ { // check for the existence of the resource pod, err := kubeClient.CoreV1().Pods(namespace).Get(ctx, name, meta_v1.GetOptions{}) if err != nil { - return fmt.Errorf("failed to get pod %s, err %+v", name, err) + return fmt.Errorf("failed to get pod %s, err %w", name, err) } if pod.Status.Phase != "Succeeded" { log.Infof("pod %s is curently in %s phase", name, pod.Status.Phase) @@ -1097,7 +1222,7 @@ func (p *Plugin) countGPUResources() (int64, error) { // get node info to check discovered GPU resources node, err := getNode(p.ctx, p.kubeClient) if err != nil { - return -1, fmt.Errorf("unable to fetch node by name %s to check for GPU resources: %s", nodeNameFlag, err) + return -1, fmt.Errorf("unable to fetch node by name %s to check for GPU resources: %w", nodeNameFlag, err) } count := int64(0) @@ -1170,7 +1295,7 @@ func (p *Plugin) getGPUResourceName() (corev1.ResourceName, error) { return resourceName, nil } - return "", fmt.Errorf("Unable to find any allocatable GPU resource") + return "", fmt.Errorf("unable to find any allocatable GPU resource") } func (p *Plugin) setKubeClient(kubeClient kubernetes.Interface) { @@ -1180,7 +1305,7 @@ func (p *Plugin) setKubeClient(kubeClient 
kubernetes.Interface) { func getNode(ctx context.Context, kubeClient kubernetes.Interface) (*corev1.Node, error) { node, err := kubeClient.CoreV1().Nodes().Get(ctx, nodeNameFlag, meta_v1.GetOptions{}) if err != nil { - log.Errorf("unable to get node with name %s, err %s", nodeNameFlag, err.Error()) + log.Errorf("unable to get node with name %s, err %v", nodeNameFlag, err) return nil, err } return node, nil @@ -1259,19 +1384,16 @@ func (c *CUDA) runWorkload() error { pod.Spec.RuntimeClassName = &runtimeClass } - // update owner reference - err = setOwnerReference(ctx, c.kubeClient, pod) + validatorDaemonset, err := c.kubeClient.AppsV1().DaemonSets(namespaceFlag).Get(ctx, "nvidia-operator-validator", meta_v1.GetOptions{}) if err != nil { - return fmt.Errorf("unable to set owner reference for validator pod: %s", err) + return fmt.Errorf("unable to retrieve the operator validator daemonset: %w", err) } + // update owner reference + pod.SetOwnerReferences(validatorDaemonset.ObjectMeta.OwnerReferences) // set pod tolerations - err = setTolerations(ctx, c.kubeClient, pod) - if err != nil { - return fmt.Errorf("unable to set tolerations for validator pod: %s", err) - } - - // update podSpec with node name so it will just run on current node + pod.Spec.Tolerations = validatorDaemonset.Spec.Template.Spec.Tolerations + // update podSpec with node name, so it will just run on current node pod.Spec.NodeName = nodeNameFlag opts := meta_v1.ListOptions{LabelSelector: labels.Set{"app": cudaValidatorLabelValue}.AsSelector().String(), @@ -1299,7 +1421,7 @@ func (c *CUDA) runWorkload() error { return fmt.Errorf("failed to create cuda validation pod %s, err %+v", pod.ObjectMeta.Name, err) } - // make sure its available + // make sure it's available err = waitForPod(ctx, c.kubeClient, newPod.ObjectMeta.Name, namespaceFlag) if err != nil { return err @@ -1318,13 +1440,13 @@ func (v *VfioPCI) validate() error { gpuWorkloadConfig, err := getWorkloadConfig(ctx) if err != nil { - return fmt.Errorf("Error getting gpu workload config: %s", err.Error()) + return fmt.Errorf("error getting gpu workload config: %w", err) } log.Infof("GPU workload configuration: %s", gpuWorkloadConfig) err = createStatusFileWithContent(filepath.Join(outputDirFlag, workloadTypeStatusFile), gpuWorkloadConfig+"\n") if err != nil { - return fmt.Errorf("Error updating %s status file: %v", workloadTypeStatusFile, err) + return fmt.Errorf("error updating %s status file: %w", workloadTypeStatusFile, err) } if gpuWorkloadConfig != gpuWorkloadConfigVMPassthrough { @@ -1340,7 +1462,7 @@ func (v *VfioPCI) validate() error { return err } - err = v.runValidation(false) + err = v.runValidation() if err != nil { return err } @@ -1354,11 +1476,11 @@ func (v *VfioPCI) validate() error { return nil } -func (v *VfioPCI) runValidation(silent bool) error { +func (v *VfioPCI) runValidation() error { nvpci := nvpci.New() nvdevices, err := nvpci.GetGPUs() if err != nil { - return fmt.Errorf("error getting NVIDIA PCI devices: %v", err) + return fmt.Errorf("error getting NVIDIA PCI devices: %w", err) } for _, dev := range nvdevices { @@ -1375,13 +1497,13 @@ func (v *VGPUManager) validate() error { gpuWorkloadConfig, err := getWorkloadConfig(ctx) if err != nil { - return fmt.Errorf("Error getting gpu workload config: %s", err.Error()) + return fmt.Errorf("error getting gpu workload config: %w", err) } log.Infof("GPU workload configuration: %s", gpuWorkloadConfig) err = createStatusFileWithContent(filepath.Join(outputDirFlag, workloadTypeStatusFile), 
gpuWorkloadConfig+"\n") if err != nil { - return fmt.Errorf("Error updating %s status file: %v", workloadTypeStatusFile, err) + return fmt.Errorf("error updating %s status file: %w", workloadTypeStatusFile, err) } if gpuWorkloadConfig != gpuWorkloadConfigVMVgpu { @@ -1449,12 +1571,12 @@ func (c *CCManager) validate() error { kubeConfig, err := rest.InClusterConfig() if err != nil { - return fmt.Errorf("Error getting cluster config - %s", err.Error()) + return fmt.Errorf("error getting cluster config - %w", err) } kubeClient, err := kubernetes.NewForConfig(kubeConfig) if err != nil { - log.Errorf("Error getting k8s client - %s\n", err.Error()) + log.Errorf("Error getting k8s client - %v\n", err) return err } @@ -1478,7 +1600,8 @@ func (c *CCManager) validate() error { func (c *CCManager) runValidation(silent bool) error { node, err := getNode(c.ctx, c.kubeClient) if err != nil { - return fmt.Errorf("unable to fetch node by name %s to check for %s label: %s", nodeNameFlag, CCCapableLabelKey, err) + return fmt.Errorf("unable to fetch node by name %s to check for %s label: %w", + nodeNameFlag, CCCapableLabelKey, err) } // make sure this is a CC capable node @@ -1517,13 +1640,13 @@ func (v *VGPUDevices) validate() error { gpuWorkloadConfig, err := getWorkloadConfig(ctx) if err != nil { - return fmt.Errorf("Error getting gpu workload config: %s", err.Error()) + return fmt.Errorf("error getting gpu workload config: %w", err) } log.Infof("GPU workload configuration: %s", gpuWorkloadConfig) err = createStatusFileWithContent(filepath.Join(outputDirFlag, workloadTypeStatusFile), gpuWorkloadConfig+"\n") if err != nil { - return fmt.Errorf("Error updating %s status file: %v", workloadTypeStatusFile, err) + return fmt.Errorf("error updating %s status file: %w", workloadTypeStatusFile, err) } if gpuWorkloadConfig != gpuWorkloadConfigVMVgpu { @@ -1539,7 +1662,7 @@ func (v *VGPUDevices) validate() error { return err } - err = v.runValidation(false) + err = v.runValidation() if err != nil { return err } @@ -1554,17 +1677,17 @@ func (v *VGPUDevices) validate() error { return nil } -func (v *VGPUDevices) runValidation(silent bool) error { +func (v *VGPUDevices) runValidation() error { nvmdev := nvmdev.New() vGPUDevices, err := nvmdev.GetAllDevices() if err != nil { - return fmt.Errorf("Error checking for vGPU devices on the host: %v", err) + return fmt.Errorf("error checking for vGPU devices on the host: %w", err) } if !withWaitFlag { numDevices := len(vGPUDevices) if numDevices == 0 { - return fmt.Errorf("No vGPU devices found") + return fmt.Errorf("no vGPU devices found") } log.Infof("Found %d vGPU devices", numDevices) @@ -1582,7 +1705,7 @@ func (v *VGPUDevices) runValidation(silent bool) error { vGPUDevices, err = nvmdev.GetAllDevices() if err != nil { - return fmt.Errorf("Error checking for vGPU devices on the host: %v", err) + return fmt.Errorf("error checking for vGPU devices on the host: %w", err) } } } diff --git a/validator/metrics.go b/validator/metrics.go index d9ac75e5e..4105dd166 100644 --- a/validator/metrics.go +++ b/validator/metrics.go @@ -163,11 +163,6 @@ func (nm *NodeMetrics) watchStatusFile(statusFile *promcli.Gauge, statusFileFile for { _, err := os.Stat(outputDirFlag + "/" + statusFileFilename) ready = !os.IsNotExist(err) - if !ready && statusFileFilename == driverStatusFile { - // check if the driver status file for pre-installed driver exists - _, err = os.Stat(outputDirFlag + "/" + hostDriverStatusFile) - ready = !os.IsNotExist(err) - } if ready != prevReady { prevReady = ready @@ 
-233,14 +228,17 @@ func (nm *NodeMetrics) watchDevicePluginValidation() { } func (nm *NodeMetrics) watchDriverValidation() { - driver := &Driver{} + driver := &Driver{ + ctx: nm.ctx, + } for { - _, _, err := driver.runValidation(true) + _, err := driver.runValidation(true) if err == nil { nm.driverValidation.Set(1) nm.driverValidationLastSuccess.Set(float64(time.Now().Unix())) } else { + log.Errorf("failed to validate driver: %v", err) nm.driverValidation.Set(0) } time.Sleep(driverValidationCheckDelaySeconds * time.Second) diff --git a/validator/multi-arch.mk b/validator/multi-arch.mk index 4f199ed5d..d0f4f06ac 100644 --- a/validator/multi-arch.mk +++ b/validator/multi-arch.mk @@ -13,7 +13,8 @@ # limitations under the License. PUSH_ON_BUILD ?= false -DOCKER_BUILD_OPTIONS = --output=type=image,push=$(PUSH_ON_BUILD) +ATTACH_ATTESTATIONS ?= false +DOCKER_BUILD_OPTIONS = --output=type=image,push=$(PUSH_ON_BUILD) --provenance=$(ATTACH_ATTESTATIONS) --sbom=$(ATTACH_ATTESTATIONS) DOCKER_BUILD_PLATFORM_OPTIONS = --platform=linux/amd64,linux/arm64 REGCTL ?= regctl @@ -21,8 +22,3 @@ $(PUSH_TARGETS): push-%: $(REGCTL) \ image copy \ $(IMAGE) $(OUT_IMAGE) - -push-short: - $(REGCTL) \ - image copy \ - $(IMAGE) $(OUT_IMAGE_NAME):$(OUT_IMAGE_VERSION) \ No newline at end of file diff --git a/validator/versions.mk b/validator/versions.mk index c8294d44b..b61bc782b 100644 --- a/validator/versions.mk +++ b/validator/versions.mk @@ -16,4 +16,4 @@ include $(CURDIR)/../versions.mk CUDA_SAMPLES_VERSION ?= 11.7.1 -GOLANG_VERSION ?= 1.21.1 +GOLANG_VERSION ?= 1.23.3 diff --git a/vendor/dario.cat/mergo/.deepsource.toml b/vendor/dario.cat/mergo/.deepsource.toml new file mode 100644 index 000000000..a8bc979e0 --- /dev/null +++ b/vendor/dario.cat/mergo/.deepsource.toml @@ -0,0 +1,12 @@ +version = 1 + +test_patterns = [ + "*_test.go" +] + +[[analyzers]] +name = "go" +enabled = true + + [analyzers.meta] + import_path = "dario.cat/mergo" \ No newline at end of file diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore new file mode 100644 index 000000000..45ad0f1ae --- /dev/null +++ b/vendor/dario.cat/mergo/.gitignore @@ -0,0 +1,36 @@ +#### joe made this: http://goel.io/joe + +#### go #### +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Golang/Intellij +.idea + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +#### vim #### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-v][a-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags diff --git a/vendor/dario.cat/mergo/.travis.yml b/vendor/dario.cat/mergo/.travis.yml new file mode 100644 index 000000000..d324c43ba --- /dev/null +++ b/vendor/dario.cat/mergo/.travis.yml @@ -0,0 +1,12 @@ +language: go +arch: + - amd64 + - ppc64le +install: + - go get -t + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - go test -race -v ./... 
+after_script: + - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..469b44907 --- /dev/null +++ b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/dario.cat/mergo/CONTRIBUTING.md b/vendor/dario.cat/mergo/CONTRIBUTING.md new file mode 100644 index 000000000..0a1ff9f94 --- /dev/null +++ b/vendor/dario.cat/mergo/CONTRIBUTING.md @@ -0,0 +1,112 @@ + +# Contributing to mergo + +First off, thanks for taking the time to contribute! ❤️ + +All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 + +> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: +> - Star the project +> - Tweet about it +> - Refer this project in your project's readme +> - Mention the project at local meetups and tell your friends/colleagues + + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [I Have a Question](#i-have-a-question) +- [I Want To Contribute](#i-want-to-contribute) +- [Reporting Bugs](#reporting-bugs) +- [Suggesting Enhancements](#suggesting-enhancements) + +## Code of Conduct + +This project and everyone participating in it is governed by the +[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. Please report unacceptable behavior +to <>. + + +## I Have a Question + +> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). + +Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. + +If you then still feel the need to ask a question and need clarification, we recommend the following: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). +- Provide as much context as you can about what you're running into. +- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. + +We will then take care of the issue as soon as possible. + +## I Want To Contribute + +> ### Legal Notice +> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. + +### Reporting Bugs + + +#### Before Submitting a Bug Report + +A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. + +- Make sure that you are using the latest version. +- Determine if your bug is really a bug and not an error on your side e.g.
using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). +- To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report already exists for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug). +- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. +- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) +- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue? And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? + +> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. +- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset.
If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/dario.cat/mergo/LICENSE b/vendor/dario.cat/mergo/LICENSE new file mode 100644 index 000000000..686680298 --- /dev/null +++ b/vendor/dario.cat/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md new file mode 100644 index 000000000..0b3c48889 --- /dev/null +++ b/vendor/dario.cat/mergo/README.md @@ -0,0 +1,258 @@ +# Mergo + +[![GitHub release][5]][6] +[![GoCard][7]][8] +[![Test status][1]][2] +[![OpenSSF Scorecard][21]][22] +[![OpenSSF Best Practices][19]][20] +[![Coverage status][9]][10] +[![Sourcegraph][11]][12] +[![FOSSA status][13]][14] + +[![GoDoc][3]][4] +[![Become my sponsor][15]][16] +[![Tidelift][17]][18] + +[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master +[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml +[3]: https://godoc.org/github.com/imdario/mergo?status.svg +[4]: https://godoc.org/github.com/imdario/mergo +[5]: https://img.shields.io/github/release/imdario/mergo.svg +[6]: https://github.com/imdario/mergo/releases +[7]: https://goreportcard.com/badge/imdario/mergo +[8]: https://goreportcard.com/report/github.com/imdario/mergo +[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[10]: https://coveralls.io/github/imdario/mergo?branch=master +[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[12]: https://sourcegraph.com/github.com/imdario/mergo?badge +[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield +[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo +[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge +[20]: https://bestpractices.coreinfrastructure.org/projects/7177 +[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge +[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will recursively merge any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +Mergo is stable and frozen, ready for production. Check a short list of the projects using it at large scale [here](https://github.com/imdario/mergo#mergo-in-the-wild). + +No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases. + +### Important notes + +#### 1.0.0 + +In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released. + +If the vanity URL is causing issues in your project due to a dependency pulling Mergo - it isn't a direct dependency in your project - it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL: + +``` +replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16 +``` + +#### 0.3.9 + +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9).
I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for Go modules. + +Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it wouldn't break existing code. + +If you were using Mergo before April 6th, 2015, please check that your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). + +### Donations + +If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: + +Donate using Liberapay +Become my sponsor + +### Mergo in the wild + +Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including: + +* [containerd/containerd](https://github.com/containerd/containerd) +* [datadog/datadog-agent](https://github.com/datadog/datadog-agent) +* [docker/cli/](https://github.com/docker/cli/) +* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +* [go-micro/go-micro](https://github.com/go-micro/go-micro) +* [grafana/loki](https://github.com/grafana/loki) +* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +* [masterminds/sprig](https://github.com/Masterminds/sprig) +* [moby/moby](https://github.com/moby/moby) +* [slackhq/nebula](https://github.com/slackhq/nebula) +* [volcano-sh/volcano](https://github.com/volcano-sh/volcano) + +## Install + + go get dario.cat/mergo + + // use in your .go code + import ( + "dario.cat/mergo" + ) + +## Usage + +You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it will recursively merge any exported one. It won't merge empty struct values, as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + +```go +if err := mergo.Merge(&dst, src); err != nil { + // ... +} +``` + +Also, you can merge overwriting values using the option `WithOverride`. + +```go +if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... +} +``` + +If you need to override pointers, so that the source pointer's value is assigned to the destination pointer, you must use `WithoutDereference`: + +```go +package main + +import ( + "fmt" + + "dario.cat/mergo" +) + +type Foo struct { + A *string + B int64 +} + +func main() { + first := "first" + second := "second" + src := Foo{ + A: &first, + B: 2, + } + + dest := Foo{ + A: &second, + B: 1, + } + + mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference) + fmt.Println(*dest.A, dest.B) + // Will print + // first 2 +} +``` + +Additionally, you can map a `map[string]interface{}` to a struct (and the other way around, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. + +```go +if err := mergo.Map(&dst, srcMap); err != nil { + // ... +} +``` + +Warning: if you map a struct to a map, it won't do so recursively, as the sketch below shows.
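+For instance, here is a minimal sketch of what that means (the `Inner` and `Outer` types are hypothetical, made up only for illustration): + +```go +type Inner struct { + X int +} + +type Outer struct { + Name string + Inner Inner +} + +dst := map[string]interface{}{} +if err := mergo.Map(&dst, Outer{Name: "a", Inner: Inner{X: 1}}); err != nil { + // ... +} +// dst["name"] is "a", and dst["inner"] holds an Inner struct value, +// not a nested map[string]interface{}. +```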
Don't expect Mergo to map struct members of your struct as `map[string]interface{}`; they are just assigned as values. + +Here is a nice example: + +```go +package main + +import ( + "fmt" + "dario.cat/mergo" +) + +type Foo struct { + A string + B int64 +} + +func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} +} +``` + +Note: if tests are failing due to a missing package, please execute: + + go get gopkg.in/yaml.v3 + +### Transformers + +Transformers let you merge specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have a zero value Mergo can detect, but `IsZero` can return true because it has fields with zero values. How can we merge a non-zero `time.Time`? + +```go +package main + +import ( + "fmt" + "dario.cat/mergo" + "reflect" + "time" +) + +type timeTransformer struct { +} + +func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil +} + +type Snapshot struct { + Time time.Time + // ... +} + +func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } +} +``` + +## Contact me + +If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) + +## About + +Written by [Dario Castañé](http://dario.im). + +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). + +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md new file mode 100644 index 000000000..a5de61f77 --- /dev/null +++ b/vendor/dario.cat/mergo/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.3.x | :white_check_mark: | +| < 0.3 | :x: | + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. diff --git a/vendor/dario.cat/mergo/doc.go b/vendor/dario.cat/mergo/doc.go new file mode 100644 index 000000000..7d96ec054 --- /dev/null +++ b/vendor/dario.cat/mergo/doc.go @@ -0,0 +1,148 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields, but it will recursively merge any exported one.
It also won't merge structs inside maps (because they are not addressable using Go reflection). + +# Status + +It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMware, Shopify, etc. + +# Important notes + +1.0.0 + +In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`. + +0.3.9 + +Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 stable but not bug-free. Also, this version adds support for Go modules. + +Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it wouldn't break existing code. + +If you were using Mergo before April 6th, 2015, please check that your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). + +# Install + +Do your usual installation procedure: + + go get dario.cat/mergo + + // use in your .go code + import ( + "dario.cat/mergo" + ) + +# Usage + +You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it will recursively merge any exported one. It won't merge empty struct values, as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Also, you can merge overwriting values using the option WithOverride. + + if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and the other way around, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. + + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to a map, it won't do so recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they are just assigned as values. + +Here is a nice example: + + package main + + import ( + "fmt" + "dario.cat/mergo" + ) + + type Foo struct { + A string + B int64 + } + + func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} + } + +# Transformers + +Transformers let you merge specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, time.Time is a struct; it doesn't have a zero value Mergo can detect, but IsZero can return true because it has fields with zero values. How can we merge a non-zero time.Time? + + package main + + import ( + "fmt" + "dario.cat/mergo" + "reflect" + "time" + ) + + type timeTransformer struct { + } + + func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil + } + + type Snapshot struct { + Time time.Time + // ...
+ } + + func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } + } + +# Contact me + +If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario + +# About + +Written by Dario Castañé: https://da.rio.hn + +# License + +BSD 3-Clause license, as Go language. +*/ +package mergo diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go new file mode 100644 index 000000000..759b4f74f --- /dev/null +++ b/vendor/dario.cat/mergo/map.go @@ -0,0 +1,178 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses both values recursively, assigning src's field values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{typ, seen, addr} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + config.overwriteWithEmptyValue = true + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't.
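+ // If the destination field is a pointer but the source value is not,
+ // take the source's address (when it is addressable) so the kinds line up.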
+ if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will recursively map +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// A key in src that doesn't match any field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is a separate method from Merge because it is cleaner and keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) +} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by +// non-empty src attribute values. +// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) +} + +func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerArgument + } + var ( + vDst, vSrc reflect.Value + err error + ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be frictionless, we redirect equal-type arguments + // to deepMerge, since the arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go new file mode 100644 index 000000000..fd47c95b2 --- /dev/null +++ b/vendor/dario.cat/mergo/merge.go @@ -0,0 +1,409 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib.
+ +package mergo + +import ( + "fmt" + "reflect" +) + +func hasMergeableFields(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +func isExportedComponent(field *reflect.StructField) bool { + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + +type Config struct { + Transformers Transformers + Overwrite bool + ShouldNotDereference bool + AppendSlice bool + TypeCheck bool + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses both values recursively, assigning src's field values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + typeCheck := config.TypeCheck + overwriteWithEmptySrc := config.overwriteWithEmptyValue + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + sliceDeepCopy := config.sliceDeepCopy + + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember...
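+ // Record this (address, type) pair so that recursive values terminate.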
+ visited[h] = &visit{typ, seen, addr} + } + + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + switch dst.Kind() { + case reflect.Struct: + if hasMergeableFields(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { + dst.Set(src) + } + } + case reflect.Map: + if dst.IsNil() && !src.IsNil() { + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } + } + + if src.Kind() != reflect.Map { + if overwrite && dst.CanSet() { + dst.Set(src) + } + return + } + + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + + } + dst.SetMapIndex(key, dstSlice) + } + } + + if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } + } 
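+ + // Otherwise the source element either fills a missing/empty destination entry
+ // or, with an overwrite option such as WithOverride set, replaces the existing
+ // one (for non-pointer source values).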
+ if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + + // Ensure that all keys in dst are deleted if they are not in src. + if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } + case reflect.Slice: + if !dst.CanSet() { + break + } + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + break + } + + if src.Kind() != reflect.Interface { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else if src.Elem().Kind() != reflect.Struct { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } + } + } else if dst.Elem().Type() == src.Type() { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } else { + return ErrDifferentArgumentsTypes + } + break + } + + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { + dst.Set(src) + } + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break + } + default: + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) + if mustSet { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } + } + } + + return +} + +// Merge will fill any empty value-type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will recursively merge any exported field. +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) +} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by +// non-empty src attribute values.
+// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing you to customize how some types are merged. +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values. +func WithOverride(config *Config) { + config.Overwrite = true +} + +// WithOverwriteWithEmptyValue will make merge override non-empty dst attributes with empty src attribute values. +func WithOverwriteWithEmptyValue(config *Config) { + config.Overwrite = true + config.overwriteWithEmptyValue = true +} + +// WithOverrideEmptySlice will make merge override an empty dst slice with an empty src slice. +func WithOverrideEmptySlice(config *Config) { + config.overwriteSliceWithEmptyValue = true +} + +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + +// WithAppendSlice will make merge append slices instead of overwriting them. +func WithAppendSlice(config *Config) { + config.AppendSlice = true +} + +// WithTypeCheck will make merge check types while overwriting (it must be used with WithOverride). +func WithTypeCheck(config *Config) { + config.TypeCheck = true +} + +// WithSliceDeepCopy will merge slice elements one by one, setting the Overwrite flag. +func WithSliceDeepCopy(config *Config) { + config.sliceDeepCopy = true + config.Overwrite = true +} + +func merge(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerArgument + } + var ( + vDst, vSrc reflect.Value + err error + ) + + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} + +// isReflectNil reports whether the provided reflect value is nil. +func isReflectNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: + // Both interface and slice are nil if first word is 0. + // Both are always bigger than a word; assume flagIndir. + return v.IsNil() + default: + return false + } +} diff --git a/vendor/dario.cat/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go new file mode 100644 index 000000000..0a721e2d8 --- /dev/null +++ b/vendor/dario.cat/mergo/mergo.go @@ -0,0 +1,81 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments.
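+// They are returned by Merge and Map (and their deprecated *WithOverwrite variants).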
+var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") + ErrNonPointerArgument = errors.New("dst must be a pointer") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + typ reflect.Type + next *visit + ptr uintptr +} + +// From src/pkg/encoding/json/encode.go. +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false + case reflect.Func: + return v.IsNil() + case reflect.Invalid: + return true + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 3651cfa96..639e6c399 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -9,7 +9,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a changelog; this information is also in the git tag annotations (e.g. `git show v0.4.0`). -This library requires Go 1.13 or newer; add it to your go.mod with: +This library requires Go 1.18 or newer; add it to your go.mod with: % go get github.com/BurntSushi/toml@latest diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 4d38f3bfc..7aaf462c9 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -6,7 +6,7 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" + "io/fs" "math" "os" "reflect" @@ -18,13 +18,13 @@ import ( // Unmarshaler is the interface implemented by objects that can unmarshal a // TOML description of themselves. type Unmarshaler interface { - UnmarshalTOML(interface{}) error + UnmarshalTOML(any) error } // Unmarshal decodes the contents of data in TOML format into a pointer v. // // See [Decoder] for a description of the decoding process. 
-func Unmarshal(data []byte, v interface{}) error { +func Unmarshal(data []byte, v any) error { _, err := NewDecoder(bytes.NewReader(data)).Decode(v) return err } @@ -32,12 +32,12 @@ func Unmarshal(data []byte, v interface{}) error { // Decode the TOML data in to the pointer v. // // See [Decoder] for a description of the decoding process. -func Decode(data string, v interface{}) (MetaData, error) { +func Decode(data string, v any) (MetaData, error) { return NewDecoder(strings.NewReader(data)).Decode(v) } // DecodeFile reads the contents of a file and decodes it with [Decode]. -func DecodeFile(path string, v interface{}) (MetaData, error) { +func DecodeFile(path string, v any) (MetaData, error) { fp, err := os.Open(path) if err != nil { return MetaData{}, err @@ -46,6 +46,17 @@ func DecodeFile(path string, v interface{}) (MetaData, error) { return NewDecoder(fp).Decode(v) } +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. +func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + // Primitive is a TOML value that hasn't been decoded into a Go value. // // This type can be used for any value, which will cause decoding to be delayed. @@ -58,7 +69,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) { // overhead of reflection. They can be useful when you don't know the exact type // of TOML data until runtime. type Primitive struct { - undecoded interface{} + undecoded any context Key } @@ -122,7 +133,7 @@ var ( ) // Decode TOML data in to the pointer `v`. -func (dec *Decoder) Decode(v interface{}) (MetaData, error) { +func (dec *Decoder) Decode(v any) (MetaData, error) { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr { s := "%q" @@ -136,8 +147,8 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) } - // Check if this is a supported type: struct, map, interface{}, or something - // that implements UnmarshalTOML or UnmarshalText. + // Check if this is a supported type: struct, map, any, or something that + // implements UnmarshalTOML or UnmarshalText. rv = indirect(rv) rt := rv.Type() if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && @@ -148,7 +159,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { // TODO: parser should read from io.Reader? Or at the very least, make it // read from []byte rather than string - data, err := ioutil.ReadAll(dec.r) + data, err := io.ReadAll(dec.r) if err != nil { return MetaData{}, err } @@ -179,7 +190,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { // will only reflect keys that were decoded. Namely, any keys hidden behind a // Primitive will be considered undecoded. Executing this method will update the // undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { +func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { md.context = primValue.context defer func() { md.context = nil }() return md.unify(primValue.undecoded, rvalue(v)) @@ -190,7 +201,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { // // Any type mismatch produces an error. Finding a type that we don't know // how to handle produces an unsupported type error. 
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error { +func (md *MetaData) unify(data any, rv reflect.Value) error { // Special case. Look for a `Primitive` value. // TODO: #76 would make this superfluous after implemented. if rv.Type() == primitiveType { @@ -207,7 +218,11 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { rvi := rv.Interface() if v, ok := rvi.(Unmarshaler); ok { - return v.UnmarshalTOML(data) + err := v.UnmarshalTOML(data) + if err != nil { + return md.parseErr(err) + } + return nil } if v, ok := rvi.(encoding.TextUnmarshaler); ok { return md.unifyText(data, v) @@ -227,14 +242,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return md.unifyInt(data, rv) } switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil case reflect.Struct: return md.unifyStruct(data, rv) case reflect.Map: @@ -258,14 +265,13 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return md.e("unsupported type %s", rv.Kind()) } -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) +func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error { + tmap, ok := mapping.(map[string]any) if !ok { if mapping == nil { return nil } - return md.e("type mismatch for %s: expected table but found %T", - rv.Type().String(), mapping) + return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping)) } for key, datum := range tmap { @@ -304,14 +310,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { return nil } -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { +func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error { keyType := rv.Type().Key().Kind() if keyType != reflect.String && keyType != reflect.Interface { return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", keyType, rv.Type()) } - tmap, ok := mapping.(map[string]interface{}) + tmap, ok := mapping.(map[string]any) if !ok { if tmap == nil { return nil @@ -347,7 +353,7 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { return nil } -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyArray(data any, rv reflect.Value) error { datav := reflect.ValueOf(data) if datav.Kind() != reflect.Slice { if !datav.IsValid() { @@ -361,7 +367,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { return md.unifySliceArray(datav, rv) } -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifySlice(data any, rv reflect.Value) error { datav := reflect.ValueOf(data) if datav.Kind() != reflect.Slice { if !datav.IsValid() { @@ -388,7 +394,7 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { return nil } -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyString(data any, rv reflect.Value) error { _, ok := rv.Interface().(json.Number) if ok { if i, ok := data.(int64); ok { @@ -408,7 +414,7 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { return md.badtype("string", data) } -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error { 
rvk := rv.Kind() if num, ok := data.(float64); ok { @@ -429,7 +435,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { if num, ok := data.(int64); ok { if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { - return md.parseErr(errParseRange{i: num, size: rvk.String()}) + return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()}) } rv.SetFloat(float64(num)) return nil @@ -438,7 +444,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { return md.badtype("float", data) } -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyInt(data any, rv reflect.Value) error { _, ok := rv.Interface().(time.Duration) if ok { // Parse as string duration, and fall back to regular integer parsing @@ -481,7 +487,7 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { return nil } -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyBool(data any, rv reflect.Value) error { if b, ok := data.(bool); ok { rv.SetBool(b) return nil @@ -489,12 +495,12 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { return md.badtype("boolean", data) } -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyAnything(data any, rv reflect.Value) error { rv.Set(reflect.ValueOf(data)) return nil } -func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { +func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error { var s string switch sdata := data.(type) { case Marshaler: @@ -523,13 +529,13 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro return md.badtype("primitive (string-like)", data) } if err := v.UnmarshalText([]byte(s)); err != nil { - return err + return md.parseErr(err) } return nil } -func (md *MetaData) badtype(dst string, data interface{}) error { - return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) +func (md *MetaData) badtype(dst string, data any) error { + return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst) } func (md *MetaData) parseErr(err error) error { @@ -543,7 +549,7 @@ func (md *MetaData) parseErr(err error) error { } } -func (md *MetaData) e(format string, args ...interface{}) error { +func (md *MetaData) e(format string, args ...any) error { f := "toml: " if len(md.context) > 0 { f = fmt.Sprintf("toml: (last key %q): ", md.context) @@ -556,7 +562,7 @@ func (md *MetaData) e(format string, args ...interface{}) error { } // rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { +func rvalue(v any) reflect.Value { return indirect(reflect.ValueOf(v)) } @@ -600,3 +606,8 @@ func isUnifiable(rv reflect.Value) bool { } return false } + +// fmt %T with "interface {}" replaced with "any", which is far more readable. 
+func fmtType(t any) string { + return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any") +} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go deleted file mode 100644 index 086d0b686..000000000 --- a/vendor/github.com/BurntSushi/toml/decode_go116.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package toml - -import ( - "io/fs" -) - -// DecodeFS reads the contents of a file from [fs.FS] and decodes it with -// [Decode]. -func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { - fp, err := fsys.Open(path) - if err != nil { - return MetaData{}, err - } - defer fp.Close() - return NewDecoder(fp).Decode(v) -} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go index b9e309717..155709a80 100644 --- a/vendor/github.com/BurntSushi/toml/deprecated.go +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -15,15 +15,15 @@ type TextMarshaler encoding.TextMarshaler // Deprecated: use encoding.TextUnmarshaler type TextUnmarshaler encoding.TextUnmarshaler +// DecodeReader is an alias for NewDecoder(r).Decode(v). +// +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) } + // PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). // // Deprecated: use MetaData.PrimitiveDecode. -func PrimitiveDecode(primValue Primitive, v interface{}) error { +func PrimitiveDecode(primValue Primitive, v any) error { md := MetaData{decoded: make(map[string]struct{})} return md.unify(primValue.undecoded, rvalue(v)) } - -// DecodeReader is an alias for NewDecoder(r).Decode(v). -// -// Deprecated: use NewDecoder(reader).Decode(&value). -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go index 81a7c0fe9..82c90a905 100644 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -2,9 +2,6 @@ // // This package supports TOML v1.0.0, as specified at https://toml.io // -// There is also support for delaying decoding with the Primitive type, and -// querying the set of keys in a TOML document with the MetaData type. -// // The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, // and can be used to verify if TOML document is valid. It can also be used to // print the type of each key. diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 9cd25d757..73366c0d9 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -2,6 +2,7 @@ package toml import ( "bufio" + "bytes" "encoding" "encoding/json" "errors" @@ -76,6 +77,17 @@ type Marshaler interface { MarshalTOML() ([]byte, error) } +// Marshal returns a TOML representation of the Go value. +// +// See [Encoder] for a description of the encoding process. +func Marshal(v any) ([]byte, error) { + buff := new(bytes.Buffer) + if err := NewEncoder(buff).Encode(v); err != nil { + return nil, err + } + return buff.Bytes(), nil +} + // Encoder encodes a Go to a TOML document. // // The mapping between Go values and TOML values should be precisely the same as @@ -115,26 +127,21 @@ type Marshaler interface { // NOTE: only exported keys are encoded due to the use of reflection. 
Unexported // keys are silently discarded. type Encoder struct { - // String to use for a single indentation level; default is two spaces. - Indent string - + Indent string // string for a single indentation level; default is two spaces. + hasWritten bool // written any output to w yet? w *bufio.Writer - hasWritten bool // written any output to w yet? } // NewEncoder create a new Encoder. func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } + return &Encoder{w: bufio.NewWriter(w), Indent: " "} } // Encode writes a TOML representation of the Go value to the [Encoder]'s writer. // // An error is returned if the value given cannot be encoded to a valid TOML // document. -func (enc *Encoder) Encode(v interface{}) error { +func (enc *Encoder) Encode(v any) error { rv := eindirect(reflect.ValueOf(v)) err := enc.safeEncode(Key([]string{}), rv) if err != nil { @@ -280,18 +287,30 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.Float32: f := rv.Float() if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } enc.wf("nan") } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") } else { enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) } case reflect.Float64: f := rv.Float() if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } enc.wf("nan") } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") } else { enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) } @@ -304,7 +323,7 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.Interface: enc.eElement(rv.Elem()) default: - encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) + encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface()))) } } @@ -712,7 +731,7 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { } } -func (enc *Encoder) wf(format string, v ...interface{}) { +func (enc *Encoder) wf(format string, v ...any) { _, err := fmt.Fprintf(enc.w, format, v...) if err != nil { encPanic(err) diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index efd68865b..b45a3f45f 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -114,13 +114,22 @@ func (pe ParseError) ErrorWithPosition() string { msg, pe.Position.Line, col, col+pe.Position.Len) } if pe.Position.Line > 2 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) } if pe.Position.Line > 1 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2])) } - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) - fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) + + /// Expand tabs, so that the ^^^s are at the correct position, but leave + /// "column 10-13" intact. Adjusting this to the visual column would be + /// better, but we don't know the tabsize of the user in their editor, which + /// can be 8, 4, 2, or something else. We can't know. So leaving it as the + /// character index is probably the "most correct". 
+ expanded := expandTab(lines[pe.Position.Line-1]) + diff := len(expanded) - len(lines[pe.Position.Line-1]) + + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len)) return b.String() } @@ -159,17 +168,47 @@ func (pe ParseError) column(lines []string) int { return col } +func expandTab(s string) string { + var ( + b strings.Builder + l int + fill = func(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = ' ' + } + return string(b) + } + ) + b.Grow(len(s)) + for _, r := range s { + switch r { + case '\t': + tw := 8 - l%8 + b.WriteString(fill(tw)) + l += tw + default: + b.WriteRune(r) + l += 1 + } + } + return b.String() +} + type ( errLexControl struct{ r rune } errLexEscape struct{ r rune } errLexUTF8 struct{ b byte } - errLexInvalidNum struct{ v string } - errLexInvalidDate struct{ v string } + errParseDate struct{ v string } errLexInlineTableNL struct{} errLexStringNL struct{} errParseRange struct { - i interface{} // int or float - size string // "int64", "uint16", etc. + i any // int or float + size string // "int64", "uint16", etc. + } + errUnsafeFloat struct { + i interface{} // float32 or float64 + size string // "float32" or "float64" } errParseDuration struct{ d string } ) @@ -183,18 +222,20 @@ func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape func (e errLexEscape) Usage() string { return usageEscape } func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } func (e errLexUTF8) Usage() string { return "" } -func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } -func (e errLexInvalidNum) Usage() string { return "" } -func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } -func (e errLexInvalidDate) Usage() string { return "" } +func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) } +func (e errParseDate) Usage() string { return usageDate } func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } func (e errLexStringNL) Usage() string { return usageStringNewline } func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } func (e errParseRange) Usage() string { return usageIntOverflow } -func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } -func (e errParseDuration) Usage() string { return usageDuration } +func (e errUnsafeFloat) Error() string { + return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size) +} +func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } const usageEscape = ` A '\' inside a "-delimited string is interpreted as an escape character. @@ -251,19 +292,35 @@ bug in the program that uses too small of an integer. 
The maximum and minimum values are: size │ lowest │ highest - ───────┼────────────────┼────────── + ───────┼────────────────┼────────────── int8 │ -128 │ 127 int16 │ -32,768 │ 32,767 int32 │ -2,147,483,648 │ 2,147,483,647 int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷ uint8 │ 0 │ 255 - uint16 │ 0 │ 65535 - uint32 │ 0 │ 4294967295 + uint16 │ 0 │ 65,535 + uint32 │ 0 │ 4,294,967,295 uint64 │ 0 │ 1.8 × 10¹⁸ int refers to int32 on 32-bit systems and int64 on 64-bit systems. ` +const usageUnsafeFloat = ` +This number is outside of the "safe" range for floating point numbers; whole +(non-fractional) numbers outside the below range can not always be represented +accurately in a float, leading to some loss of accuracy. + +Explicitly mark a number as a fractional unit by adding ".0", which will incur +some loss of accuracy; for example: + + f = 2_000_000_000.0 + +Accuracy ranges: + + float32 = 16,777,215 + float64 = 9,007,199,254,740,991 +` + const usageDuration = ` A duration must be as "number", without any spaces. Valid units are: @@ -277,3 +334,23 @@ A duration must be as "number", without any spaces. Valid units are: You can combine multiple units; for example "5m10s" for 5 minutes and 10 seconds. ` + +const usageDate = ` +A TOML datetime must be in one of the following formats: + + 2006-01-02T15:04:05Z07:00 Date and time, with timezone. + 2006-01-02T15:04:05 Date and time, but without timezone. + 2006-01-02 Date without a time or timezone. + 15:04:05 Just a time, without any timezone. + +Seconds may optionally have a fraction, up to nanosecond precision: + + 15:04:05.123 + 15:04:05.856018510 +` + +// TOML 1.1: +// The seconds part in times is optional, and may be omitted: +// 2006-01-02T15:04Z07:00 +// 2006-01-02T15:04 +// 15:04 diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index 3545a6ad6..a1016d98a 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -17,6 +17,7 @@ const ( itemEOF itemText itemString + itemStringEsc itemRawString itemMultilineString itemRawMultilineString @@ -53,6 +54,7 @@ type lexer struct { state stateFn items chan item tomlNext bool + esc bool // Allow for backing up up to 4 runes. This is necessary because TOML // contains 3-rune tokens (""" and '''). @@ -164,7 +166,7 @@ func (lx *lexer) next() (r rune) { } r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) - if r == utf8.RuneError { + if r == utf8.RuneError && w == 1 { lx.error(errLexUTF8{lx.input[lx.pos]}) return utf8.RuneError } @@ -270,7 +272,7 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn { } // errorf is like error, and creates a new error. -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { +func (lx *lexer) errorf(format string, values ...any) stateFn { if lx.atEOF { pos := lx.getPos() pos.Line-- @@ -333,9 +335,7 @@ func lexTopEnd(lx *lexer) stateFn { lx.emit(itemEOF) return nil } - return lx.errorf( - "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", - r) + return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r) } // lexTable lexes the beginning of a table. 
Namely, it makes sure that @@ -698,7 +698,12 @@ func lexString(lx *lexer) stateFn { return lexStringEscape case r == '"': lx.backup() - lx.emit(itemString) + if lx.esc { + lx.esc = false + lx.emit(itemStringEsc) + } else { + lx.emit(itemString) + } lx.next() lx.ignore() return lx.pop() @@ -748,6 +753,7 @@ func lexMultilineString(lx *lexer) stateFn { lx.backup() /// backup: don't include the """ in the item. lx.backup() lx.backup() + lx.esc = false lx.emit(itemMultilineString) lx.next() /// Read over ''' again and discard it. lx.next() @@ -837,6 +843,7 @@ func lexMultilineStringEscape(lx *lexer) stateFn { } func lexStringEscape(lx *lexer) stateFn { + lx.esc = true r := lx.next() switch r { case 'e': @@ -879,10 +886,8 @@ func lexHexEscape(lx *lexer) stateFn { var r rune for i := 0; i < 2; i++ { r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected two hexadecimal digits after '\x', but got %q instead`, - lx.current()) + if !isHex(r) { + return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current()) } } return lx.pop() @@ -892,10 +897,8 @@ func lexShortUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 4; i++ { r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected four hexadecimal digits after '\u', but got %q instead`, - lx.current()) + if !isHex(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current()) } } return lx.pop() @@ -905,10 +908,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 8; i++ { r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected eight hexadecimal digits after '\U', but got %q instead`, - lx.current()) + if !isHex(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current()) } } return lx.pop() @@ -975,7 +976,7 @@ func lexDatetime(lx *lexer) stateFn { // lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. 
func lexHexInteger(lx *lexer) stateFn { r := lx.next() - if isHexadecimal(r) { + if isHex(r) { return lexHexInteger } switch r { @@ -1109,7 +1110,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn { return lexOctalInteger case 'x': r = lx.peek() - if !isHexadecimal(r) { + if !isHex(r) { lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) } return lexHexInteger @@ -1207,7 +1208,7 @@ func (itype itemType) String() string { return "EOF" case itemText: return "Text" - case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString: return "String" case itemBool: return "Bool" @@ -1240,7 +1241,7 @@ func (itype itemType) String() string { } func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) + return fmt.Sprintf("(%s, %s)", item.typ, item.val) } func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } @@ -1256,10 +1257,7 @@ func isControl(r rune) bool { // Control characters except \t, \r, \n func isDigit(r rune) bool { return r >= '0' && r <= '9' } func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') -} - +func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } func isBareKeyChar(r rune, tomlNext bool) bool { if tomlNext { return (r >= 'A' && r <= 'Z') || diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index 2e78b24e9..e61453730 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -13,7 +13,7 @@ type MetaData struct { context Key // Used only during decoding. keyInfo map[string]keyInfo - mapping map[string]interface{} + mapping map[string]any keys []Key decoded map[string]struct{} data []byte // Input file; for errors. @@ -31,12 +31,12 @@ func (md *MetaData) IsDefined(key ...string) bool { } var ( - hash map[string]interface{} + hash map[string]any ok bool - hashOrVal interface{} = md.mapping + hashOrVal any = md.mapping ) for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { + if hash, ok = hashOrVal.(map[string]any); !ok { return false } if hashOrVal, ok = hash[k]; !ok { @@ -94,28 +94,55 @@ func (md *MetaData) Undecoded() []Key { type Key []string func (k Key) String() string { - ss := make([]string, len(k)) - for i := range k { - ss[i] = k.maybeQuoted(i) + // This is called quite often, so it's a bit funky to make it faster. 
+ var b strings.Builder + b.Grow(len(k) * 25) +outer: + for i, kk := range k { + if i > 0 { + b.WriteByte('.') + } + if kk == "" { + b.WriteString(`""`) + } else { + for _, r := range kk { + // "Inline" isBareKeyChar + if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') { + b.WriteByte('"') + b.WriteString(dblQuotedReplacer.Replace(kk)) + b.WriteByte('"') + continue outer + } + } + b.WriteString(kk) + } } - return strings.Join(ss, ".") + return b.String() } func (k Key) maybeQuoted(i int) string { if k[i] == "" { return `""` } - for _, c := range k[i] { - if !isBareKeyChar(c, false) { - return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + for _, r := range k[i] { + if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' { + continue } + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` } return k[i] } +// Like append(), but only increase the cap by 1. func (k Key) add(piece string) Key { + if cap(k) > len(k) { + return append(k, piece) + } newKey := make(Key, len(k)+1) copy(newKey, k) newKey[len(k)] = piece return newKey } + +func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece. +func (k Key) last() string { return k[len(k)-1] } // last piece of this key. diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 9c1915369..11ac3108b 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -2,6 +2,7 @@ package toml import ( "fmt" + "math" "os" "strconv" "strings" @@ -20,9 +21,9 @@ type parser struct { ordered []Key // List of keys in the order that they appear in the TOML data. - keyInfo map[string]keyInfo // Map keyname → info about the TOML key. - mapping map[string]interface{} // Map keyname → key value. - implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. + mapping map[string]any // Map keyname → key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). } type keyInfo struct { @@ -49,6 +50,7 @@ func parse(data string) (p *parser, err error) { // it anyway. 
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] + //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447 } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 data = data[3:] } @@ -71,7 +73,7 @@ func parse(data string) (p *parser, err error) { p = &parser{ keyInfo: make(map[string]keyInfo), - mapping: make(map[string]interface{}), + mapping: make(map[string]any), lx: lex(data, tomlNext), ordered: make([]Key, 0), implicits: make(map[string]struct{}), @@ -97,7 +99,7 @@ func (p *parser) panicErr(it item, err error) { }) } -func (p *parser) panicItemf(it item, format string, v ...interface{}) { +func (p *parser) panicItemf(it item, format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), Position: it.pos, @@ -106,7 +108,7 @@ func (p *parser) panicItemf(it item, format string, v ...interface{}) { }) } -func (p *parser) panicf(format string, v ...interface{}) { +func (p *parser) panicf(format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), Position: p.pos, @@ -139,7 +141,7 @@ func (p *parser) nextPos() item { return it } -func (p *parser) bug(format string, v ...interface{}) { +func (p *parser) bug(format string, v ...any) { panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) } @@ -194,11 +196,11 @@ func (p *parser) topLevel(item item) { p.assertEqual(itemKeyEnd, k.typ) /// The current key is the last part. - p.currentKey = key[len(key)-1] + p.currentKey = key.last() /// All the other parts (if any) are the context; need to set each part /// as implicit. - context := key[:len(key)-1] + context := key.parent() for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } @@ -207,7 +209,8 @@ func (p *parser) topLevel(item item) { /// Set value. vItem := p.next() val, typ := p.value(vItem, false) - p.set(p.currentKey, val, typ, vItem.pos) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, vItem.pos) /// Remove the context we added (preserving any context from [tbl] lines). p.context = outerContext @@ -222,7 +225,7 @@ func (p *parser) keyString(it item) string { switch it.typ { case itemText: return it.val - case itemString, itemMultilineString, + case itemString, itemStringEsc, itemMultilineString, itemRawString, itemRawMultilineString: s, _ := p.value(it, false) return s.(string) @@ -239,9 +242,11 @@ var datetimeRepl = strings.NewReplacer( // value translates an expected value from the lexer into a Go value wrapped // as an empty interface. 
-func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { +func (p *parser) value(it item, parentIsArray bool) (any, tomlType) { switch it.typ { case itemString: + return it.val, p.typeOfPrimitive(it) + case itemStringEsc: return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) @@ -274,7 +279,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { panic("unreachable") } -func (p *parser) valueInteger(it item) (interface{}, tomlType) { +func (p *parser) valueInteger(it item) (any, tomlType) { if !numUnderscoresOK(it.val) { p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) } @@ -298,7 +303,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) { return num, p.typeOfPrimitive(it) } -func (p *parser) valueFloat(it item) (interface{}, tomlType) { +func (p *parser) valueFloat(it item) (any, tomlType) { parts := strings.FieldsFunc(it.val, func(r rune) bool { switch r { case '.', 'e', 'E': @@ -322,7 +327,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) } val := strings.Replace(it.val, "_", "", -1) - if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. + signbit := false + if val == "+nan" || val == "-nan" { + signbit = val == "-nan" val = "nan" } num, err := strconv.ParseFloat(val, 64) @@ -333,6 +340,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { p.panicItemf(it, "Invalid float value: %q", it.val) } } + if signbit { + num = math.Copysign(num, -1) + } return num, p.typeOfPrimitive(it) } @@ -352,7 +362,7 @@ var dtTypes = []struct { {"15:04", internal.LocalTime, true}, } -func (p *parser) valueDatetime(it item) (interface{}, tomlType) { +func (p *parser) valueDatetime(it item) (any, tomlType) { it.val = datetimeRepl.Replace(it.val) var ( t time.Time @@ -365,26 +375,44 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) { } t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) if err == nil { + if missingLeadingZero(it.val, dt.fmt) { + p.panicErr(it, errParseDate{it.val}) + } ok = true break } } if !ok { - p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + p.panicErr(it, errParseDate{it.val}) } return t, p.typeOfPrimitive(it) } -func (p *parser) valueArray(it item) (interface{}, tomlType) { +// Go's time.Parse() will accept numbers without a leading zero; there isn't any +// way to require it. https://github.com/golang/go/issues/29911 +// +// Depend on the fact that the separators (- and :) should always be at the same +// location. +func missingLeadingZero(d, l string) bool { + for i, c := range []byte(l) { + if c == '.' || c == 'Z' { + return false + } + if (c < '0' || c > '9') && d[i] != c { + return true + } + } + return false +} + +func (p *parser) valueArray(it item) (any, tomlType) { p.setType(p.currentKey, tomlArray, it.pos) var ( - types []tomlType - - // Initialize to a non-nil empty slice. This makes it consistent with - // how S = [] decodes into a non-nil slice inside something like struct - // { S []string }. See #338 - array = []interface{}{} + // Initialize to a non-nil slice to make it consistent with how S = [] + // decodes into a non-nil slice inside something like struct { S + // []string }. 
See #338 + array = make([]any, 0, 2) ) for it = p.next(); it.typ != itemArrayEnd; it = p.next() { if it.typ == itemCommentStart { @@ -394,21 +422,20 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) { val, typ := p.value(it, true) array = append(array, val) - types = append(types, typ) - // XXX: types isn't used here, we need it to record the accurate type + // XXX: type isn't used here, we need it to record the accurate type // information. // // Not entirely sure how to best store this; could use "key[0]", // "key[1]" notation, or maybe store it on the Array type? - _ = types + _ = typ } return array, tomlArray } -func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { +func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) { var ( - hash = make(map[string]interface{}) + topHash = make(map[string]any) outerContext = p.context outerKey = p.currentKey ) @@ -436,11 +463,11 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom p.assertEqual(itemKeyEnd, k.typ) /// The current key is the last part. - p.currentKey = key[len(key)-1] + p.currentKey = key.last() /// All the other parts (if any) are the context; need to set each part /// as implicit. - context := key[:len(key)-1] + context := key.parent() for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } @@ -448,7 +475,21 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom /// Set the value. val, typ := p.value(p.next(), false) - p.set(p.currentKey, val, typ, it.pos) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, it.pos) + + hash := topHash + for _, c := range context { + h, ok := hash[c] + if !ok { + h = make(map[string]any) + hash[c] = h + } + hash, ok = h.(map[string]any) + if !ok { + p.panicf("%q is not a table", p.context) + } + } hash[p.currentKey] = val /// Restore context. @@ -456,7 +497,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom } p.context = outerContext p.currentKey = outerKey - return hash, tomlHash + return topHash, tomlHash } // numHasLeadingZero checks if this number has leading zeroes, allowing for '0', @@ -486,9 +527,9 @@ func numUnderscoresOK(s string) bool { } } - // isHexadecimal is a superset of all the permissable characters - // surrounding an underscore. - accept = isHexadecimal(r) + // isHex is a superset of all the permissible characters surrounding an + // underscore. + accept = isHex(r) } return accept } @@ -511,21 +552,19 @@ func numPeriodsOK(s string) bool { // Establishing the context also makes sure that the key isn't a duplicate, and // will create implicit hashes automatically. func (p *parser) addContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. + /// Always start at the top level and drill down for our context. hashContext := p.mapping - keyContext := make(Key, 0) + keyContext := make(Key, 0, len(key)-1) - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] + /// We only need implicit hashes for the parents. + for _, k := range key.parent() { + _, ok := hashContext[k] keyContext = append(keyContext, k) // No key? Make an implicit hash and move on.
if !ok { p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) + hashContext[k] = make(map[string]any) } // If the hash context is actually an array of tables, then set @@ -534,9 +573,9 @@ func (p *parser) addContext(key Key, array bool) { // Otherwise, it better be a table, since this MUST be a key group (by // virtue of it not being the last element in a key). switch t := hashContext[k].(type) { - case []map[string]interface{}: + case []map[string]any: hashContext = t[len(t)-1] - case map[string]interface{}: + case map[string]any: hashContext = t default: p.panicf("Key '%s' was already created as a hash.", keyContext) @@ -547,39 +586,33 @@ func (p *parser) addContext(key Key, array bool) { if array { // If this is the first element for this array, then allocate a new // list of tables for it. - k := key[len(key)-1] + k := key.last() if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 4) + hashContext[k] = make([]map[string]any, 0, 4) } // Add a new table. But make sure the key hasn't already been used // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) + if hash, ok := hashContext[k].([]map[string]any); ok { + hashContext[k] = append(hash, make(map[string]any)) } else { p.panicf("Key '%s' was already created and cannot be used as an array.", key) } } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) + p.setValue(key.last(), make(map[string]any)) } - p.context = append(p.context, key[len(key)-1]) -} - -// set calls setValue and setType. -func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { - p.setValue(key, val) - p.setType(key, typ, pos) + p.context = append(p.context, key.last()) } // setValue sets the given key to the given value in the current context. // It will make sure that the key hasn't already been defined, account for // implicit key groups. -func (p *parser) setValue(key string, value interface{}) { +func (p *parser) setValue(key string, value any) { var ( - tmpHash interface{} + tmpHash any ok bool hash = p.mapping - keyContext Key + keyContext = make(Key, 0, len(p.context)+1) ) for _, k := range p.context { keyContext = append(keyContext, k) @@ -587,11 +620,11 @@ func (p *parser) setValue(key string, value interface{}) { p.bug("Context for key '%s' has not been established.", keyContext) } switch t := tmpHash.(type) { - case []map[string]interface{}: + case []map[string]any: // The context is a table of hashes. Pick the most recent table // defined as the current hash. hash = t[len(t)-1] - case map[string]interface{}: + case map[string]any: hash = t default: p.panicf("Key '%s' has already been defined.", keyContext) @@ -618,9 +651,8 @@ func (p *parser) setValue(key string, value interface{}) { p.removeImplicit(keyContext) return } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. + // Otherwise, we have a concrete key trying to override a previous key, + // which is *always* wrong. p.panicf("Key '%s' has already been defined.", keyContext) } @@ -683,8 +715,11 @@ func stripFirstNewline(s string) string { // the next newline. After a line-ending backslash, all whitespace is removed // until the next non-whitespace character. 
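//
// Illustrative example (not from the upstream source): in the TOML
//
//	s = """a \
//	       b"""
//
// the line-ending backslash removes the newline and all following
// whitespace, so s decodes to "a b".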
func (p *parser) stripEscapedNewlines(s string) string { - var b strings.Builder - var i int + var ( + b strings.Builder + i int + ) + b.Grow(len(s)) for { ix := strings.Index(s[i:], `\`) if ix < 0 { @@ -714,9 +749,8 @@ func (p *parser) stripEscapedNewlines(s string) string { continue } if !strings.Contains(s[i:j], "\n") { - // This is not a line-ending backslash. - // (It's a bad escape sequence, but we can let - // replaceEscapes catch it.) + // This is not a line-ending backslash. (It's a bad escape sequence, + // but we can let replaceEscapes catch it.) i++ continue } @@ -727,79 +761,78 @@ func (p *parser) stripEscapedNewlines(s string) string { } func (p *parser) replaceEscapes(it item, str string) string { - replaced := make([]rune, 0, len(str)) - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) + var ( + b strings.Builder + skip = 0 + ) + b.Grow(len(str)) + for i, c := range str { + if skip > 0 { + skip-- continue } - r += 1 - if r >= len(s) { + if c != '\\' { + b.WriteRune(c) + continue + } + + if i >= len(str) { p.bug("Escape sequence at end of string.") return "" } - switch s[r] { + switch str[i+1] { default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) + p.bug("Expected valid escape code after \\, but got %q.", str[i+1]) case ' ', '\t': - p.panicItemf(it, "invalid escape: '\\%c'", s[r]) + p.panicItemf(it, "invalid escape: '\\%c'", str[i+1]) case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 + b.WriteByte(0x08) + skip = 1 case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 + b.WriteByte(0x09) + skip = 1 case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 + b.WriteByte(0x0a) + skip = 1 case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 + b.WriteByte(0x0c) + skip = 1 case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 + b.WriteByte(0x0d) + skip = 1 case 'e': if p.tomlNext { - replaced = append(replaced, rune(0x001B)) - r += 1 + b.WriteByte(0x1b) + skip = 1 } case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 + b.WriteByte(0x22) + skip = 1 case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 + b.WriteByte(0x5c) + skip = 1 + // The lexer guarantees the correct number of characters are present; + // don't need to check here. case 'x': if p.tomlNext { - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3]) - replaced = append(replaced, escaped) - r += 3 + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) + b.WriteRune(escaped) + skip = 3 } case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5]) - replaced = append(replaced, escaped) - r += 5 + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6]) + b.WriteRune(escaped) + skip = 5 case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) 
- escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) - replaced = append(replaced, escaped) - r += 9 + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10]) + b.WriteRune(escaped) + skip = 9 } } - return string(replaced) + return b.String() } -func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { - s := string(bs) +func (p *parser) asciiEscapeToUnicode(it item, s string) rune { hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) if err != nil { p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go index 254ca82e5..10c51f7ee 100644 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -25,10 +25,8 @@ type field struct { // breaking ties with index sequence. type byName []field -func (x byName) Len() int { return len(x) } - +func (x byName) Len() int { return len(x) } func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - func (x byName) Less(i, j int) bool { if x[i].name != x[j].name { return x[i].name < x[j].name @@ -45,10 +43,8 @@ func (x byName) Less(i, j int) bool { // byIndex sorts field by index sequence. type byIndex []field -func (x byIndex) Len() int { return len(x) } - +func (x byIndex) Len() int { return len(x) } func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - func (x byIndex) Less(i, j int) bool { for k, xik := range x[i].index { if k >= len(x[j].index) { diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go index 4e90d7737..1c090d331 100644 --- a/vendor/github.com/BurntSushi/toml/type_toml.go +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -22,13 +22,8 @@ func typeIsTable(t tomlType) bool { type tomlBaseType string -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} +func (btype tomlBaseType) typeString() string { return string(btype) } +func (btype tomlBaseType) String() string { return btype.typeString() } var ( tomlInteger tomlBaseType = "Integer" @@ -54,7 +49,7 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType { return tomlFloat case itemDatetime: return tomlDatetime - case itemString: + case itemString, itemStringEsc: return tomlString case itemMultilineString: return tomlString diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md index f12626423..f95a504fe 100644 --- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -1,5 +1,33 @@ # Changelog +## 3.3.0 (2024-08-27) + +### Added + +- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser) +- #213: nil version equality checking (thanks @KnutZuidema) + +### Changed + +- #241: Simplify StrictNewVersion parsing (thanks @grosser) +- Testing support up through Go 1.23 +- Minimum version set to 1.21 as this is what's tested now +- Fuzz testing now supports caching + +## 3.2.1 (2023-04-10) + +### Changed + +- #198: Improved testing around pre-release names +- #200: Improved code scanning with addition of CodeQL +- #201: Testing now includes Go 1.20. Go 1.17 has been dropped +- #202: Migrated Fuzz testing to Go built-in Fuzzing. 
CI runs daily +- #203: Docs updated for security details + +### Fixed + +- #199: Fixed issue with range transformations + ## 3.2.0 (2022-11-28) ### Added diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile index 0e7b5c713..9ca87a2c7 100644 --- a/vendor/github.com/Masterminds/semver/v3/Makefile +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -19,6 +19,7 @@ test-cover: .PHONY: fuzz fuzz: @echo "==> Running Fuzz Tests" + go env GOCACHE go test -fuzz=FuzzNewVersion -fuzztime=15s . go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . go test -fuzz=FuzzNewConstraint -fuzztime=15s . @@ -27,4 +28,4 @@ $(GOLANGCI_LINT): # Install golangci-lint. The configuration for it is in the .golangci.yml # file in the root of the repository echo ${GOPATH} - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2 diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md index eab8cac3b..ed5693608 100644 --- a/vendor/github.com/Masterminds/semver/v3/README.md +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -13,12 +13,9 @@ Active](https://masterminds.github.io/stability/active.svg)](https://masterminds [![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) -If you are looking for a command line tool for version comparisons please see -[vert](https://github.com/Masterminds/vert) which uses this library. - ## Package Versions -Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version. +Note, import `github.com/Masterminds/semver/v3` to use the latest version. There are three major versions fo the `semver` package. @@ -80,12 +77,12 @@ There are two methods for comparing versions. One uses comparison methods on differences to notes between these two methods of comparison. 1. When two versions are compared using functions such as `Compare`, `LessThan`, - and others it will follow the specification and always include prereleases + and others it will follow the specification and always include pre-releases within the comparison. It will provide an answer that is valid with the comparison section of the spec at https://semver.org/#spec-item-11 2. When constraint checking is used for checks or validation it will follow a different set of rules that are common for ranges with tools like npm/js - and Rust/Cargo. This includes considering prereleases to be invalid if the + and Rust/Cargo. This includes considering pre-releases to be invalid if the ranges does not include one. If you want to have it include pre-releases a simple solution is to include `-0` in your range. 3. Constraint ranges can have some complex rules including the shorthand use of @@ -113,7 +110,7 @@ v, err := semver.NewVersion("1.3") if err != nil { // Handle version not being parsable. } -// Check if the version meets the constraints. The a variable will be true. +// Check if the version meets the constraints. The variable a will be true. 
a := c.Check(v) ``` @@ -137,20 +134,20 @@ The basic comparisons are: ### Working With Prerelease Versions Pre-releases, for those not familiar with them, are used for software releases -prior to stable or generally available releases. Examples of prereleases include -development, alpha, beta, and release candidate releases. A prerelease may be +prior to stable or generally available releases. Examples of pre-releases include +development, alpha, beta, and release candidate releases. A pre-release may be a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the -order of precedence, prereleases come before their associated releases. In this +order of precedence, pre-releases come before their associated releases. In this example `1.2.3-beta.1 < 1.2.3`. -According to the Semantic Version specification prereleases may not be +According to the Semantic Version specification, pre-releases may not be API compliant with their release counterpart. It says, > A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. -SemVer comparisons using constraints without a prerelease comparator will skip -prerelease versions. For example, `>=1.2.3` will skip prereleases when looking -at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. +SemVer's comparisons using constraints without a pre-release comparator will skip +pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases. The reason for the `0` as a pre-release version in the example comparison is because pre-releases can only contain ASCII alphanumerics and hyphens (along with @@ -171,6 +168,9 @@ These look like: * `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` +Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's +parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`. + ### Wildcards In Comparisons The `x`, `X`, and `*` characters can be used as a wildcard character. 
This works diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go index 7c4bed334..ff499fb66 100644 --- a/vendor/github.com/Masterminds/semver/v3/version.go +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -83,22 +83,23 @@ func StrictNewVersion(v string) (*Version, error) { original: v, } - // check for prerelease or build metadata - var extra []string - if strings.ContainsAny(parts[2], "-+") { - // Start with the build metadata first as it needs to be on the right - extra = strings.SplitN(parts[2], "+", 2) - if len(extra) > 1 { - // build metadata found - sv.metadata = extra[1] - parts[2] = extra[0] + // Extract build metadata + if strings.Contains(parts[2], "+") { + extra := strings.SplitN(parts[2], "+", 2) + sv.metadata = extra[1] + parts[2] = extra[0] + if err := validateMetadata(sv.metadata); err != nil { + return nil, err } + } - extra = strings.SplitN(parts[2], "-", 2) - if len(extra) > 1 { - // prerelease found - sv.pre = extra[1] - parts[2] = extra[0] + // Extract build prerelease + if strings.Contains(parts[2], "-") { + extra := strings.SplitN(parts[2], "-", 2) + sv.pre = extra[1] + parts[2] = extra[0] + if err := validatePrerelease(sv.pre); err != nil { + return nil, err } } @@ -114,7 +115,7 @@ func StrictNewVersion(v string) (*Version, error) { } } - // Extract the major, minor, and patch elements onto the returned Version + // Extract major, minor, and patch var err error sv.major, err = strconv.ParseUint(parts[0], 10, 64) if err != nil { @@ -131,23 +132,6 @@ func StrictNewVersion(v string) (*Version, error) { return nil, err } - // No prerelease or build metadata found so returning now as a fastpath. - if sv.pre == "" && sv.metadata == "" { - return sv, nil - } - - if sv.pre != "" { - if err = validatePrerelease(sv.pre); err != nil { - return nil, err - } - } - - if sv.metadata != "" { - if err = validateMetadata(sv.metadata); err != nil { - return nil, err - } - } - return sv, nil } @@ -381,15 +365,31 @@ func (v *Version) LessThan(o *Version) bool { return v.Compare(o) < 0 } +// LessThanEqual tests if one version is less or equal than another one. +func (v *Version) LessThanEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + // GreaterThan tests if one version is greater than another one. func (v *Version) GreaterThan(o *Version) bool { return v.Compare(o) > 0 } +// GreaterThanEqual tests if one version is greater or equal than another one. +func (v *Version) GreaterThanEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + // Equal tests if two versions are equal to each other. // Note, versions can be equal with different metadata since metadata // is not considered part of the comparable version. 
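//
// Illustrative sketch (not part of this diff) of the nil handling added
// below; variable names are assumptions:
//
//	var a, b *semver.Version         // both nil
//	c := semver.MustParse("1.2.3")
//	a.Equal(b)   // true: two nil versions are considered equal
//	a.Equal(c)   // false: nil never equals a concrete version
//	c.Equal(nil) // false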
func (v *Version) Equal(o *Version) bool { + if v == o { + return true + } + if v == nil || o == nil { + return false + } return v.Compare(o) == 0 } diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md index 2ce45dd4e..b5ef766a7 100644 --- a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md +++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## Release 3.3.0 (2024-08-29) + +### Added + +- #400: added sha512sum function (thanks @itzik-elayev) + +### Changed + +- #407: Removed duplicate documentation (functions were documented in 2 places) +- #290: Corrected copy/paste oops in math documentation (thanks @zzhu41) +- #369: Corrected template reference in docs (thanks @chey) +- #375: Added link to URL documentation (thanks @carlpett) +- #406: Updated the mergo dependency which had a breaking change (which was accounted for) +- #376: Fixed documentation error (thanks @jheyduk) +- #404: Updated dependency tree +- #391: Fixed misspelling (thanks @chrishalbert) +- #405: Updated Go versions used in testing + ## Release 3.2.3 (2022-11-29) ### Changed @@ -307,7 +325,7 @@ This release adds new functions, including: - Added `semver` and `semverCompare` for Semantic Versions - `list` replaces `tuple` - Fixed issue with `join` -- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` +- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` ## Release 2.9.0 (2017-02-23) @@ -361,7 +379,7 @@ Because we switched from `int` to `int64` as the return value for all integer ma - `min` complements `max` (formerly `biggest`) - `empty` indicates that a value is the empty value for its type - `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` -- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` - Date formatters have been added for HTML dates (as used in `date` input fields) - Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
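The sprig changelog above and the crypto.go hunk below both introduce `sha512sum`. A minimal, illustrative sketch of calling the new helper through Sprig's text/template function map (not part of the vendored diff; the template string and program layout are assumptions):

```
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// sha512sum hex-encodes the SHA-512 digest of its input, complementing
	// the existing sha1sum and sha256sum helpers.
	tpl := template.Must(
		template.New("digest").
			Funcs(sprig.TxtFuncMap()).
			Parse(`{{ sha512sum "hello" }}`))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```

TxtFuncMap is used here because the output is not HTML-escaped; per functions.go below, sprig.FuncMap() wires the same functions into html/template instead.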
diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go index 13a5cd559..75fe027e4 100644 --- a/vendor/github.com/Masterminds/sprig/v3/crypto.go +++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go @@ -14,6 +14,7 @@ import ( "crypto/rsa" "crypto/sha1" "crypto/sha256" + "crypto/sha512" "crypto/x509" "crypto/x509/pkix" "encoding/asn1" @@ -36,6 +37,11 @@ import ( "golang.org/x/crypto/scrypt" ) +func sha512sum(input string) string { + hash := sha512.Sum512([]byte(input)) + return hex.EncodeToString(hash[:]) +} + func sha256sum(input string) string { hash := sha256.Sum256([]byte(input)) return hex.EncodeToString(hash[:]) diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go index ade889698..4315b3542 100644 --- a/vendor/github.com/Masterminds/sprig/v3/dict.go +++ b/vendor/github.com/Masterminds/sprig/v3/dict.go @@ -1,7 +1,7 @@ package sprig import ( - "github.com/imdario/mergo" + "dario.cat/mergo" "github.com/mitchellh/copystructure" ) diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go index aabb9d448..91031d6d1 100644 --- a/vendor/github.com/Masterminds/sprig/v3/doc.go +++ b/vendor/github.com/Masterminds/sprig/v3/doc.go @@ -6,7 +6,7 @@ inside of Go `html/template` and `text/template` files. To add these functions, use the `template.Funcs()` method: - t := templates.New("foo").Funcs(sprig.FuncMap()) + t := template.New("foo").Funcs(sprig.FuncMap()) Note that you should add the function map before you parse any template files. diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go index 57fcec1d9..cda47d26f 100644 --- a/vendor/github.com/Masterminds/sprig/v3/functions.go +++ b/vendor/github.com/Masterminds/sprig/v3/functions.go @@ -22,8 +22,7 @@ import ( // // Use this to pass the functions into the template engine: // -// tpl := template.New("foo").Funcs(sprig.FuncMap())) -// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) func FuncMap() template.FuncMap { return HtmlFuncMap() } @@ -142,10 +141,13 @@ var genericMap = map[string]interface{}{ "swapcase": util.SwapCase, "shuffle": xstrings.Shuffle, "snakecase": xstrings.ToSnakeCase, - "camelcase": xstrings.ToCamelCase, - "kebabcase": xstrings.ToKebabCase, - "wrap": func(l int, s string) string { return util.Wrap(s, l) }, - "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // camelcase used to call xstrings.ToCamelCase, but that function had a breaking change in version + // 1.5 that moved it from upper camel case to lower camel case. This is a breaking change for sprig. + // A new xstrings.ToPascalCase function was added that provided upper camel case. 
+ "camelcase": xstrings.ToPascalCase, + "kebabcase": xstrings.ToKebabCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, // Switch order so that "foobar" | contains "foo" "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, @@ -159,6 +161,7 @@ var genericMap = map[string]interface{}{ "plural": plural, "sha1sum": sha1sum, "sha256sum": sha256sum, + "sha512sum": sha512sum, "adler32sum": adler32sum, "toString": strval, @@ -336,20 +339,20 @@ var genericMap = map[string]interface{}{ "mustChunk": mustChunk, // Crypto: - "bcrypt": bcrypt, - "htpasswd": htpasswd, - "genPrivateKey": generatePrivateKey, - "derivePassword": derivePassword, - "buildCustomCert": buildCustomCertificate, - "genCA": generateCertificateAuthority, - "genCAWithKey": generateCertificateAuthorityWithPEMKey, - "genSelfSignedCert": generateSelfSignedCertificate, + "bcrypt": bcrypt, + "htpasswd": htpasswd, + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genCAWithKey": generateCertificateAuthorityWithPEMKey, + "genSelfSignedCert": generateSelfSignedCertificate, "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, - "genSignedCert": generateSignedCertificate, - "genSignedCertWithKey": generateSignedCertificateWithPEMKey, - "encryptAES": encryptAES, - "decryptAES": decryptAES, - "randBytes": randBytes, + "genSignedCert": generateSignedCertificate, + "genSignedCertWithKey": generateSignedCertificateWithPEMKey, + "encryptAES": encryptAES, + "decryptAES": decryptAES, + "randBytes": randBytes, // UUIDs: "uuidv4": uuidv4, diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvmdev/mock.go b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvmdev/mock.go index 2a53d1741..78192f88c 100644 --- a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvmdev/mock.go +++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvmdev/mock.go @@ -25,14 +25,15 @@ import ( "github.com/NVIDIA/go-nvlib/pkg/nvpci/bytes" ) -// MockNvmdev mock mdev device +// MockNvmdev mock mdev device. type MockNvmdev struct { *nvmdev + pciDevicesRoot string } var _ Interface = (*MockNvmdev)(nil) -// NewMock creates new mock mediated (vGPU) and parent PCI devices and removes old devices +// NewMock creates new mock mediated (vGPU) and parent PCI devices and removes old devices. func NewMock() (mock *MockNvmdev, rerr error) { mdevParentsRootDir, err := os.MkdirTemp(os.TempDir(), "") if err != nil { @@ -53,23 +54,47 @@ func NewMock() (mock *MockNvmdev, rerr error) { } }() + pciRootDir, err := os.MkdirTemp(os.TempDir(), "") + if err != nil { + return nil, err + } + defer func() { + if rerr != nil { + os.RemoveAll(pciRootDir) + } + }() + + nvpciLib := nvpci.New(nvpci.WithPCIDevicesRoot(pciRootDir)) mock = &MockNvmdev{ - &nvmdev{mdevParentsRootDir, mdevDevicesRootDir}, + nvmdev: &nvmdev{ + mdevParentsRoot: mdevParentsRootDir, + mdevDevicesRoot: mdevDevicesRootDir, + nvpci: nvpciLib, + }, + pciDevicesRoot: pciRootDir, } return mock, nil } -// Cleanup removes the mocked mediated (vGPU) and parent PCI devices root folders +// Cleanup removes the mocked mediated (vGPU) and parent PCI devices root folders. 
func (m *MockNvmdev) Cleanup() { os.RemoveAll(m.mdevParentsRoot) os.RemoveAll(m.mdevDevicesRoot) + os.RemoveAll(m.pciDevicesRoot) } -// AddMockA100Parent creates an A100 like parent GPU mock device +// AddMockA100Parent creates an A100 like parent GPU mock device. func (m *MockNvmdev) AddMockA100Parent(address string, numaNode int) error { + pciDeviceDir := filepath.Join(m.pciDevicesRoot, address) + err := os.MkdirAll(pciDeviceDir, 0755) + if err != nil { + return err + } + + // /sys/class/mdev_bus/
<address> is a symlink to /sys/bus/pci/devices/<address>
deviceDir := filepath.Join(m.mdevParentsRoot, address) - err := os.MkdirAll(deviceDir, 0755) + err = os.Symlink(pciDeviceDir, deviceDir) if err != nil { return err } @@ -220,6 +245,9 @@ func (m *MockNvmdev) AddMockA100Mdev(uuid string, mdevType string, mdevTypeDir s return err } err = os.Symlink(filepath.Join(mdevDeviceDir, "vfio_mdev"), filepath.Join(mdevDeviceDir, "driver")) + if err != nil { + return err + } _, err = os.Create(filepath.Join(mdevDeviceDir, "200")) if err != nil { diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvmdev/nvmdev.go b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvmdev/nvmdev.go index 926125821..c85d79d4b 100644 --- a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvmdev/nvmdev.go +++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvmdev/nvmdev.go @@ -33,7 +33,7 @@ const ( mdevDevicesRoot = "/sys/bus/mdev/devices" ) -// Interface allows us to get a list of NVIDIA MDEV (vGPU) and parent devices +// Interface allows us to get a list of NVIDIA MDEV (vGPU) and parent devices. type Interface interface { GetAllDevices() ([]*Device, error) GetAllParentDevices() ([]*ParentDevice, error) @@ -42,17 +42,18 @@ type Interface interface { type nvmdev struct { mdevParentsRoot string mdevDevicesRoot string + nvpci nvpci.Interface } var _ Interface = (*nvmdev)(nil) -// ParentDevice represents an NVIDIA parent PCI device +// ParentDevice represents an NVIDIA parent PCI device. type ParentDevice struct { *nvpci.NvidiaPCIDevice mdevPaths map[string]string } -// Device represents an NVIDIA MDEV (vGPU) device +// Device represents an NVIDIA MDEV (vGPU) device. type Device struct { Path string UUID string @@ -62,12 +63,29 @@ type Device struct { Parent *ParentDevice } -// New interface that allows us to get a list of all NVIDIA parent and MDEV (vGPU) devices -func New() Interface { - return &nvmdev{mdevParentsRoot, mdevDevicesRoot} +// New interface that allows us to get a list of all NVIDIA parent and MDEV (vGPU) devices. +func New(opts ...Option) Interface { + n := &nvmdev{mdevParentsRoot: mdevParentsRoot, mdevDevicesRoot: mdevDevicesRoot} + for _, opt := range opts { + opt(n) + } + if n.nvpci == nil { + n.nvpci = nvpci.New() + } + return n } -// GetAllParentDevices returns all NVIDIA Parent PCI devices on the system +// Option defines a function for passing options to the New() call. +type Option func(*nvmdev) + +// WithNvpciLib provides an Option to set the nvpci library. +func WithNvpciLib(nvpciLib nvpci.Interface) Option { + return func(n *nvmdev) { + n.nvpci = nvpciLib + } +} + +// GetAllParentDevices returns all NVIDIA Parent PCI devices on the system. func (m *nvmdev) GetAllParentDevices() ([]*ParentDevice, error) { deviceDirs, err := os.ReadDir(m.mdevParentsRoot) if err != nil { @@ -77,7 +95,7 @@ func (m *nvmdev) GetAllParentDevices() ([]*ParentDevice, error) { var nvdevices []*ParentDevice for _, deviceDir := range deviceDirs { devicePath := path.Join(m.mdevParentsRoot, deviceDir.Name()) - nvdevice, err := NewParentDevice(devicePath) + nvdevice, err := m.NewParentDevice(devicePath) if err != nil { return nil, fmt.Errorf("error constructing NVIDIA parent device: %v", err) } @@ -101,7 +119,7 @@ func (m *nvmdev) GetAllParentDevices() ([]*ParentDevice, error) { return nvdevices, nil } -// GetAllDevices returns all NVIDIA mdev (vGPU) devices on the system +// GetAllDevices returns all NVIDIA mdev (vGPU) devices on the system. 
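//
// Illustrative construction (not part of this diff) exercising the new
// functional options added above; the variable names are assumptions:
//
//	lib := nvmdev.New(nvmdev.WithNvpciLib(nvpci.New()))
//	devices, err := lib.GetAllDevices()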
func (m *nvmdev) GetAllDevices() ([]*Device, error) { deviceDirs, err := os.ReadDir(m.mdevDevicesRoot) if err != nil { @@ -110,7 +128,7 @@ func (m *nvmdev) GetAllDevices() ([]*Device, error) { var nvdevices []*Device for _, deviceDir := range deviceDirs { - nvdevice, err := NewDevice(m.mdevDevicesRoot, deviceDir.Name()) + nvdevice, err := m.NewDevice(m.mdevDevicesRoot, deviceDir.Name()) if err != nil { return nil, fmt.Errorf("error constructing MDEV device: %v", err) } @@ -123,8 +141,8 @@ func (m *nvmdev) GetAllDevices() ([]*Device, error) { return nvdevices, nil } -// NewDevice constructs a Device, which represents an NVIDIA mdev (vGPU) device -func NewDevice(root string, uuid string) (*Device, error) { +// NewDevice constructs a Device, which represents an NVIDIA mdev (vGPU) device. +func (n *nvmdev) NewDevice(root string, uuid string) (*Device, error) { path := path.Join(root, uuid) m, err := newMdev(path) @@ -132,7 +150,7 @@ func NewDevice(root string, uuid string) (*Device, error) { return nil, err } - parent, err := NewParentDevice(m.parentDevicePath()) + parent, err := n.NewParentDevice(m.parentDevicePath()) if err != nil { return nil, fmt.Errorf("error constructing NVIDIA PCI device: %v", err) } @@ -240,14 +258,15 @@ func (m mdev) iommuGroup() (int, error) { return int(iommuGroup), nil } -// NewParentDevice constructs a ParentDevice -func NewParentDevice(devicePath string) (*ParentDevice, error) { - nvdevice, err := newNvidiaPCIDeviceFromPath(devicePath) +// NewParentDevice constructs a ParentDevice. +func (m *nvmdev) NewParentDevice(devicePath string) (*ParentDevice, error) { + address := filepath.Base(devicePath) + nvdevice, err := m.nvpci.GetGPUByPciBusID(address) if err != nil { return nil, fmt.Errorf("failed to construct NVIDIA PCI device: %v", err) } if nvdevice == nil { - // not a NVIDIA device + // not a NVIDIA device. return nil, err } @@ -275,7 +294,7 @@ func NewParentDevice(devicePath string) (*ParentDevice, error) { return &ParentDevice{nvdevice, mdevTypesMap}, err } -// CreateMDEVDevice creates a mediated device (vGPU) on the parent GPU +// CreateMDEVDevice creates a mediated device (vGPU) on the parent GPU. func (p *ParentDevice) CreateMDEVDevice(mdevType string, id string) error { mdevPath, ok := p.mdevPaths[mdevType] if !ok { @@ -292,7 +311,7 @@ func (p *ParentDevice) CreateMDEVDevice(mdevType string, id string) error { return nil } -// DeleteMDEVDevice deletes a mediated device (vGPU) +// DeleteMDEVDevice deletes a mediated device (vGPU). func (p *ParentDevice) DeleteMDEVDevice(id string) error { removeFile, err := os.OpenFile(filepath.Join(p.Path, id, "remove"), os.O_WRONLY|os.O_SYNC, 0200) if err != nil { @@ -306,7 +325,7 @@ func (p *ParentDevice) DeleteMDEVDevice(id string) error { return nil } -// Delete deletes a mediated device (vGPU) +// Delete deletes a mediated device (vGPU). func (m *Device) Delete() error { removeFile, err := os.OpenFile(filepath.Join(m.Path, "remove"), os.O_WRONLY|os.O_SYNC, 0200) if err != nil { @@ -320,32 +339,27 @@ func (m *Device) Delete() error { return nil } -// GetPhysicalFunction gets the physical PCI device backing a 'parent' device -func (p *ParentDevice) GetPhysicalFunction() (*nvpci.NvidiaPCIDevice, error) { - if !p.IsVF { - return p.NvidiaPCIDevice, nil +// GetPhysicalFunction gets the physical PCI device backing a 'parent' device. 
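//
// Illustrative call-site change implied by the new signature below (not part
// of this diff; `parent` is an assumed variable):
//
//	pf := parent.GetPhysicalFunction() // previously: pf, err := parent.GetPhysicalFunction()
//
// The error return goes away because the physical function is now resolved
// from the cached SriovInfo rather than by re-reading the physfn symlink.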
+func (p *ParentDevice) GetPhysicalFunction() *nvpci.NvidiaPCIDevice { + if p.SriovInfo.IsVF() { + return p.SriovInfo.VirtualFunction.PhysicalFunction } - - physfnPath, err := filepath.EvalSymlinks(path.Join(p.Path, "physfn")) - if err != nil { - return nil, fmt.Errorf("unable to resolve %s: %v", path.Join(p.Path, "physfn"), err) - } - - return newNvidiaPCIDeviceFromPath(physfnPath) + // Either it is an SRIOV physical function or a non-SRIOV device, so return the device itself + return p.NvidiaPCIDevice } -// GetPhysicalFunction gets the physical PCI device that a vGPU is created on -func (m *Device) GetPhysicalFunction() (*nvpci.NvidiaPCIDevice, error) { +// GetPhysicalFunction gets the physical PCI device that a vGPU is created on. +func (m *Device) GetPhysicalFunction() *nvpci.NvidiaPCIDevice { return m.Parent.GetPhysicalFunction() } -// IsMDEVTypeSupported checks if the mdevType is supported by the GPU +// IsMDEVTypeSupported checks if the mdevType is supported by the GPU. func (p *ParentDevice) IsMDEVTypeSupported(mdevType string) bool { _, found := p.mdevPaths[mdevType] return found } -// IsMDEVTypeAvailable checks if a vGPU instance of mdevType can be created on the parent GPU +// IsMDEVTypeAvailable checks if a vGPU instance of mdevType can be created on the parent GPU. func (p *ParentDevice) IsMDEVTypeAvailable(mdevType string) (bool, error) { availableInstances, err := p.GetAvailableMDEVInstances(mdevType) if err != nil { @@ -375,12 +389,3 @@ func (p *ParentDevice) GetAvailableMDEVInstances(mdevType string) (int, error) { return availableInstances, nil } - -// newNvidiaPCIDeviceFromPath constructs an NvidiaPCIDevice for the specified device path. -func newNvidiaPCIDeviceFromPath(devicePath string) (*nvpci.NvidiaPCIDevice, error) { - root := filepath.Dir(devicePath) - address := filepath.Base(devicePath) - return nvpci.New( - nvpci.WithPCIDevicesRoot(root), - ).GetGPUByPciBusID(address) -} diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/bytes/bytes.go b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/bytes/bytes.go index 7788a1fbe..04fb4aa93 100644 --- a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/bytes/bytes.go +++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/bytes/bytes.go @@ -21,12 +21,12 @@ import ( "unsafe" ) -// Raw returns just the bytes without any assumptions about layout +// Raw returns just the bytes without any assumptions about layout. type Raw interface { Raw() *[]byte } -// Reader used to read various data sizes in the byte array +// Reader used to read various data sizes in the byte array. type Reader interface { Read8(pos int) uint8 Read16(pos int) uint16 @@ -35,7 +35,7 @@ type Reader interface { Len() int } -// Writer used to write various sizes of data in the byte array +// Writer used to write various sizes of data in the byte array. type Writer interface { Write8(pos int, value uint8) Write16(pos int, value uint16) @@ -44,7 +44,7 @@ type Writer interface { Len() int } -// Bytes object for manipulating arbitrary byte arrays +// Bytes object for manipulating arbitrary byte arrays. type Bytes interface { Raw Reader @@ -70,12 +70,12 @@ func init() { } } -// New raw bytearray +// New raw bytearray. func New(data *[]byte) Bytes { return (*native)(data) } -// NewLittleEndian little endian ordering of bytes +// NewLittleEndian little endian ordering of bytes. 
func NewLittleEndian(data *[]byte) Bytes { if nativeByteOrder == binary.LittleEndian { return (*native)(data) @@ -84,7 +84,7 @@ func NewLittleEndian(data *[]byte) Bytes { return (*swapbo)(data) } -// NewBigEndian big endian ordering of bytes +// NewBigEndian big endian ordering of bytes. func NewBigEndian(data *[]byte) Bytes { if nativeByteOrder == binary.BigEndian { return (*native)(data) diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/config.go b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/config.go index e25e72f68..397c86508 100644 --- a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/config.go +++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/config.go @@ -24,24 +24,24 @@ import ( ) const ( - // PCICfgSpaceStandardSize represents the size in bytes of the standard config space + // PCICfgSpaceStandardSize represents the size in bytes of the standard config space. PCICfgSpaceStandardSize = 256 - // PCICfgSpaceExtendedSize represents the size in bytes of the extended config space + // PCICfgSpaceExtendedSize represents the size in bytes of the extended config space. PCICfgSpaceExtendedSize = 4096 - // PCICapabilityListPointer represents offset for the capability list pointer + // PCICapabilityListPointer represents offset for the capability list pointer. PCICapabilityListPointer = 0x34 - // PCIStatusCapabilityList represents the status register bit which indicates capability list support + // PCIStatusCapabilityList represents the status register bit which indicates capability list support. PCIStatusCapabilityList = 0x10 - // PCIStatusBytePosition represents the position of the status register + // PCIStatusBytePosition represents the position of the status register. PCIStatusBytePosition = 0x06 ) -// ConfigSpace PCI configuration space (standard extended) file path +// ConfigSpace PCI configuration space (standard extended) file path. type ConfigSpace struct { Path string } -// ConfigSpaceIO Interface for reading and writing raw and preconfigured values +// ConfigSpaceIO Interface for reading and writing raw and preconfigured values. type ConfigSpaceIO interface { bytes.Bytes GetVendorID() uint16 @@ -53,18 +53,18 @@ type configSpaceIO struct { bytes.Bytes } -// PCIStandardCapability standard PCI config space +// PCIStandardCapability standard PCI config space. type PCIStandardCapability struct { bytes.Bytes } -// PCIExtendedCapability extended PCI config space +// PCIExtendedCapability extended PCI config space. type PCIExtendedCapability struct { bytes.Bytes Version uint8 } -// PCICapabilities combines the standard and extended config space +// PCICapabilities combines the standard and extended config space. type PCICapabilities struct { Standard map[uint8]*PCIStandardCapability Extended map[uint16]*PCIExtendedCapability diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mlxpci.go b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mlxpci.go index 62937d7f9..ddf7d19f8 100644 --- a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mlxpci.go +++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mlxpci.go @@ -22,15 +22,15 @@ import ( ) const ( - // PCIMellanoxVendorID represents PCI vendor id for Mellanox + // PCIMellanoxVendorID represents PCI vendor id for Mellanox. PCIMellanoxVendorID uint16 = 0x15b3 - // PCINetworkControllerClass represents the PCI class for network controllers + // PCINetworkControllerClass represents the PCI class for network controllers. 
PCINetworkControllerClass uint32 = 0x020000 - // PCIBridgeClass represents the PCI class for network controllers + // PCIBridgeClass represents the PCI class for PCI bridges. PCIBridgeClass uint32 = 0x060400 ) -// GetNetworkControllers returns all Mellanox Network Controller PCI devices on the system +// GetNetworkControllers returns all Mellanox Network Controller PCI devices on the system. func (p *nvpci) GetNetworkControllers() ([]*NvidiaPCIDevice, error) { devices, err := p.GetAllDevices() if err != nil { @@ -47,7 +47,7 @@ func (p *nvpci) GetNetworkControllers() ([]*NvidiaPCIDevice, error) { return filtered, nil } -// GetPciBridges retrieves all Mellanox PCI(e) Bridges +// GetPciBridges retrieves all Mellanox PCI(e) Bridges. func (p *nvpci) GetPciBridges() ([]*NvidiaPCIDevice, error) { devices, err := p.GetAllDevices() if err != nil { @@ -64,17 +64,17 @@ func (p *nvpci) GetPciBridges() ([]*NvidiaPCIDevice, error) { return filtered, nil } -// IsNetworkController if class == 0x300 +// IsNetworkController if class == 0x0200. func (d *NvidiaPCIDevice) IsNetworkController() bool { return d.Class == PCINetworkControllerClass } -// IsPciBridge if class == 0x0604 +// IsPciBridge if class == 0x0604. func (d *NvidiaPCIDevice) IsPciBridge() bool { return d.Class == PCIBridgeClass } -// IsDPU returns if a device is a DPU +// IsDPU returns if a device is a DPU. func (d *NvidiaPCIDevice) IsDPU() bool { if !strings.Contains(d.DeviceName, "BlueField") { return false @@ -87,7 +87,7 @@ func (d *NvidiaPCIDevice) IsDPU() bool { return false } -// GetDPUs returns all Mellanox DPU devices on the system +// GetDPUs returns all Mellanox DPU devices on the system. func (p *nvpci) GetDPUs() ([]*NvidiaPCIDevice, error) { devices, err := p.GetNetworkControllers() if err != nil { diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mmio/mmio.go b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mmio/mmio.go index 1535fa049..88dd7ddf5 100644 --- a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mmio/mmio.go +++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mmio/mmio.go @@ -25,7 +25,7 @@ import ( "github.com/NVIDIA/go-nvlib/pkg/nvpci/bytes" ) -// Mmio memory map a region +// Mmio memory map a region. type Mmio interface { bytes.Raw bytes.Reader @@ -84,12 +84,12 @@ func open(path string, offset int, size int, flags int) (Mmio, error) { return &mmio{bytes.New(&mmap)}, nil } -// OpenRO open region readonly +// OpenRO open region readonly. func OpenRO(path string, offset int, size int) (Mmio, error) { return open(path, offset, size, os.O_RDONLY) } -// OpenRW open region read write +// OpenRW open region read write. func OpenRW(path string, offset int, size int) (Mmio, error) { return open(path, offset, size, os.O_RDWR) } diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mmio/mock.go b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mmio/mock.go index 57151b3ff..da3074c17 100644 --- a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mmio/mock.go +++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mmio/mock.go @@ -48,18 +48,21 @@ func mockOpen(source *[]byte, offset int, size int, rw bool) (Mmio, error) { return m, nil } -// MockOpenRO open read only +// MockOpenRO open read only. func MockOpenRO(source *[]byte, offset int, size int) (Mmio, error) { return mockOpen(source, offset, size, false) } -// MockOpenRW open read write +// MockOpenRW open read write.
 func MockOpenRW(source *[]byte, offset int, size int) (Mmio, error) {
 	return mockOpen(source, offset, size, true)
 }
 
 func (m *mockMmio) Close() error {
-	m = &mockMmio{}
+	m.Bytes = nil
+	m.source = nil
+	m.offset = 0
+	m.rw = false
 	return nil
 }
diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mock.go b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mock.go
index e42271212..9b3d6e2aa 100644
--- a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mock.go
+++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/mock.go
@@ -20,18 +20,20 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"regexp"
+	"strconv"
 
 	"github.com/NVIDIA/go-nvlib/pkg/nvpci/bytes"
 )
 
-// MockNvpci mock pci device
+// MockNvpci mock pci device.
 type MockNvpci struct {
 	*nvpci
}
 
 var _ Interface = (*MockNvpci)(nil)
 
-// NewMockNvpci create new mock PCI and remove old devices
+// NewMockNvpci creates a new mock PCI and removes old devices.
 func NewMockNvpci() (mock *MockNvpci, rerr error) {
 	rootDir, err := os.MkdirTemp(os.TempDir(), "")
 	if err != nil {
@@ -50,69 +52,119 @@ func NewMockNvpci() (mock *MockNvpci, rerr error) {
 	return mock, nil
 }
 
-// Cleanup remove the mocked PCI devices root folder
+// Cleanup removes the mocked PCI devices root folder.
 func (m *MockNvpci) Cleanup() {
 	os.RemoveAll(m.pciDevicesRoot)
 }
 
-// AddMockA100 Create an A100 like GPU mock device
-func (m *MockNvpci) AddMockA100(address string, numaNode int) error {
+func validatePCIAddress(addr string) error {
+	r := regexp.MustCompile(`0{4}:[0-9a-f]{2}:[0-9a-f]{2}\.[0-9]`)
+	if !r.Match([]byte(addr)) {
+		return fmt.Errorf(`invalid PCI address, should match 0{4}:[0-9a-f]{2}:[0-9a-f]{2}\.[0-9]: %s`, addr)
+	}
+
+	return nil
+}
+
+// AddMockA100 creates an A100-like GPU mock device.
+func (m *MockNvpci) AddMockA100(address string, numaNode int, sriov *SriovInfo) error {
+	err := validatePCIAddress(address)
+	if err != nil {
+		return err
+	}
+
 	deviceDir := filepath.Join(m.pciDevicesRoot, address)
-	err := os.MkdirAll(deviceDir, 0755)
+	err = os.MkdirAll(deviceDir, 0755)
 	if err != nil {
 		return err
 	}
 
-	vendor, err := os.Create(filepath.Join(deviceDir, "vendor"))
+	err = createNVIDIAgpuFiles(deviceDir)
 	if err != nil {
 		return err
 	}
-	_, err = vendor.WriteString(fmt.Sprintf("0x%x", PCINvidiaVendorID))
+
+	iommuGroup := 20
+	_, err = os.Create(filepath.Join(deviceDir, strconv.Itoa(iommuGroup)))
+	if err != nil {
+		return err
+	}
+	err = os.Symlink(filepath.Join(deviceDir, strconv.Itoa(iommuGroup)), filepath.Join(deviceDir, "iommu_group"))
 	if err != nil {
 		return err
 	}
 
-	class, err := os.Create(filepath.Join(deviceDir, "class"))
+	numa, err := os.Create(filepath.Join(deviceDir, "numa_node"))
 	if err != nil {
 		return err
 	}
-	_, err = class.WriteString(fmt.Sprintf("0x%x", PCI3dControllerClass))
+	_, err = numa.WriteString(fmt.Sprintf("%v", numaNode))
 	if err != nil {
 		return err
 	}
 
-	device, err := os.Create(filepath.Join(deviceDir, "device"))
+	if sriov != nil && sriov.PhysicalFunction != nil {
+		totalVFs, err := os.Create(filepath.Join(deviceDir, "sriov_totalvfs"))
+		if err != nil {
+			return err
+		}
+		_, err = fmt.Fprintf(totalVFs, "%d", sriov.PhysicalFunction.TotalVFs)
+		if err != nil {
+			return err
+		}
+
+		numVFs, err := os.Create(filepath.Join(deviceDir, "sriov_numvfs"))
+		if err != nil {
+			return err
+		}
+		_, err = fmt.Fprintf(numVFs, "%d", sriov.PhysicalFunction.NumVFs)
+		if err != nil {
+			return err
+		}
+		for i := 1; i <= int(sriov.PhysicalFunction.NumVFs); i++ {
+			err = m.createVf(address, i, iommuGroup, numaNode)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func createNVIDIAgpuFiles(deviceDir string) error {
+	vendor, err := os.Create(filepath.Join(deviceDir, "vendor"))
 	if err != nil {
 		return err
 	}
-	_, err = device.WriteString("0x20bf")
+	_, err = vendor.WriteString(fmt.Sprintf("0x%x", PCINvidiaVendorID))
 	if err != nil {
 		return err
 	}
 
-	_, err = os.Create(filepath.Join(deviceDir, "nvidia"))
+	class, err := os.Create(filepath.Join(deviceDir, "class"))
 	if err != nil {
 		return err
 	}
-	err = os.Symlink(filepath.Join(deviceDir, "nvidia"), filepath.Join(deviceDir, "driver"))
+	_, err = class.WriteString(fmt.Sprintf("0x%x", PCI3dControllerClass))
 	if err != nil {
 		return err
 	}
 
-	_, err = os.Create(filepath.Join(deviceDir, "20"))
+	device, err := os.Create(filepath.Join(deviceDir, "device"))
 	if err != nil {
 		return err
 	}
-	err = os.Symlink(filepath.Join(deviceDir, "20"), filepath.Join(deviceDir, "iommu_group"))
+	_, err = device.WriteString("0x20bf")
 	if err != nil {
 		return err
 	}
 
-	numa, err := os.Create(filepath.Join(deviceDir, "numa_node"))
+	_, err = os.Create(filepath.Join(deviceDir, "nvidia"))
 	if err != nil {
 		return err
 	}
-	_, err = numa.WriteString(fmt.Sprintf("%v", numaNode))
+	err = os.Symlink(filepath.Join(deviceDir, "nvidia"), filepath.Join(deviceDir, "driver"))
 	if err != nil {
 		return err
 	}
@@ -156,3 +208,53 @@ func (m *MockNvpci) AddMockA100(address string, numaNode int) error {
 
 	return nil
 }
+
+func (m *MockNvpci) createVf(pfAddress string, id, iommu_group, numaNode int) error {
+	functionID := pfAddress[len(pfAddress)-1]
+	// verify that the last character of pfAddress is an integer.
+	functionNumber, err := strconv.Atoi(string(functionID))
+	if err != nil {
+		return fmt.Errorf("can't convert physical function pci address function number %s to integer: %v", string(functionID), err)
+	}
+
+	vfFunctionNumber := functionNumber + id
+	vfAddress := pfAddress[:len(pfAddress)-1] + strconv.Itoa(vfFunctionNumber)
+
+	deviceDir := filepath.Join(m.pciDevicesRoot, vfAddress)
+	err = os.MkdirAll(deviceDir, 0755)
+	if err != nil {
+		return err
+	}
+
+	err = createNVIDIAgpuFiles(deviceDir)
+	if err != nil {
+		return err
+	}
+
+	vfIommuGroup := strconv.Itoa(iommu_group + id)
+
+	_, err = os.Create(filepath.Join(deviceDir, vfIommuGroup))
+	if err != nil {
+		return err
+	}
+	err = os.Symlink(filepath.Join(deviceDir, vfIommuGroup), filepath.Join(deviceDir, "iommu_group"))
+	if err != nil {
+		return err
+	}
+
+	numa, err := os.Create(filepath.Join(deviceDir, "numa_node"))
+	if err != nil {
+		return err
+	}
+	_, err = numa.WriteString(fmt.Sprintf("%v", numaNode))
+	if err != nil {
+		return err
+	}
+
+	err = os.Symlink(filepath.Join(m.pciDevicesRoot, pfAddress), filepath.Join(deviceDir, "physfn"))
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/nvpci.go b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/nvpci.go
index 5210ff504..6ff197b15 100644
--- a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/nvpci.go
+++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/nvpci.go
@@ -29,23 +29,23 @@ import (
 )
 
 const (
-	// PCIDevicesRoot represents base path for all pci devices under sysfs
+	// PCIDevicesRoot represents base path for all pci devices under sysfs.
 	PCIDevicesRoot = "/sys/bus/pci/devices"
-	// PCINvidiaVendorID represents PCI vendor id for NVIDIA
+	// PCINvidiaVendorID represents PCI vendor id for NVIDIA.
 	PCINvidiaVendorID uint16 = 0x10de
-	// PCIVgaControllerClass represents the PCI class for VGA Controllers
+	// PCIVgaControllerClass represents the PCI class for VGA Controllers.
 	PCIVgaControllerClass uint32 = 0x030000
-	// PCI3dControllerClass represents the PCI class for 3D Graphics accellerators
+	// PCI3dControllerClass represents the PCI class for 3D Graphics accelerators.
 	PCI3dControllerClass uint32 = 0x030200
-	// PCINvSwitchClass represents the PCI class for NVSwitches
+	// PCINvSwitchClass represents the PCI class for NVSwitches.
 	PCINvSwitchClass uint32 = 0x068000
-	// UnknownDeviceString is the device name to set for devices not found in the PCI database
+	// UnknownDeviceString is the device name to set for devices not found in the PCI database.
 	UnknownDeviceString = "UNKNOWN_DEVICE"
-	// UnknownClassString is the class name to set for devices not found in the PCI database
+	// UnknownClassString is the class name to set for devices not found in the PCI database.
 	UnknownClassString = "UNKNOWN_CLASS"
 )
 
-// Interface allows us to get a list of all NVIDIA PCI devices
+// Interface allows us to get a list of all NVIDIA PCI devices.
 type Interface interface {
 	GetAllDevices() ([]*NvidiaPCIDevice, error)
 	Get3DControllers() ([]*NvidiaPCIDevice, error)
@@ -59,10 +59,10 @@ type Interface interface {
 	GetDPUs() ([]*NvidiaPCIDevice, error)
 }
 
-// MemoryResources a more human readable handle
+// MemoryResources is a more human-readable handle.
 type MemoryResources map[int]*MemoryResource
 
-// ResourceInterface exposes some higher level functions of resources
+// ResourceInterface exposes some higher level functions of resources.
 type ResourceInterface interface {
 	GetTotalAddressableMemory(bool) (uint64, uint64)
 }
@@ -76,7 +76,33 @@ type nvpci struct {
 
 var _ Interface = (*nvpci)(nil)
 var _ ResourceInterface = (*MemoryResources)(nil)
 
-// NvidiaPCIDevice represents a PCI device for an NVIDIA product
+// SriovInfo indicates whether a device is a VF or a PF for SR-IOV-capable devices.
+// Only one should be set at any given time.
+type SriovInfo struct {
+	PhysicalFunction *SriovPhysicalFunction
+	VirtualFunction  *SriovVirtualFunction
+}
+
+// SriovPhysicalFunction stores info about an SRIOV physical function.
+type SriovPhysicalFunction struct {
+	TotalVFs uint64
+	NumVFs   uint64
+}
+
+// SriovVirtualFunction keeps data about an SRIOV virtual function.
+type SriovVirtualFunction struct {
+	PhysicalFunction *NvidiaPCIDevice
+}
+
+func (s *SriovInfo) IsPF() bool {
+	return s != nil && s.PhysicalFunction != nil
+}
+
+func (s *SriovInfo) IsVF() bool {
+	return s != nil && s.VirtualFunction != nil
+}
+
+// NvidiaPCIDevice represents a PCI device for an NVIDIA product.
 type NvidiaPCIDevice struct {
 	Path    string
 	Address string
@@ -90,37 +116,37 @@ type NvidiaPCIDevice struct {
 	NumaNode  int
 	Config    *ConfigSpace
 	Resources MemoryResources
-	IsVF      bool
+	SriovInfo SriovInfo
 }
 
-// IsVGAController if class == 0x300
+// IsVGAController if class == 0x0300.
 func (d *NvidiaPCIDevice) IsVGAController() bool {
 	return d.Class == PCIVgaControllerClass
 }
 
-// Is3DController if class == 0x302
+// Is3DController if class == 0x0302.
 func (d *NvidiaPCIDevice) Is3DController() bool {
 	return d.Class == PCI3dControllerClass
 }
 
-// IsNVSwitch if class == 0x068
+// IsNVSwitch if class == 0x0680.
 func (d *NvidiaPCIDevice) IsNVSwitch() bool {
 	return d.Class == PCINvSwitchClass
 }
 
-// IsGPU either VGA for older cards or 3D for newer
+// IsGPU either VGA for older cards or 3D for newer.
 func (d *NvidiaPCIDevice) IsGPU() bool {
 	return d.IsVGAController() || d.Is3DController()
 }
 
 // IsResetAvailable some devices can be reset without rebooting,
-// check if applicable
+// check if applicable.
 func (d *NvidiaPCIDevice) IsResetAvailable() bool {
 	_, err := os.Stat(path.Join(d.Path, "reset"))
 	return err == nil
 }
 
-// Reset perform a reset to apply a new configuration at HW level
+// Reset performs a reset to apply a new configuration at the HW level.
 func (d *NvidiaPCIDevice) Reset() error {
 	err := os.WriteFile(path.Join(d.Path, "reset"), []byte("1"), 0)
 	if err != nil {
@@ -129,7 +155,7 @@ func (d *NvidiaPCIDevice) Reset() error {
 	return nil
 }
 
-// New interface that allows us to get a list of all NVIDIA PCI devices
+// New creates an interface that allows us to get a list of all NVIDIA PCI devices.
 func New(opts ...Option) Interface {
 	n := &nvpci{}
 	for _, opt := range opts {
@@ -144,10 +170,10 @@ func New(opts ...Option) Interface {
 	return n
 }
 
-// Option defines a function for passing options to the New() call
+// Option defines a function for passing options to the New() call.
 type Option func(*nvpci)
 
-// WithLogger provides an Option to set the logger for the library
+// WithLogger provides an Option to set the logger for the library.
 func WithLogger(logger logger) Option {
 	return func(n *nvpci) {
 		n.logger = logger
@@ -170,7 +196,7 @@ func WithPCIDatabasePath(path string) Option {
 	}
 }
 
-// GetAllDevices returns all Nvidia PCI devices on the system
+// GetAllDevices returns all Nvidia PCI devices on the system.
 func (p *nvpci) GetAllDevices() ([]*NvidiaPCIDevice, error) {
 	deviceDirs, err := os.ReadDir(p.pciDevicesRoot)
 	if err != nil {
@@ -178,9 +204,11 @@ func (p *nvpci) GetAllDevices() ([]*NvidiaPCIDevice, error) {
 	}
 
 	var nvdevices []*NvidiaPCIDevice
+	// Cache devices for each GetAllDevices invocation to speed things up.
+	cache := make(map[string]*NvidiaPCIDevice)
 	for _, deviceDir := range deviceDirs {
 		deviceAddress := deviceDir.Name()
-		nvdevice, err := p.GetGPUByPciBusID(deviceAddress)
+		nvdevice, err := p.getGPUByPciBusID(deviceAddress, cache)
 		if err != nil {
 			return nil, fmt.Errorf("error constructing NVIDIA PCI device %s: %v", deviceAddress, err)
 		}
@@ -204,8 +232,18 @@ func (p *nvpci) GetAllDevices() ([]*NvidiaPCIDevice, error) {
 	return nvdevices, nil
 }
 
-// GetGPUByPciBusID constructs an NvidiaPCIDevice for the specified address (PCI Bus ID)
+// GetGPUByPciBusID constructs an NvidiaPCIDevice for the specified address (PCI Bus ID).
 func (p *nvpci) GetGPUByPciBusID(address string) (*NvidiaPCIDevice, error) {
+	// Pass a nil cache to force reading device information from sysfs.
+ return p.getGPUByPciBusID(address, nil) +} + +func (p *nvpci) getGPUByPciBusID(address string, cache map[string]*NvidiaPCIDevice) (*NvidiaPCIDevice, error) { + if cache != nil { + if pciDevice, exists := cache[address]; exists { + return pciDevice, nil + } + } devicePath := filepath.Join(p.pciDevicesRoot, address) vendor, err := os.ReadFile(path.Join(devicePath, "vendor")) @@ -265,16 +303,6 @@ func (p *nvpci) GetGPUByPciBusID(address string) (*NvidiaPCIDevice, error) { return nil, fmt.Errorf("unable to detect iommu_group for %s: %v", address, err) } - // device is a virtual function (VF) if "physfn" symlink exists - var isVF bool - _, err = filepath.EvalSymlinks(path.Join(devicePath, "physfn")) - if err == nil { - isVF = true - } - if err != nil && !os.IsNotExist(err) { - return nil, fmt.Errorf("unable to resolve %s: %v", path.Join(devicePath, "physfn"), err) - } - numa, err := os.ReadFile(path.Join(devicePath, "numa_node")) if err != nil { return nil, fmt.Errorf("unable to read PCI NUMA node for %s: %v", address, err) @@ -328,6 +356,28 @@ func (p *nvpci) GetGPUByPciBusID(address string) (*NvidiaPCIDevice, error) { className = UnknownClassString } + var sriovInfo SriovInfo + // Device is a virtual function (VF) if "physfn" symlink exists. + physFnAddress, err := filepath.EvalSymlinks(path.Join(devicePath, "physfn")) + if err == nil { + physFn, err := p.getGPUByPciBusID(filepath.Base(physFnAddress), cache) + if err != nil { + return nil, fmt.Errorf("unable to detect physfn for %s: %v", address, err) + } + sriovInfo = SriovInfo{ + VirtualFunction: &SriovVirtualFunction{ + PhysicalFunction: physFn, + }, + } + } else if os.IsNotExist(err) { + sriovInfo, err = p.getSriovInfoForPhysicalFunction(devicePath) + if err != nil { + return nil, fmt.Errorf("unable to read SRIOV physical function details for %s: %v", devicePath, err) + } + } else { + return nil, fmt.Errorf("unable to read %s: %v", path.Join(devicePath, "physfn"), err) + } + nvdevice := &NvidiaPCIDevice{ Path: devicePath, Address: address, @@ -339,15 +389,20 @@ func (p *nvpci) GetGPUByPciBusID(address string) (*NvidiaPCIDevice, error) { NumaNode: int(numaNode), Config: config, Resources: resources, - IsVF: isVF, DeviceName: deviceName, ClassName: className, + SriovInfo: sriovInfo, + } + + // Cache physical functions only as VF can't be a root device. + if cache != nil && sriovInfo.IsPF() { + cache[address] = nvdevice } return nvdevice, nil } -// Get3DControllers returns all NVIDIA 3D Controller PCI devices on the system +// Get3DControllers returns all NVIDIA 3D Controller PCI devices on the system. func (p *nvpci) Get3DControllers() ([]*NvidiaPCIDevice, error) { devices, err := p.GetAllDevices() if err != nil { @@ -364,7 +419,7 @@ func (p *nvpci) Get3DControllers() ([]*NvidiaPCIDevice, error) { return filtered, nil } -// GetVGAControllers returns all NVIDIA VGA Controller PCI devices on the system +// GetVGAControllers returns all NVIDIA VGA Controller PCI devices on the system. func (p *nvpci) GetVGAControllers() ([]*NvidiaPCIDevice, error) { devices, err := p.GetAllDevices() if err != nil { @@ -381,7 +436,7 @@ func (p *nvpci) GetVGAControllers() ([]*NvidiaPCIDevice, error) { return filtered, nil } -// GetNVSwitches returns all NVIDIA NVSwitch PCI devices on the system +// GetNVSwitches returns all NVIDIA NVSwitch PCI devices on the system. 
 func (p *nvpci) GetNVSwitches() ([]*NvidiaPCIDevice, error) {
 	devices, err := p.GetAllDevices()
 	if err != nil {
@@ -398,7 +453,7 @@ func (p *nvpci) GetNVSwitches() ([]*NvidiaPCIDevice, error) {
 	return filtered, nil
 }
 
-// GetGPUs returns all NVIDIA GPU devices on the system
+// GetGPUs returns all NVIDIA GPU devices on the system.
 func (p *nvpci) GetGPUs() ([]*NvidiaPCIDevice, error) {
 	devices, err := p.GetAllDevices()
 	if err != nil {
@@ -407,7 +462,7 @@ func (p *nvpci) GetGPUs() ([]*NvidiaPCIDevice, error) {
 
 	var filtered []*NvidiaPCIDevice
 	for _, d := range devices {
-		if d.IsGPU() && !d.IsVF {
+		if d.IsGPU() && !d.SriovInfo.IsVF() {
 			filtered = append(filtered, d)
 		}
 	}
@@ -415,7 +470,7 @@ func (p *nvpci) GetGPUs() ([]*NvidiaPCIDevice, error) {
 	return filtered, nil
 }
 
-// GetGPUByIndex returns an NVIDIA GPU device at a particular index
+// GetGPUByIndex returns an NVIDIA GPU device at a particular index.
 func (p *nvpci) GetGPUByIndex(i int) (*NvidiaPCIDevice, error) {
 	gpus, err := p.GetGPUs()
 	if err != nil {
@@ -428,3 +483,41 @@ func (p *nvpci) GetGPUByIndex(i int) (*NvidiaPCIDevice, error) {
 
 	return gpus[i], nil
 }
+
+func (p *nvpci) getSriovInfoForPhysicalFunction(devicePath string) (sriovInfo SriovInfo, err error) {
+	totalVfsPath := filepath.Join(devicePath, "sriov_totalvfs")
+	numVfsPath := filepath.Join(devicePath, "sriov_numvfs")
+
+	// If no sriov_totalvfs file exists, this is not an SRIOV device; return empty info.
+	_, err = os.Stat(totalVfsPath)
+	if err != nil && os.IsNotExist(err) {
+		return sriovInfo, nil
+	}
+	sriovTotalVfs, err := os.ReadFile(totalVfsPath)
+	if err != nil {
+		return sriovInfo, fmt.Errorf("unable to read sriov_totalvfs: %v", err)
+	}
+	totalVfsStr := strings.TrimSpace(string(sriovTotalVfs))
+	totalVfsInt, err := strconv.ParseUint(totalVfsStr, 10, 16)
+	if err != nil {
+		return sriovInfo, fmt.Errorf("unable to convert sriov_totalvfs to uint64: %v", err)
+	}
+
+	sriovNumVfs, err := os.ReadFile(numVfsPath)
+	if err != nil {
+		return sriovInfo, fmt.Errorf("unable to read sriov_numvfs: %v", err)
+	}
+	numVfsStr := strings.TrimSpace(string(sriovNumVfs))
+	numVfsInt, err := strconv.ParseUint(numVfsStr, 10, 16)
+	if err != nil {
+		return sriovInfo, fmt.Errorf("unable to convert sriov_numvfs to uint64: %v", err)
+	}
+
+	sriovInfo = SriovInfo{
+		PhysicalFunction: &SriovPhysicalFunction{
+			TotalVFs: totalVfsInt,
+			NumVFs:   numVfsInt,
+		},
+	}
+	return sriovInfo, nil
+}
diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/resources.go b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/resources.go
index 6c6e53eec..b3b7d3155 100644
--- a/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/resources.go
+++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/nvpci/resources.go
@@ -29,7 +29,7 @@ const (
 	pmcBigEndian = 0x01000001
 )
 
-// MemoryResource represents a mmio region
+// MemoryResource represents an mmio region.
 type MemoryResource struct {
 	Start uintptr
 	End   uintptr
@@ -37,7 +37,7 @@ type MemoryResource struct {
 	Path string
 }
 
-// OpenRW read write mmio region
+// OpenRW opens the mmio region read-write.
 func (mr *MemoryResource) OpenRW() (mmio.Mmio, error) {
 	rw, err := mmio.OpenRW(mr.Path, 0, int(mr.End-mr.Start+1))
 	if err != nil {
@@ -52,7 +52,7 @@ func (mr *MemoryResource) OpenRW() (mmio.Mmio, error) {
 	return nil, fmt.Errorf("unknown endianness for mmio: %v", err)
 }
 
-// OpenRO read only mmio region
+// OpenRO opens the mmio region read-only.
func (mr *MemoryResource) OpenRO() (mmio.Mmio, error) { ro, err := mmio.OpenRO(mr.Path, 0, int(mr.End-mr.Start+1)) if err != nil { @@ -67,7 +67,7 @@ func (mr *MemoryResource) OpenRO() (mmio.Mmio, error) { return nil, fmt.Errorf("unknown endianness for mmio: %v", err) } -// From Bit Twiddling Hacks, great resource for all low level bit manipulations +// From Bit Twiddling Hacks, great resource for all low level bit manipulations. func calcNextPowerOf2(n uint64) uint64 { n-- n |= n >> 1 @@ -83,7 +83,7 @@ func calcNextPowerOf2(n uint64) uint64 { // GetTotalAddressableMemory will accumulate the 32bit and 64bit memory windows // of each BAR and round the value if needed to the next power of 2; first -// return value is the accumulated 32bit addresable memory size the second one +// return value is the accumulated 32bit addressable memory size the second one // is the accumulated 64bit addressable memory size in bytes. These values are // needed to configure virtualized environments. func (mrs MemoryResources) GetTotalAddressableMemory(roundUp bool) (uint64, uint64) { diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/pciids/default_pci.ids b/vendor/github.com/NVIDIA/go-nvlib/pkg/pciids/default_pci.ids index 50115d979..f16867890 100644 --- a/vendor/github.com/NVIDIA/go-nvlib/pkg/pciids/default_pci.ids +++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/pciids/default_pci.ids @@ -1,8 +1,8 @@ # # List of PCI ID's # -# Version: 2023.09.22 -# Date: 2023-09-22 03:15:02 +# Version: 2024.06.23 +# Date: 2024-06-23 03:15:02 # # Maintained by Albert Pool, Martin Mares, and other volunteers from # the PCI ID Project at https://pci-ids.ucw.cz/. @@ -48,7 +48,11 @@ 7a15 Vivante GPU (Graphics Processing Unit) 7a19 PCI-to-PCI Bridge 7a24 OHCI USB Controller +# Found on 7A2000 PCH + 7a25 LG100 GPU 7a29 PCI-to-PCI Bridge +# Found on 7A2000 PCH + 7a36 Display Controller 0018 Fn-Link Technology Limited 6252 6252CPUB 802.11ax PCIe Wireless Network Adapter 001c PEAK-System Technik GmbH @@ -82,16 +86,37 @@ 8139 HNE-300 (RealTek RTL8139c) [iPaq Networking] 025e Solidigm 0b60 NVMe DC SSD [Sentinel Rock Plus controller] + 025e 8008 NVMe DC SSD U.2 15mm [D7-P5510] 025e 8208 NVMe DC SSD U.2 15mm [D7-P5810] + 025e 8d1d NVMe DC SSD E1.L 9.5mm [D5-P5316] + 025e 9008 NVMe DC SSD U.2 15mm [D7-P5520] + 025e 900c NVMe DC SSD E1.S 9.5mm [D7-P5520] + 025e 900d NVMe DC SSD E1.S 15mm [D7-P5520] + 025e 901c NVMe DC SSD E1.L 9.5mm [D7-P5520] + 025e 9108 NVMe DC SSD U.2 15mm [D7-P5620] + 025e c008 NVMe DC SSD U.2 15mm [D5-P5530] 025e d408 NVMe DC SSD U.2 15mm [D5-P5430] 025e d40c NVMe DC SSD E1.S 9.5mm [D5-P5430] 025e d419 NVMe DC SSD E3.S 7.5mm [D5-P5430] 025e d808 NVMe DC SSD U.2 15mm [D5-P5336] 025e d819 NVMe DC SSD E3.S 7.5mm [D5-P5336] + 025e d81c NVMe DC SSD E1.L 18mm [D5-P5336] 025e d81d NVMe DC SSD E1.L 9.5mm [D5-P5336] 0b70 NVMe DC SSD [Yorktown controller] - f1ab P41 Plus NVMe SSD (DRAM-less) - f1ac P44 Pro NVMe SSD + 2b59 NVMe DC SSD [Atomos Prime] + 025e 0008 NVMe DC SSD U.2-SFF 15mm [D7-PS1010] + 025e 0019 NVMe DC SSD E3.S-1T 7.5mm [D7-PS1010] + 025e 0108 NVMe DC SSD U.2-SFF 15mm [D7-PS1030] + 025e 0119 NVMe DC SSD E3.S-1T 7.5mm [D7-PS1030] + 108e 48a0 NVMe DC SSD U.2-SFF 15mm 3.84TB [D7-PS1010 Custom] + 108e 48a1 NVMe DC SSD U.2-SFF 15mm 7.68TB [D7-PS1010 Custom] + 108e 48a2 NVMe DC SSD U.2-SFF 15mm 15.36TB [D7-PS1010 Custom] + 108e 48a3 NVMe DC SSD Add-In-Card [D7-PS1030 Custom] + 108e 48a4 NVMe DC SSD E3.S-1T 7.5mm 3.84TB [D7-PS1010 Custom] + 108e 48a5 NVMe DC SSD E3.S-1T 7.5mm 7.68TB [D7-PS1010 Custom] + 108e 48a6 NVMe DC SSD 
E3.S-1T 7.5mm 15.36TB [D7-PS1010 Custom] + f1ab P41 Plus NVMe SSD (DRAM-less) [Echo Harbor] + f1ac P44 Pro NVMe SSD [Hollywood Beach] 0270 Hauppauge computer works Inc. (Wrong ID) 0291 Davicom Semiconductor, Inc. (Wrong ID) # SpeedStream is Efficient Networks, Inc, a Siemens Company @@ -177,6 +202,12 @@ 0a06 RCB672FXX 672-channel modular analog telephony card 0bae Bachmann electronic GmbH 0ccd Preferred Networks, Inc. + 0110 MN-Core + 0120 MN-Core 2 + 0ccd 0000 MN-Core 2 16GB + 0ccd 0010 MN-Core 2 32GB + 0200 MN-Core Direct Connect + 0201 MN-Core 2 Middle-plane 0e11 Compaq Computer Corporation 0001 PCI to EISA Bridge 0002 PCI to ISA Bridge @@ -434,6 +465,7 @@ 1028 1f07 SAS 5/iR Integrated RAID Controller 1028 1f08 SAS 5/iR Integrated RAID Controller 1028 1f09 SAS 5/iR Adapter RAID Controller + 103c 3228 SAS3080X-HP 8-port PCI-X 133MHz Host Bus Adapter with 2xSFF-8484 15ad 1976 SAS Controller 0055 SAS1068 PCI-X Fusion-MPT SAS 1033 8336 SAS1068 @@ -595,6 +627,7 @@ 1bd4 000e 6G SAS2008IR 1bd4 000f 6G SAS2008IT SA5248 1bd4 0010 6G SAS2008IR SA5248 + 4c52 96c8 LRSA96C8 8-Port SATA3(6Gb/s)Exchange Adapter (with Raid) 8086 350f RMS2LL040 RAID Controller 8086 3700 SSD 910 Series 0073 MegaRAID SAS 2008 [Falcon] @@ -744,7 +777,7 @@ 1bd4 0026 12G SAS3008IT RACK 1bd4 0027 12G SAS3008IMR RACK 1bd4 0028 12G SAS3008IR RACK - 00a5 Fusion-MPT 24GSAS/PCIe SAS40xx + 00a5 Fusion-MPT 24GSAS/PCIe SAS40xx/41xx 1000 4600 MegaRAID 9670W-16i Tri-Mode Storage Adapter 1000 4610 MegaRAID 9670-24i Tri-Mode Storage Adapter 1000 4620 MegaRAID 9660-16i Tri-Mode Storage Adapter @@ -773,6 +806,9 @@ 1028 2142 HBA465e Adapter 1028 2209 HBA465i Adapter 1028 220a HBA465i Front + 1028 22cb PERC H365i Front + 1028 22cc PERC H965i Front + 1028 22cd HBA465i Front 15d9 1d03 AOC-S4116L-H16IR (16DD/96DD) RAID Adapter 15d9 1d07 AOC-S4016L-L16IT Storage Adapter 15d9 1d08 AOC-S4016L-L16IR Storage Adapter @@ -816,6 +852,24 @@ 1000 5021 eHBA 9700W-16i 24G SAS/PCIe Storage Adapter # 9700 16 external port Storage controller 1000 5030 eHBA 9700-16e 24G SAS/PCIe Storage Adapter + 1028 22d2 PERC H975i Front + 1028 22d3 PERC H975i Adapter + 1d49 020b ThinkSystem 460-16e SAS/SATA PCIe Gen5 24Gb HBA + 00b5 Fusion-MPT 24G SAS/PCIe SAS50xx/SAS51xx +# 9760W 32 internal port RAID controller + 1000 5000 MegaRAID 9760W-32i 24G SAS/PCIe Storage Adapter +# 9760W 16 internal port RAID controller + 1000 5001 MegaRAID 9760W-16i 24G SAS/PCIe Storage Adapter +# 9760W 16 internal and 16 external port RAID controller + 1000 5010 MegaRAID 9760W-16i16e 24G SAS/PCIe Storage Adapter +# 9700W 32 internal port Storage controller + 1000 5020 eHBA 9700W-32i 24G SAS/PCIe Storage Adapter +# 9700W 16 internal port Storage controller + 1000 5021 eHBA 9700W-16i 24G SAS/PCIe Storage Adapter +# 9700 16 external port Storage controller + 1000 5030 eHBA 9700-16e 24G SAS/PCIe Storage Adapter +# Broadcom next-gen MPT PCIe switch + 00b8 Fusion-MPT Switch SAS50xx/SAS51xx 00be SAS3504 Fusion-MPT Tri-Mode RAID On Chip (ROC) 00bf SAS3404 Fusion-MPT Tri-Mode I/O Controller Chip (IOC) 00c0 SAS3324 PCI-Express Fusion-MPT SAS-3 @@ -1040,6 +1094,12 @@ 10e4 MegaRAID 12GSAS/PCIe Unsupported SAS38xx 10e5 MegaRAID 12GSAS/PCIe SAS38xx 10e6 MegaRAID 12GSAS/PCIe Secure SAS38xx + 1000 04d9 3808N iMR ROMB + 1000 04da 3808N iMR ROMB + 1000 04db 3808N iMR ROMB + 1000 04dc 3808N iMR ROMB + 1000 04dd 3808N iMR ROMB + 1000 40d8 MegaRAID 9524-8i 1000 40e0 MegaRAID 9540-2M2 1028 2172 PERC H355 Adapter 1028 2173 PERC H355 Front @@ -1052,9 +1112,12 @@ 15d9 1c6e AOC-SLG4-2H8M2 Storage Adapter 1d49 0505 
ThinkSystem RAID 540-8i PCIe Gen4 12Gb Adapter 1d49 0506 ThinkSystem RAID 540-16i PCIe Gen4 12Gb Adapter + 1d49 0507 ThinkSystem RAID 545-8i PCIe Gen4 12Gb Adapter 1d49 0700 ThinkSystem M.2 RAID B540i-2i SATA/NVMe Enablement Kit 1d49 0701 ThinkSystem 7mm RAID B540p-2HS SATA/NVMe Enablement Kit 1d49 0702 ThinkSystem M.2 RAID B540p-2HS SATA/NVMe Enablement Kit + 1d49 0703 ThinkSystem M.2 RAID B540d-2HS SATA/NVMe Enablement Kit + 1d49 0704 ThinkSystem M.2 RAID B545i-2i SATA/NVMe Enablement Kit 10e7 MegaRAID 12GSAS/PCIe Unsupported SAS38xx 1960 MegaRAID 1000 0518 MegaRAID 518 SCSI 320-2 Controller @@ -1070,6 +1133,7 @@ 8086 0520 MegaRAID RAID Controller SRCU41L 8086 0523 MegaRAID RAID Controller SRCS16 3050 SAS2008 PCI-Express Fusion-MPT SAS-2 + 3150 1068e 6001 DX1 Multiformat Broadcast HD/SD Encoder/Decoder c010 PEX880xx PCIe Gen 4 Switch 1000 100b PEX88000 PCIe Gen 4 Virtual Upstream/Downstream Port @@ -1081,6 +1145,7 @@ 1000 a064 PEX88064 64 lane/port PCIe Gen 4 Switch 1000 a080 PEX88080 80 lane/port PCIe Gen 4 Switch 1000 a096 PEX88096 98 lane/port PCIe Gen 4.0 Switch + 4c52 9f48 LRNV9F48 4-port Built-in 8654 NVMe Switching Adapter c012 PEX880xx PCIe Gen 4 Switch # Virtual endpoint used in Broadcom synthetic PCIe switches for resource reservation 1000 100b PEX88000 PCIe Gen 4 Virtual Upstream/Downstream Port @@ -1166,6 +1231,8 @@ 13e9 Ariel/Navi10Lite 13f9 Oberon/Navi12Lite 13fe Cyan Skillfish [BC-250] +# Used in the Steam Deck OLED + 1435 Sephiroth [AMD Custom GPU 0405] 145a Dummy Function (absent graphics controller) 1478 Navi 10 XL Upstream Port of PCI Express Switch 1479 Navi 10 XL Downstream Port of PCI Express Switch @@ -1201,11 +1268,11 @@ 103c 8b17 ProBook 445 G9/455 G9 [Ryzen 7 Integrated Radeon GPU] 15ff Fenghuang [Zhongshan Subor Z+] 1607 Arden - 1636 Renoir + 1636 Renoir [Radeon Vega Series / Radeon Vega Mobile Series] 1637 Renoir Radeon High Definition Audio Controller 1638 Cezanne [Radeon Vega Series / Radeon Vega Mobile Series] 1043 16c2 Radeon Vega 8 -# Used in the Steam Deck +# Used in the Steam Deck LCD 163f VanGogh [AMD Custom GPU 0405] 1640 Rembrandt Radeon High Definition Audio Controller 164c Lucienne @@ -1215,6 +1282,8 @@ 1681 Rembrandt [Radeon 680M] 1714 BeaverCreek HDMI Audio [Radeon HD 6500D and 6400G-6600G series] 103c 168b ProBook 4535s + 1900 Phoenix3 + 1901 Phoenix4 3150 RV380/M24 [Mobility Radeon X600] 103c 0934 nx8220 3151 RV380 GL [FireMV 2400] @@ -2724,6 +2793,7 @@ 1028 2120 Radeon HD 6450 103c 2128 Radeon HD 6450 103c 2aee Radeon HD 7450A + 1043 047b EAH6450 SILENT/DI/1GD3(LP) 1092 6450 Radeon HD 6450 1462 2125 Radeon HD 6450 1462 2346 Radeon HD 7450 @@ -3721,6 +3791,7 @@ 6980 Polaris12 6981 Lexa XT [Radeon PRO WX 3200] 6985 Lexa XT [Radeon PRO WX 3100] + 103c 83b5 Radeon PRO WX 3100 6986 Polaris12 6987 Lexa [Radeon 540X/550X/630 / RX 640 / E9171 MCM] 698f Lexa XT [Radeon PRO WX 3100 / Barco MXRT 4700] @@ -3728,6 +3799,7 @@ 699f Lexa PRO [Radeon 540/540X/550/550X / RX 540X/550/550X] 1028 1720 Radeon RX 550X 148c 2380 Lexa XL [Radeon RX 550] + 17aa 5069 Thinkpad E480/E580 1da2 e367 Lexa PRO [Radeon RX 550] 69a0 Vega 12 69a1 Vega 12 @@ -3909,8 +3981,11 @@ 73a3 Navi 21 GL-XL [Radeon PRO W6800] 73a4 Navi 21 USB 73a5 Navi 21 [Radeon RX 6950 XT] +# Reference + 1002 0e3a Radeon RX 6950 XT 1849 5230 Navi 21 [ASRock OC Forumla Radeon RX 6950XT] 1da2 441d Navi 21 [Sapphire Nitro+ Radeon RX 6950 XT] + 1eae 6950 Navi 21 [XFX Speedster MERC319 Radeon RX 6950 XT] 73ab Navi 21 Pro-XLA [Radeon Pro W6800X/Radeon Pro W6800X Duo] 73ae Navi 21 [Radeon Pro V620 MxGPU] 73af 
Navi 21 [Radeon RX 6900 XT] @@ -3930,11 +4005,17 @@ 1043 16c2 Radeon RX 6800M 1458 2408 Radeon RX 6750 XT GAMING OC 12G 1462 3980 Radeon RX 6700 XT Mech 2X 12G [MSI] + 148c 2409 Red Devil RX 6700 XT +# Dual fan version + 1849 5210 Radeon RX 6700 XT Challenger D 1849 5219 Radeon RX 6700 XT Challenger D 1849 5222 RX 6700 XT Challenger D OC +# Gaming 1440/QHD Overclock edition with 12 Gb GDDR6 and PCIe 4.0 of Radeon RX 6700 XT by Sapphire PULSE manufactured on autumn 2022 / C1 reviseion + 1da2 445e Radeon RX 6700 XT GAMING OC 12G [Sapphire PULSE] 1da2 465e Radeon RX 6750 XT PULSE OC 1da2 e445 Sapphire Radeon RX 6700 1eae 6601 Speedster QICK 319 RX 6700 XT + 1eae 661a Radeon RX 6700 [SPEEDSTER SWFT 309] 73e0 Navi 23 73e1 Navi 23 WKS-XM [Radeon PRO W6600M] 73e3 Navi 23 WKS-XL [Radeon PRO W6600] @@ -3944,6 +4025,7 @@ 1849 5236 RX 6650 XT Challenger D OC 73f0 Navi 33 [Radeon RX 7600M XT] 73ff Navi 23 [Radeon RX 6600/6600 XT/6600M] + 1462 5021 MSI RX 6600XT MECH 2X 1462 5022 RX 6600 MECH 2X 148c 2412 PowerColor Red Devil RX 6600 XT 1849 5218 Radeon RX 6600 Challenger ITX 8GB @@ -3959,16 +4041,24 @@ 1da2 e457 PULSE AMD Radeon RX 6500 XT 7446 Navi 31 USB 7448 Navi 31 [Radeon Pro W7900] - 744c Navi 31 [Radeon RX 7900 XT/7900 XTX] + 744c Navi 31 [Radeon RX 7900 XT/7900 XTX/7900M] + 1002 0e3b RX 7900 GRE [XFX] + 1043 0506 TUF Gaming Radeon RX 7900 XTX OC + 1849 5304 Radeon RX 7900 XTX 1da2 471e PULSE RX 7900 XTX + 1da2 475e PULSE RX 7900 GRE 1da2 e471 NITRO+ RX 7900 XTX Vapor-X 1eae 7901 RX-79XMERCB9 [SPEEDSTER MERC 310 RX 7900 XTX] 745e Navi 31 [Radeon Pro W7800] + 7460 7460 Navi32 GL-XL [AMD Radeon PRO V710] + 7470 Navi 32 [Radeon PRO W7700] 747e Navi 32 [Radeon RX 7700 XT / 7800 XT] - 7480 Navi 33 [Radeon RX 7700S/7600/7600S/7600M XT/PRO W7600] + 7480 Navi 33 [Radeon RX 7600/7600 XT/7600M XT/7600S/7700S / PRO W7600] 1849 5313 RX 7600 Challenger OC 7483 Navi 33 [Radeon RX 7600M/7600M XT] 7489 Navi 33 [Radeon Pro W7500] + 74a0 Aqua Vanjaram [Instinct MI300A] + 74a1 Aqua Vanjaram [Instinct MI300X] 7833 RS350 Host Bridge 7834 RS350 [Radeon 9100 PRO/XT IGP] 7835 RS350M [Mobility Radeon 9000 IGP] @@ -4352,6 +4442,7 @@ aa90 Turks HDMI Audio [Radeon HD 6500/6600 / 6700M Series] 1028 04a3 Precision M4600 aa98 Caicos HDMI Audio [Radeon HD 6450 / 7450/8450/8490 OEM / R5 230/235/235X OEM] + 1043 aa98 EAH6450 SILENT/DI/1GD3(LP) 174b aa98 Radeon HD 6450 1GB DDR3 aaa0 Tahiti HDMI Audio [Radeon HD 7870 XT / 7950/7970] aab0 Oland/Hainan/Cape Verde/Pitcairn HDMI Audio [Radeon HD 7000 Series] @@ -4815,6 +4906,7 @@ 1014 04fb PCIe3 x16 20GB Cache 12Gb Quad SAS RAID+ Adapter(580B) 1014 04fc PCIe3 x8 12Gb Quad SAS RAID+ Adapter(580A) 04ed Internal Shared Memory (ISM) virtual PCI device + 0611 4769 Cryptographic Adapter 3022 QLA3022 Network Adapter 4022 QLA3022 Network Adapter ffff MPIC-2 interrupt controller @@ -4824,6 +4916,7 @@ 5343 SPEA 3D Accelerator 1018 Unisys Systems 1019 Elitegroup Computer Systems + 9602 RS780/RS880 PCI to PCI bridge (int gfx) 101a AT&T GIS (NCR) 0005 100VG ethernet 0007 BYNET BIC4G/2C/2G @@ -5281,7 +5374,7 @@ 1646 VanGogh IOMMU 1647 VanGogh PCIe GPP Bridge 1648 VanGogh Internal PCIe GPP Bridge to Bus - 1649 VanGogh PSP/CCP + 1649 Family 19h PSP/CCP 164f Milan IOMMU 1650 Milan Data Fabric; Function 0 1651 Milan Data Fabric; Function 1 @@ -5334,7 +5427,7 @@ 1716 Family 12h/14h Processor Function 5 1718 Family 12h/14h Processor Function 6 1719 Family 12h/14h Processor Function 7 - 2000 79c970 [PCnet32 LANCE] + 2000 79C97x [PCnet32 LANCE] 1014 2000 NetFinity 10/100 Fast Ethernet 1022 2000 PCnet - 
Fast 79C971 103c 104c Ethernet with LAN remote power Adapter @@ -5348,7 +5441,7 @@ 1259 2454 AT-2450v4 10Mb Ethernet Adapter 1259 2700 AT-2700TX 10/100 Fast Ethernet 1259 2701 AT-2700FX 100Mb Ethernet - 1259 2702 AT-2700FTX 10/100 Mb Fiber/Copper Fast Ethernet + 1259 2702 AT-2700FTX (AM79C976KD [PCnet-PRO] chipset) 10/100 Mb Fiber/Copper Fast Ethernet 1259 2703 AT-2701FX 1259 2704 AT-2701FTX 10/100 Mb Fiber/Copper Fast Ethernet 4c53 1000 CC7/CR7/CP7/VC7/VP7/VR7 mainboard @@ -5362,7 +5455,7 @@ 1092 0a78 Multimedia Home Network Adapter 1668 0299 ActionLink Home Network Adapter 2003 Am 1771 MBW [Alchemy] - 2020 53c974 [PCscsi] + 2020 AM53/79C974 [PC-SCSI] 1af4 1100 QEMU Virtual Machine 2040 79c974 2080 CS5536 [Geode companion] Host Bridge @@ -5392,10 +5485,12 @@ 1849 43c8 Fatal1ty X370 Professional Gaming 43b6 X399 Series Chipset SATA Controller 43b7 300 Series Chipset SATA Controller + 43b8 A320 Chipset SATA Controller [AHCI mode] 43b9 X370 Series Chipset USB 3.1 xHCI Controller 1849 43d0 Fatal1ty X370 Professional Gaming 43ba X399 Series Chipset USB 3.1 xHCI Controller 43bb 300 Series Chipset USB 3.1 xHCI Controller + 43bc A320 USB 3.1 XHCI Host Controller 43c6 400 Series Chipset PCIe Bridge 43c7 400 Series Chipset PCIe Port 43c8 400 Series Chipset SATA Controller @@ -5407,6 +5502,10 @@ 43ee 500 Series Chipset USB 3.1 XHCI Controller # maybe 1b21 1142 ASM1042A USB 3.0 Host Controller + 43f4 600 Series Chipset PCIe Switch Upstream Port + 43f5 600 Series Chipset PCIe Switch Downstream Port + 43f6 600 Series Chipset SATA Controller + 43f7 600 Series Chipset USB 3.2 Controller 57a3 Matisse PCIe GPP Bridge 57a4 Matisse PCIe GPP Bridge 57ad Matisse Switch Upstream @@ -5575,6 +5674,8 @@ 9609 RS780/RS880 PCI to PCI bridge (PCIE port 5) 960a RS780 PCI to PCI bridge (NB-SB link) 960b RS780 PCI to PCI bridge (ext gfx port 1) +# Takes over NVMe PCI ID when RAID is enabled + b000 RAID Bottom Device 1023 Trident Microsystems 0194 82C194 2000 4DWave DX @@ -6393,6 +6494,7 @@ 103c Hewlett-Packard Company 1005 A4977A Visualize EG 1008 Visualize FX + 1020 548XX Scope Interface 1028 Tach TL Fibre Channel Host Adapter 1029 Tach XL2 Fibre Channel Host Adapter 107e 000f Interphase 5560 Fibre Channel Adapter @@ -6549,8 +6651,6 @@ 3010 Samurai_1 3020 Samurai_IDE 1043 ASUSTeK Computer Inc. - 0464 Radeon R9 270x GPU - 0521 RX580 [RX 580 Dual O8G] 0675 ISDNLink P-IN100-ST-D 0675 1704 ISDN Adapter (PCI Bus, D, C) 0675 1707 ISDN Adapter (PCI Bus, DV, W) @@ -7009,6 +7109,7 @@ 90dc Baikal DMA Controller 90dd Baikal Memory (DDR3/SPM) 90de Baikal USB 3.0 xHCI Host Controller + 90eb CXD90062GG 9121 Nextorage NEM-PA NVMe SSD for PlayStation 104e Oak Technology, Inc 0017 OTI-64017 @@ -7240,6 +7341,7 @@ c350 80333 [SuperTrak EX12350] e350 80333 [SuperTrak EX24350] 105b Foxconn International, Inc. + 9602 RS780/RS880 PCI to PCI bridge (int gfx) e0c3 T99W175 5G Modem [Snapdragon X55] 105c Wipro Infotech Limited 105d Number 9 Computer Company @@ -7481,7 +7583,7 @@ 1076 Chaintech Computer Co. Ltd 1077 QLogic Corp. 
1016 ISP10160 Single Channel Ultra3 SCSI Processor - 1020 ISP1020 Fast-wide SCSI + 1020 ISP1020/1040 Fast-wide SCSI 1022 ISP1022 Fast-wide SCSI 1080 ISP1080 SCSI Host Adapter 1216 ISP12160 Dual Channel Ultra3 SCSI Processor @@ -8439,6 +8541,7 @@ 764d PXI-2521 764e PXI-2522 764f PXI-2523 + 7652 PXIe-4080 7654 PXI-2796 7655 PXI-2797 7656 PXI-2798 @@ -8453,7 +8556,14 @@ 76a3 PXIe-6535B 76a4 PXIe-6536B 76a5 PXIe-6537B + 76d8 PXIe-4081 + 76d9 PXIe-4082 + 77a8 PXIe-6375 783e PXI-8368 + 7882 PXIe-6376 + 7883 PXIe-6378 + 799e PXIe-6386 + 799f PXIe-6396 9020 PXI-2501 9030 PXI-2503 9040 PXI-2527 @@ -8748,6 +8858,8 @@ 13e9 0070 Win/TV (Video Section) 036e Bt878 Video Capture 0000 0001 Euresys Picolo PCIe + 0000 0002 Euresys PICOLO Pro 2 + 0000 0004 Euresys PICOLO Pro 3E 0070 13eb WinTV Series 0070 ff01 Viewcast Osprey 200 0071 0101 DigiTV PCI @@ -8768,6 +8880,23 @@ 14f1 0002 Bt878 Mediastream Controller PAL BG 14f1 0003 Bt878a Mediastream Controller PAL BG 14f1 0048 Bt878/832 Mediastream Controller + 1805 0101 Euresys PICOLO Tetra + 1805 0102 Euresys PICOLO Tetra + 1805 0103 Euresys PICOLO Tetra + 1805 0104 Euresys PICOLO Tetra + 1805 0105 Euresys PICOLO Tetra + 1805 0106 Euresys PICOLO Tetra + 1805 0107 Euresys PICOLO Tetra + 1805 0108 Euresys PICOLO Tetra + 1805 0201 Euresys PICOLO Tetra-X + 1805 0202 Euresys PICOLO Tetra-X + 1805 0203 Euresys PICOLO Tetra-X + 1805 0204 Euresys PICOLO Tetra-X + 1805 0401 Euresys PICOLO Tymo + 1805 0402 Euresys PICOLO Tymo + 1805 0403 Euresys PICOLO Tymo + 1805 0404 Euresys PICOLO Tymo + 1805 1001 Euresys PICOLO Junior 4 1822 0001 VisionPlus DVB card 1851 1850 FlyVideo'98 - Video 1851 1851 FlyVideo II @@ -8843,6 +8972,8 @@ 1852 1852 FlyVideo'98 (with FM Tuner) 0878 Bt878 Audio Capture 0000 0001 Euresys Picolo PCIe + 0000 0002 Euresys PICOLO Pro 2 (Audio Section) + 0000 0004 Euresys PICOLO Pro 3E (Audio Section) 0070 13eb WinTV Series 0070 ff01 Viewcast Osprey 200 0071 0101 DigiTV PCI @@ -8865,6 +8996,23 @@ 14f1 0002 Bt878 Video Capture (Audio Section) 14f1 0003 Bt878 Video Capture (Audio Section) 14f1 0048 Bt878 Video Capture (Audio Section) + 1805 0101 Euresys PICOLO Tetra (Audio Section) + 1805 0102 Euresys PICOLO Tetra (Audio Section) + 1805 0103 Euresys PICOLO Tetra (Audio Section) + 1805 0104 Euresys PICOLO Tetra (Audio Section) + 1805 0105 Euresys PICOLO Tetra (Audio Section) + 1805 0106 Euresys PICOLO Tetra (Audio Section) + 1805 0107 Euresys PICOLO Tetra (Audio Section) + 1805 0108 Euresys PICOLO Tetra (Audio Section) + 1805 0201 Euresys PICOLO Tetra-X (Audio Section) + 1805 0202 Euresys PICOLO Tetra-X (Audio Section) + 1805 0203 Euresys PICOLO Tetra-X (Audio Section) + 1805 0204 Euresys PICOLO Tetra-X (Audio Section) + 1805 0401 Euresys PICOLO Tymo (Audio Section) + 1805 0402 Euresys PICOLO Tymo (Audio Section) + 1805 0403 Euresys PICOLO Tymo (Audio Section) + 1805 0404 Euresys PICOLO Tymo (Audio Section) + 1805 1001 Euresys PICOLO Junior 4 (Audio Section) 1822 0001 VisionPlus DVB Card 18ac d500 DViCO FusionHDTV5 Lite 270f fc00 Digitop DTT-1000 @@ -8991,6 +9139,10 @@ 1147 VScom 020 2 port parallel adaptor 2000 PCI9030 32-bit 33MHz PCI <-> IOBus Bridge 10b5 9030 ATCOM AE400P Quad E1 PCI card + 2300 Euresys DOMINO Gamma + 2374 Euresys DOMINO Alpha + 2491 Euresys GRABLINK Value + 2493 Euresys GRABLINK Expert 2540 IXXAT CAN-Interface PC-I 04/PCI 2724 Thales PCSM Security Card 3376 Cosateq 4 Port CAN Card @@ -9061,12 +9213,17 @@ 8717 PEX 8717 16-lane, 8-Port PCI Express Gen 3 (8.0 GT/s) Switch with DMA 8718 PEX 8718 16-Lane, 5-Port PCI Express Gen 3 (8.0 GT/s) 
Switch 8724 PEX 8724 24-Lane, 6-Port PCI Express Gen 3 (8 GT/s) Switch, 19 x 19mm FCBGA + 4c52 9234 LRNV9324 2-port Built-in 8643 NVMe Exchange Adapter + 4c52 9524 LRNV9524 2-port M.2 NVMe SSD Exchange Adapter 8725 PEX 8725 24-Lane, 10-Port PCI Express Gen 3 (8.0 GT/s) Multi-Root Switch with DMA 8732 PEX 8732 32-lane, 8-Port PCI Express Gen 3 (8.0 GT/s) Switch 8734 PEX 8734 32-lane, 8-Port PCI Express Gen 3 (8.0GT/s) Switch 8747 PEX 8747 48-Lane, 5-Port PCI Express Gen 3 (8.0 GT/s) Switch + 4c52 9347 LRNV9347L 2-port Built-in 8643 NVMe Switching Adapter + 4c52 9547 LRNV9547 4-port M.2 NVMe SSD Exchange Adapter 8748 PEX 8748 48-Lane, 12-Port PCI Express Gen 3 (8 GT/s) Switch, 27 x 27mm FCBGA 8749 PEX 8749 48-Lane, 18-Port PCI Express Gen 3 (8.0 GT/s) Multi-Root Switch with DMA + 4c52 9349 LRNV9349 8-port SFF-8643 NVMe SSD Exchange Adapter 87a0 PEX PCI Express Switch NT0 Port Link Interface 87a1 PEX PCI Express Switch NT1 Port Link Interface 87b0 PEX PCI Express Switch NT0 Port Virtual Interface @@ -9075,6 +9232,7 @@ 87d0 PEX PCI Express Switch DMA interface 9016 PLX 9016 8-port serial controller 9030 PCI9030 32-bit 33MHz PCI <-> IOBus Bridge + 10b5 1205 Becker & Hickl MSA-1000 10b5 2695 Hilscher CIF50-PB/DPS Profibus 10b5 2862 Alpermann+Velte PCL PCI LV (3V/5V): Timecode Reader Board 10b5 2906 Alpermann+Velte PCI TS (3V/5V): Time Synchronisation Board @@ -9106,10 +9264,17 @@ e1c5 0006 TA1-PCI4 9036 9036 9050 PCI <-> IOBus Bridge + 103c 10b0 82350 PCI GPIB 10b5 1067 IXXAT CAN i165 10b5 114e Wasco WITIO PCI168extended 10b5 1169 Wasco OPTOIO32standard 32 digital in, 32 digital out + 10b5 1171 Becker & Hickl PMS-400 10b5 1172 IK220 (Heidenhain) + 10b5 1201 Becker & Hickl SPC-6x0 + 10b5 1202 Becker & Hickl SPC-7x0 + 10b5 1203 Becker & Hickl MSA-300 + 10b5 1206 Becker & Hickl DCC-100 + 10b5 120a Becker & Hickl STP-340 10b5 2036 SatPak GPS 10b5 2221 Alpermann+Velte PCL PCI LV: Timecode Reader Board 10b5 2273 SH ARC-PCI SOHARD ARCNET card @@ -9117,6 +9282,7 @@ 10b5 2905 Alpermann+Velte PCI TS: Time Synchronisation Board 10b5 3196 Goramo PLX200SYN sync serial card 10b5 9050 PCI-I04 PCI Passive PC/CAN Interface + 11a9 5334 PDS4 12fe 0001 CAN-PCI/331 CAN bus controller 1369 8901 PCX11+ PCI 1369 8f01 VX222 @@ -9156,6 +9322,11 @@ d84d 4078 EX-4078 2S(16C552) RS-232+1P 9052 PCI9052 PCI <-> IOBus Bridge 9054 PCI9054 32-bit 33MHz PCI <-> IOBus Bridge + 10b5 1171 Becker & Hickl PMS-400A + 10b5 1208 Becker & Hickl SPC-830 + 10b5 120e Becker & Hickl SPC-930 + 10b5 120f Becker & Hickl SPC-150 + 10b5 1210 Becker & Hickl DPC-230 10b5 2455 Wessex Techology PHIL-PCI 10b5 2696 Innes Corp AM Radcap card 10b5 2717 Innes Corp Auricon card @@ -9542,6 +9713,7 @@ 10be Tseng Labs International Co. 10bf Most Inc 10c0 Boca Research Inc. + 9135 iX3D Ultimate Rez 10c1 ICM Co., Ltd. 10c2 Auspex Systems Inc. 10c3 Samsung Semiconductors, Inc. 
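The bulk of the remaining hunks refresh the vendored default_pci.ids snapshot (2023.09.22 to 2024.06.23). For context, pci.ids uses a positional, tab-indented layout: vendor IDs start at column zero ("<id>  <name>"), device IDs are indented one tab, subvendor/subdevice pairs two tabs, and `#` starts a comment. The sketch below is illustrative only; `lookupDeviceName` and the inline sample are hypothetical helpers, not go-nvlib's actual parser (which lives in pkg/pciids):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// lookupDeviceName scans pci.ids-formatted text for a vendor/device pair.
// Vendor lines start at column zero; device lines are indented with a single
// tab; deeper-indented subsystem lines and '#' comments are skipped.
func lookupDeviceName(pciids, vendorID, deviceID string) (string, bool) {
	scanner := bufio.NewScanner(strings.NewReader(pciids))
	inVendor := false
	for scanner.Scan() {
		line := scanner.Text()
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		if !strings.HasPrefix(line, "\t") {
			// A new vendor section; remember whether it is the one we want.
			inVendor = strings.HasPrefix(line, vendorID+"  ")
			continue
		}
		if inVendor && !strings.HasPrefix(line, "\t\t") {
			entry := strings.TrimPrefix(line, "\t")
			if strings.HasPrefix(entry, deviceID+"  ") {
				return strings.TrimSpace(strings.TrimPrefix(entry, deviceID)), true
			}
		}
	}
	return "", false
}

func main() {
	sample := "10de  NVIDIA Corporation\n\t2331  GH100 [H100 PCIe]\n"
	name, ok := lookupDeviceName(sample, "10de", "2331")
	fmt.Println(name, ok) // GH100 [H100 PCIe] true
}
```

Devices whose IDs are missing from the snapshot fall back to the UnknownDeviceString and UnknownClassString constants defined in nvpci.go above.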
@@ -10256,6 +10428,7 @@ 1043 402f AGP-V8200 DDR 1048 0c70 GLADIAC 920 0201 NV20 [GeForce3 Ti 200] + 1462 8503 G3Ti200 Pro VT128 0202 NV20 [GeForce3 Ti 500] 1043 405b V8200 T5 1545 002f Xtasy 6964 @@ -11555,6 +11728,7 @@ 1025 0753 GeForce GT 620M 1025 0754 GeForce GT 620M 17aa 3977 GeForce GT 640M LE + 1b0a 20c6 GeForce GT 630M 1b0a 2210 GeForce GT 635M 0dea GF108M [GeForce 610M] 17aa 365a GeForce 615 @@ -11611,6 +11785,9 @@ 0f02 GF108 [GeForce GT 730] 0f03 GF108 [GeForce GT 610] 0f06 GF108 [GeForce GT 730] + 0fa0 GK11x [GK11x_FPGA] + 0fa5 GK11x + 0fa7 GK11x [Tegra on x86 (PEATRANS)] 0fb0 GM200 High Definition Audio 0fb8 GP108 High Definition Audio Controller 0fb9 GP107GL High Definition Audio Controller @@ -11620,13 +11797,18 @@ 0fc0 GK107 [GeForce GT 640 OEM] 0fc1 GK107 [GeForce GT 640] 0fc2 GK107 [GeForce GT 630 OEM] + 0fc4 GK107 [D14P1-15] 0fc5 GK107 [GeForce GT 1030] 0fc6 GK107 [GeForce GTX 650] 1043 8428 GTX650-DC-1GD5 0fc8 GK107 [GeForce GT 740] 0fc9 GK107 [GeForce GT 730] + 0fcb GK107 [EXK107] + 0fcc GK107 [GeForce GT 720] 0fcd GK107M [GeForce GT 755M] 0fce GK107M [GeForce GT 640M LE] + 0fcf GK107 [GEN3 ESI] + 0fd0 GK107 [NB1G] 0fd1 GK107M [GeForce GT 650M] 1043 1597 GeForce GT 650M 1043 15a7 GeForce GT 650M @@ -11641,10 +11823,15 @@ 0fd3 GK107M [GeForce GT 640M LE] 0fd4 GK107M [GeForce GTX 660M] 0fd5 GK107M [GeForce GT 650M Mac Edition] - 0fd6 GK107M + 0fd6 GK107M [N13P-GS-W] + 0fd7 GK107 [GK107-GTX] 0fd8 GK107M [GeForce GT 640M Mac Edition] 0fd9 GK107M [GeForce GT 645M] - 0fdb GK107M + 0fda GK107 [GK107-ES-A1] + 0fdb GK107 [GK107-ESP-A1] + 0fdc GK107 [GK107-INT22-A1] + 0fdd GK107 [GK107-INT11-A1] + 0fde GK107 [GK107-ES-KA-E1] 0fdf GK107M [GeForce GT 740M] 0fe0 GK107M [GeForce GTX 660M Mac Edition] 0fe1 GK107M [GeForce GT 730M] @@ -11665,6 +11852,7 @@ 0fed GK107M [GeForce 820M] 0fee GK107M [GeForce 810M] 0fef GK107GL [GRID K340] + 0ff0 GK107 [NB1Q] 0ff1 GK107 [NVS 1000] 0ff2 GK107GL [GRID K1] 0ff3 GK107GL [Quadro K420] @@ -12184,9 +12372,13 @@ 11a2 GK104M [GeForce GTX 675MX Mac Edition] 11a3 GK104M [GeForce GTX 680MX] 106b 010d iMac 13,2 + 11a4 GK104 [GK104-ESA] + 11a5 GK104 [GK104-ESA] 11a7 GK104M [GeForce GTX 675MX] 11a8 GK104GLM [Quadro K5100M] 11a9 GK104M [GeForce GTX 870M] + 11aa GK104 [GK104-INT] + 11ac GK104 [GK104-CS] 11af GK104GLM [GRID IceCube] 11b0 GK104GL [GRID K240Q / K260Q vGPU] 10de 101a GRID K240Q @@ -12204,6 +12396,7 @@ 11be GK104GLM [Quadro K3000M] 11bf GK104GL [GRID K2] 11c0 GK106 [GeForce GTX 660] + 11c1 GK106 [D14P2-30] 11c2 GK106 [GeForce GTX 650 Ti Boost] 1043 845b GeForce GTX 650 Ti Boost DirectCU II OC 1462 2874 GeForce GTX 650 Ti Boost TwinFrozr II OC @@ -12219,6 +12412,10 @@ 11c7 GK106 [GeForce GTX 750 Ti] 11c8 GK106 [GeForce GTX 650 OEM] 11cb GK106 [GeForce GT 740] + 11d0 GK106 [GK106-INT353] + 11d1 GK106 [GK106-INT343] + 11d2 GK106 [GK106-INT232] + 11d3 GK106 [GK106-ES] 11e0 GK106M [GeForce GTX 770M] 11e1 GK106M [GeForce GTX 765M] 11e2 GK106M [GeForce GTX 765M] @@ -12227,6 +12424,7 @@ 11e7 GK106M 11fa GK106GL [Quadro K4000] 11fc GK106GLM [Quadro K2100M] + 11ff GK106 [NB1Q] 1200 GF114 [GeForce GTX 560 Ti] 1201 GF114 [GeForce GTX 560] 1202 GF114 [GeForce GTX 560 Ti OEM] @@ -12274,7 +12472,9 @@ 1280 GK208 [GeForce GT 635] 1281 GK208 [GeForce GT 710] 1282 GK208 [GeForce GT 640 Rev. 2] + 1283 GK208 [D15M2-10] 1284 GK208 [GeForce GT 630 Rev. 
2] + 1285 GK208 [GK208-100] 1286 GK208 [GeForce GT 720] 1287 GK208B [GeForce GT 730] 1288 GK208B [GeForce GT 720] @@ -12314,8 +12514,14 @@ 17aa 36af GeForce 920M 129a GK208BM [GeForce 910M] 12a0 GK208 + 12ad GK208 [GK208-ES] + 12ae GK208 [GK208-CS1-C] + 12af GK208 [GK208-INT] + 12b0 GK208 [GK208-CS-Q] + 12b1 GK208 [GK208 INT] 12b9 GK208GLM [Quadro K610M] 12ba GK208GLM [Quadro K510M] + 130b GK110 [Q12U-1] 1340 GM108M [GeForce 830M] 103c 2b2b GeForce 830A 1341 GM108M [GeForce 840M] @@ -12360,6 +12566,8 @@ 103c 2b4c GeForce GTX 960A 139c GM107M [GeForce 940M] 139d GM107M [GeForce GTX 750 Ti] + 13ad GM204 [GM107 INT52] + 13ae GM204 [GM107 CS1] 13b0 GM107GLM [Quadro M2000M] 13b1 GM107GLM [Quadro M1000M] 13b2 GM107GLM [Quadro M600M] @@ -12374,15 +12582,19 @@ 10de 110a GRID M40 10de 1160 Tesla M10 10de 11d2 GRID M10-8Q + 13be GM204 [GM107 CS1] + 13bf GM204 [GM107 INT52] 13c0 GM204 [GeForce GTX 980] 1043 8504 GTX980-4GD5 13c1 GM204 13c2 GM204 [GeForce GTX 970] 13c3 GM204 + 13c4 GM204 [D17U-20] 13d7 GM204M [GeForce GTX 980M] 13d8 GM204M [GeForce GTX 960 OEM / 970M] 13d9 GM204M [GeForce GTX 965M] 13da GM204M [GeForce GTX 980 Mobile] + 13e4 GM204 [Graphics Device ES-A] 13e7 GM204GL [GeForce GTX 980 Engineering Sample] 13f0 GM204GL [Quadro M5000] 13f1 GM204GL [Quadro M4000] @@ -12408,11 +12620,16 @@ 1430 GM206GL [Quadro M2000] 1431 GM206GL [Tesla M4] 1436 GM206GLM [Quadro M2200 Mobile] + 15c2 GP100 [CMP 100-100] 15f0 GP100GL [Quadro GP100] 15f1 GP100GL 15f7 GP100GL [Tesla P100 PCIe 12GB] 15f8 GP100GL [Tesla P100 PCIe 16GB] 15f9 GP100GL [Tesla P100 SXM2 16GB] + 15fa GP100GL [DGX Station / PH402 SKU 200] + 15fb GP100GL [GP100 SKU 200] + 15fc GP100GL [Tesla P100-DGXS-16GB] + 15ff GP100GL [GP100 SKU 15ff] 1617 GM204M [GeForce GTX 980M] 1618 GM204M [GeForce GTX 970M] 1619 GM204M [GeForce GTX 965M] @@ -12571,6 +12788,7 @@ 1d81 GV100 [TITAN V] 1d83 GV100 [CMP 100-200] 1d84 GV100 [CMP 100-210] + 1db0 GV100GL [Tesla GV100 SXM2-16GB SKU 890] 1db1 GV100GL [Tesla V100 SXM2 16GB] 1db2 GV100GL [Tesla V100 DGXS 16GB] 1db3 GV100GL [Tesla V100 FHHL 16GB] @@ -12582,10 +12800,12 @@ 10de 131d Tesla V100-SXM3-32GB-H 1dba GV100GL [Quadro GV100] 10de 12eb TITAN V CEO Edition + 1dbd GV100GL [Tesla GV100 DGX1-V] 1dbe GV100 Engineering Sample 1dc1 GV100 [CMP 100-200] 1df0 GV100GL [Tesla PG500-216] 1df2 GV100GL [Tesla PG503-216] + 1df4 GV100 [CMP 100-210] 1df5 GV100GL [Tesla V100 SXM2 16GB] 1df6 GV100GL [Tesla V100S PCIe 32GB] 1e02 TU102 [TITAN RTX] @@ -12599,6 +12819,7 @@ 1e30 TU102GL [Quadro RTX 6000/8000] 10de 129e Quadro RTX 8000 10de 12ba Quadro RTX 6000 + 1e35 TU102GL [Tesla T10] 1e36 TU102GL [Quadro RTX 6000] 1e37 TU102GL [Tesla T10 16GB / GRID RTX T10-2/T10-4/T10-8] 10de 1304 Tesla T10 16GB @@ -12696,7 +12917,7 @@ 1fd9 TU117BM [GeForce GTX 1650 Mobile Refresh] 1fdd TU117BM [GeForce GTX 1650 Mobile Refresh] 1ff0 TU117GL [T1000 8GB] - 1ff2 TU117GL [T400 4GB] + 1ff2 TU117GL [T400 4GB / T400E] 1ff9 TU117GLM [Quadro T1000 Mobile] 2080 GA100 2081 GA100 @@ -12737,6 +12958,7 @@ 21ae TU116GL 21bf TU116GL 21c2 TU116 + 21c3 TU116 21c4 TU116 [GeForce GTX 1660 SUPER] 21d1 TU116BM [GeForce GTX 1660 Ti Mobile] 2200 GA102 @@ -12769,25 +12991,34 @@ 2296 Tegra PCIe Endpoint Virtual Network 22a3 GH100 [H100 NVSwitch] 22ba AD102 High Definition Audio Controller + 22bc AD104 High Definition Audio Controller + 22bd AD106M High Definition Audio Controller 2302 GH100 2313 GH100 [H100 CNX] 2321 GH100 [H100L 94GB] 2322 GH100 [H800 PCIe] 2324 GH100 [H800] + 2329 GH100 [H20] 2330 GH100 [H100 SXM5 80GB] 2331 GH100 [H100 PCIe] + 2335 GH100 [H200 SXM 
141GB]
	2336  GH100 [H100]
	2337  GH100 [H100 SXM5 64GB]
+	2338  GH100 [H100 SXM5 96GB]
	2339  GH100 [H100 SXM5 94GB]
	233a  GH100 [H800L 94GB]
	233d  GH100 [H100 96GB]
-	2342  GH100 [GH200 120GB]
+	2342  GH100 [GH200 120GB / 480GB]
	2343  GH100
-	2345  GH100 [GH200 480GB]
+	2345  GH100 [GH100-88K-A1]
+	237f  GH100 [Skinny Joe]
+	23b0  GH100
+	23f0  GH100
	2414  GA103 [GeForce RTX 3060 Ti]
	2420  GA103M [GeForce RTX 3080 Ti Mobile]
	2438  GA103GLM [RTX A5500 Laptop GPU]
	2460  GA103M [GeForce RTX 3080 Ti Laptop GPU]
+	2480  GA104 [Reserved Dev ID A]
	2482  GA104 [GeForce RTX 3070 Ti]
	2483  GA104
	2484  GA104 [GeForce RTX 3070]
@@ -12799,6 +13030,9 @@
	2488  GA104 [GeForce RTX 3070 Lite Hash Rate]
	2489  GA104 [GeForce RTX 3060 Ti Lite Hash Rate]
	248a  GA104 [CMP 70HX]
+	248c  GA104 [GeForce RTX 3070 Ti]
+	248d  GA104 [GeForce RTX 3070]
+	248e  GA104 [GeForce RTX 3060 Ti]
	249c  GA104M [GeForce RTX 3080 Mobile / Max-Q 8GB/16GB]
	249d  GA104M [GeForce RTX 3070 Mobile / Max-Q]
	249f  GA104M
@@ -12816,6 +13050,7 @@
	24ba  GA104GLM [RTX A4500 Laptop GPU]
	24bb  GA104GLM [RTX A3000 Laptop GPU]
	24bf  GA104 [GeForce RTX 3070 Engineering Sample]
+	24c0  GA104 [Initial Dev ID B]
	24c7  GA104 [GeForce RTX 3060 8GB]
	24c8  GA104 [GeForce RTX 3070 GDDR6X]
	24c9  GA104 [GeForce RTX 3060 Ti GDDR6X]
@@ -12843,6 +13078,7 @@
	2571  GA106 [RTX A2000 12GB]
	2582  GA107 [GeForce RTX 3050 8GB]
	2583  GA107 [GeForce RTX 3050 4GB]
+	2584  GA107 [GeForce RTX 3050 6GB]
	25a0  GA107M [GeForce RTX 3050 Ti Mobile]
	25a2  GA107M [GeForce RTX 3050 Mobile]
	25a3  GA107
@@ -12856,6 +13092,8 @@
	25ac  GN20-P0-R-K2 [GeForce RTX 3050 6GB Laptop GPU]
	25ad  GA107 [GeForce RTX 2050]
	25af  GA107 [GeForce RTX 3050 Engineering Sample]
+	25b0  GA107GL [RTX A1000]
+	25b2  GA107GL [RTX A400]
	25b5  GA107GLM [RTX A4 Mobile]
# A16 - 25B6 10DE 14A9 / A2 - 25B6 10DE 157E
	25b6  GA107GL [A2 / A16]
@@ -12875,23 +13113,36 @@
	25fb  GA107 [RTX A500 Embedded GPU]
	2681  AD102 [RTX TITAN Ada]
	2684  AD102 [GeForce RTX 4090]
+	2685  AD102 [GeForce RTX 4090 D]
+	2689  AD102 [GeForce RTX 4070 Ti SUPER]
	26b1  AD102GL [RTX 6000 Ada Generation]
	26b2  AD102GL [RTX 5000 Ada Generation]
+	26b3  AD102GL [RTX 5880 Ada Generation]
	26b5  AD102GL [L40]
+	26b7  AD102GL [L20]
	26b8  AD102GL [L40G]
	26b9  AD102GL [L40S]
+	26ba  AD102GL [L20]
	26f5  AD102GL [L40 CNX]
+	2702  AD103 [GeForce RTX 4080 SUPER]
+	2703  AD103 [GeForce RTX 4080 SUPER]
	2704  AD103 [GeForce RTX 4080]
+	2705  AD103 [GeForce RTX 4070 Ti SUPER]
+	2709  AD103 [GeForce RTX 4070]
	2717  GN21-X11 [GeForce RTX 4090 Laptop GPU]
	2730  AD103GLM [RTX 5000 Ada Generation Laptop GPU]
	2757  GN21-X11
	2770  AD103GLM [RTX 5000 Ada Generation Embedded GPU]
	2782  AD104 [GeForce RTX 4070 Ti]
+	2783  AD104 [GeForce RTX 4070 SUPER]
	2785  AD104 [AC AD104 20GB]
	2786  AD104 [GeForce RTX 4070]
+	2788  AD104 [GeForce RTX 4060 Ti]
	27a0  AD104M [GeForce RTX 4080 Max-Q / Mobile]
	27b0  AD104GL [RTX 4000 SFF Ada Generation]
+	27b1  AD104GL [RTX 4500 Ada Generation]
	27b2  AD104GL [RTX 4000 Ada Generation]
+	27b6  AD104GL [L2]
	27b7  AD104GL [L16]
	27b8  AD104GL [L4]
	27ba  AD104GLM [RTX 4000 Ada Generation Laptop GPU]
@@ -12901,6 +13152,7 @@
	27fb  AD104GLM [RTX 3500 Ada Generation Embedded GPU]
	2803  AD106 [GeForce RTX 4060 Ti]
	2805  AD106 [GeForce RTX 4060 Ti 16GB]
+	2808  AD106 [GeForce RTX 4060]
	2820  AD106M [GeForce RTX 4070 Max-Q / Mobile]
	2838  AD106GLM [RTX 3000 Ada Generation Laptop GPU]
	2860  AD106M [GeForce RTX 4070 Max-Q / Mobile]
@@ -12908,7 +13160,11 @@
	2882  AD107 [GeForce RTX 4060]
	28a0  AD107M [GeForce RTX 4060 Max-Q / Mobile]
	28a1  AD107M [GeForce RTX 4050 Max-Q / Mobile]
+	28b0  AD107GL [RTX 2000 / 2000E Ada Generation]
	28b8  AD107GLM [RTX 2000 Ada Generation Laptop GPU]
+	28b9  AD107GLM [RTX 1000 Ada Generation Laptop GPU]
+	28ba  AD107GLM [RTX 500 Ada Generation Laptop GPU]
+	28bb  AD107GLM [RTX 500 Ada Generation Laptop GPU]
	28e0  AD107M [GeForce RTX 4060 Max-Q / Mobile]
	28e1  AD107M [GeForce RTX 4050 Max-Q / Mobile]
	28f8  AD107GLM [RTX 2000 Ada Generation Embedded GPU]
@@ -13074,9 +13330,11 @@
	2011  Q-Motion Video Capture/Edit board
	4750  S5930 [Matchmaker]
	5920  S5920
+	801d  Roper Scientific PCI TAXI interface
	8043  LANai4.x [Myrinet LANai interface chip]
	8062  S5933_PARASTATION
	807d  S5933 [Matchmaker]
+	8081  GPIB interface card [IOtech Inc. PCI488]
	8088  Kongsberg Spacetec Format Synchronizer
	8089  Kongsberg Spacetec Serial Output Board
	809c  S5933_HEPC3
@@ -13120,6 +13378,7 @@
	8111  Twist3 Frame Grabber
10ec  Realtek Semiconductor Co., Ltd.
	0139  RTL-8139/8139C/8139C+ Ethernet Controller
+	2600  Killer E2600 GbE Controller
	3000  Killer E3000 2.5GbE Controller
	4321  RTL8852BE 802.11ax PCIe Wireless Network Adapter
	5208  RTS5208 PCI Express Card Reader
@@ -13147,6 +13406,7 @@
		1028 06e6  Latitude 11 5175 2-in-1
		1028 09be  Latitude 7410
		1028 0b10  Precision 3571
+		1028 0c06  Precision 3580
		17aa 224f  ThinkPad X1 Carbon 5th Gen
	5260  RTS5260 PCI Express Card Reader
	5261  RTS5261 PCI Express Card Reader
@@ -13160,6 +13420,8 @@
	5762  RTS5762 NVMe SSD Controller
	5763  RTS5763DL NVMe SSD Controller (DRAM-less)
	5765  RTS5765DL NVMe SSD Controller (DRAM-less)
+	5770  RTS5770DL NVMe SSD Controller (DRAM-less)
+	5772  RTS5772DL NVMe SSD Controller (DRAM-less)
	8029  RTL-8029(AS)
		10b8 2011  EZ-Card (SMC1208)
		10ec 8029  RTL-8029(AS)
@@ -13168,6 +13430,7 @@
		1259 2400  AT-2400
		1af4 1100  QEMU Virtual Machine
	8125  RTL8125 2.5GbE Controller
+		4c52 2022  LRES2022PT Single-port 2.5Gb Ethernet Network Adapter
	8129  RTL-8129
		10ec 8129  RT8129 Fast Ethernet Adapter
		11ec 8129  RTL8111/8168 PCIe Gigabit Ethernet (misconfigured)
@@ -13239,7 +13502,7 @@
		1458 e000  GA-MA69G-S3H Motherboard
		1462 235c  P965 Neo MS-7235 mainboard
		1462 236c  945P Neo3-F motherboard
-	8168  RTL8111/8168/8411 PCI Express Gigabit Ethernet Controller
+	8168  RTL8111/8168/8211/8411 PCI Express Gigabit Ethernet Controller
		1019 8168  RTL8111/8168 PCI Express Gigabit Ethernet controller
		1025 1094  Acer Aspire E5-575G
		1028 0283  Vostro 220
@@ -13267,7 +13530,7 @@
		1043 8505  P8 series motherboard
		1043 8554  H81M-C Motherboard
		1043 859e  AM1I-A Motherboard
-		1043 8677  PRIME B450M-A Motherboard
+		1043 8677  Onboard RTL8111H Ethernet
		105b 0d7c  D270S/D250S Motherboard
		10ec 8168  RTL8111/8168 PCI Express Gigabit Ethernet controller
		144d c652  RTL8168 on a NP300E5C series laptop
@@ -13278,10 +13541,12 @@
		1462 4180  Wind PC MS-7418
		1462 7522  X58 Pro-E
		1462 7c37  X570-A PRO motherboard
+		1734 11c0  RTL8211DN on Esprimo P510 D3171 motherboard
		1775 11cc  CC11/CL11
		17aa 3098  ThinkCentre E73
		17aa 3814  Z50-75
		17aa 3823  Lenovo V130-15IGM Laptop - Type 81HL
+		17aa 5068  ThinkPad E480/E580
		17aa 5124  ThinkPad E595
		1849 8168  Motherboard (one of many)
		7470 3468  TG-3468 Gigabit PCI Express Network Adapter
@@ -13342,16 +13607,21 @@
	8813  RTL8813AE 802.11ac PCIe Wireless Network Adapter
	8821  RTL8821AE 802.11ac PCIe Wireless Network Adapter
	8852  RTL8852AE 802.11ax PCIe Wireless Network Adapter
+	a85a  RTL8852AE WiFi 6 802.11ax PCIe Adapter
	b723  RTL8723BE PCIe Wireless Network Adapter
		10ec 8739  Dell Wireless 1801
		17aa b736  Z50-75
+	b821  RTL8821CE PCIe 802.11ac Wireless Network Controller
	b822  RTL8822BE 802.11a/b/g/n/ac WiFi adapter
		103c 831b  Realtek RTL8822BE 802.11ac 2x2 Wi-Fi + Bluetooth 4.2 Combo Adapter (MU-MIMO supported)
		17aa 5124  ThinkPad E595
		17aa b023  ThinkPad E595
+	b852  RTL8852BE PCIe 802.11ax Wireless Network Controller
+	b85b  RTL8852BE PCIe 802.11ax Wireless Network Controller [1T1R]
	c821  RTL8821CE 802.11ac PCIe Wireless Network Adapter
	c822  RTL8822CE 802.11ac PCIe Wireless Network Adapter
	c82f  RTL8822CE 802.11ac PCIe Wireless Network Adapter
+	c852  RTL8852CE PCIe 802.11ax Wireless Network Controller
	d723  RTL8723DE 802.11b/g/n PCIe Adapter
10ed  Ascii Corporation
	7310  V7310
@@ -13379,6 +13649,8 @@
	500c  Alveo U280 XDMA Platform
	5020  Alveo U50 XMDA Platform
	505c  Alveo U55C
+	5074  Alveo X3522, Quad Port, 10/25GbE Adaptable Accelerator Card
+	5084  Alveo X3522, Quad Port, 10/25GbE Low Latency Network Adapter
	6987  SmartSSD
	6988  SmartSSD
	7011  7-Series FPGA Hard PCIe block (AXI/debug)
@@ -13609,6 +13881,8 @@
	0644  RocketRAID 644 4 Port SATA-III Controller (eSATA)
	0645  RocketRAID 644L 4 Port SATA-III Controller (eSATA)
	0646  RocketRAID 644LS SATA-III Controller (4 eSATA devices connected by 1 SAS cable)
+	0750  Rocket 750 PCIe Gen2 SATA III Controller
+	0840  RocketRAID 840 PCIe Gen3 SATA III Controller
	1720  RocketRAID 1720 (2x SATA II RAID Controller)
	1740  RocketRAID 1740
	1742  RocketRAID 1742
@@ -13620,6 +13894,7 @@
	2322  RocketRAID 2322 SATA-II Controller
	2340  RocketRAID 2340 16 Port SATA-II Controller
	2640  RocketRAID 2640 SAS/SATA Controller
+	2720  RocketRAID 2720 PCIe Gen2 6Gb/s SAS/SATA Controller
	2722  RocketRAID 2722
# SFF-8087 Mini-SAS 16 port internal
	2740  RocketRAID 2740
@@ -13627,12 +13902,21 @@
	2744  RocketRaid 2744
# SFF-8088 8 port external / SFF-8087 24 port internal
	2782  RocketRAID 2782
+	2840  RocketRAID 2840 PCIe Gen3 6Gb/s SAS/SATA Controller
	3120  RocketRAID 3120
	3220  RocketRAID 3220
	3320  RocketRAID 3320
+	3520  RocketRAID 3520 PCIe Gen1 8-Port SATA II Controller
+	3530  RocketRAID 3530 PCIe Gen1 12-Port SATA II Controller
+	3740  RocketRAID 3740 PCIe Gen3 12Gb/s SAS/SATA Controller
	4310  RocketRaid 4310
+	4320  RocketRAID 4320 SAS Controller
+	7103  SSD7103 PCIe Gen3 x16 4-Port M.2 NVMe RAID Controller
+	7105  SSD7105 PCIe Gen3 x16 4-Port M.2 NVMe RAID Controller
+	7110  SSD7110 PCIe Gen3 x16 NVMe RAID Controller
	7505  SSD7505 PCIe Gen4 x16 4-Port M.2 NVMe RAID Controller
	7540  SSD7540 PCIe Gen4 x16 8-Port M.2 NVMe RAID Controller
+	7580  SSD7580 PCIe Gen4 x16 8-Port M.2 NVMe RAID Controller
1104  RasterOps Corp.
1105  Sigma Designs, Inc.
	1105  REALmagic Xcard MPEG 1/2/3/4 DVD Decoder
@@ -13818,6 +14102,7 @@
		1043 808c  VT62xx USB1.1 4 port controller
		1043 80a1  A7V8X-X motherboard
		1043 80ed  A7V600/K8V-X/A8V Deluxe motherboard
+		1106 3038  USB 1.1 UHCI controller
		1179 0001  Magnia Z310
		1234 0925  MVP3 USB Controller
		1458 5004  GA-7VAX Mainboard
@@ -13850,6 +14135,7 @@
		1462 590d  KT6 Delta-FIS2R (MS-6590)
		1462 702d  K8T NEO 2 motherboard
		1462 971d  MS-6917
+		153b 1146  Cameo DV Firewire controller
	3050  VT82C596 Power Management
	3051  VT82C596 Power Management
	3053  VT6105M [Rhine-III]
@@ -13945,7 +14231,7 @@
		1043 808c  A7V8X motherboard
		1043 80a1  A7V8X-X motherboard rev 1.01
		1043 80ed  A7V600/K8V-X/A8V Deluxe motherboard
-		1106 3104  USB 2.0 Controller
+		1106 3104  USB 2.0 EHCI controller
		1297 f641  FX41 motherboard
		1458 5004  GA-7VAX Mainboard
		1462 5901  KT6 Delta-FIS2R (MS-6590)
@@ -14263,6 +14549,8 @@
# Superfastcom-PCI (Commtech, Inc.) or DSCC4 WAN Adapter
	2102  DSCC4 PEB/PEF 20534 DMA Supported Serial Communication Controller with 4 Channels
	2104  Eicon Diva 2.02 compatible passive ISDN card
+# S30807-Q5474
+	3101  HiPath 4000 PCI card
	3141  SIMATIC NET CP 5611 / 5621
	3142  SIMATIC NET CP 5613 / 5614
	3143  SIMATIC NET CP 1613
@@ -15366,7 +15654,7 @@
		1179 0021  KIOXIA CD5 series SSD
		1d49 4039  Thinksystem U.2 CM5 NVMe SSD
		1d49 403a  Thinksystem AIC CM5 NVMe SSD
-	0113  BG3 NVMe SSD Controller
+	0113  BG3 x2 NVMe SSD Controller (DRAM-less)
		1179 0001  Toshiba KBG30ZMS128G 128GB NVMe SSD
	0115  XG4 NVMe SSD Controller
	0116  XG5 NVMe SSD Controller
@@ -15392,6 +15680,7 @@
	0805  SD TypA Controller
	0d01  FIR Port Type-DO
		1179 0001  FIR Port Type-DO
+	9602  RS780/RS880 PCI to PCI bridge (int gfx)
117a  A-Trend Technology
117b  L G Electronics, Inc.
117c  ATTO Technology, Inc.
@@ -15686,6 +15975,8 @@
	000b  ATP867-B
	000d  ATP8620
	000e  ATP8620
+	0011  ATP865-B
+		1191 0011  ACARD AEC-6280
	8002  AEC6710 SCSI-2 Host Adapter
	8010  AEC6712UW SCSI
	8020  AEC6712U SCSI
@@ -15949,7 +16240,7 @@
	6281  88F6281 [Kirkwood] ARM SoC
# This device ID was used for earlier chips.
	6381  MV78xx0 [Discovery Innovation] ARM SoC
-	6440  88SE6440 SAS/SATA PCIe controller
+	6440  88SE63x0 x1, 88SE6440 x4 PCIe SAS/SATA 3Gb/s RAID controller
	6450  64560 System Controller
	6460  MV64360/64361/64362 System Controller
	6480  MV64460/64461/64462 System Controller
@@ -15960,7 +16251,7 @@
	6820  88F6820 [Armada 385] ARM SoC
	6828  88F6828 [Armada 388] ARM SoC
	6920  88F6920 [Armada 390] ARM SoC
-	7042  88SX7042 PCI-e 4-port SATA-II
+	7042  88SX7042 PCIe 4-port SATA-II controller
		16b8 434b  Tempo SATA E4P
	7810  MV78100 [Discovery Innovation] ARM SoC
	7820  MV78200 [Discovery Innovation] ARM SoC
@@ -15983,7 +16274,9 @@
11ae  Aztech System Ltd
11af  Avid Technology Inc.
	0001  Cinema
+	ee21  Digidesign DSP Farm
	ee40  Digidesign Audiomedia III
+	ee60  Digidesign SampleCell II / II Plus
11b0  V3 Semiconductor Inc.
	0002  V300PSC
	0292  V292PBC [Am29030/40 Bridge]
@@ -16225,6 +16518,38 @@
11e2  Samsung Information Systems America
11e3  Quicklogic Corporation
	0001  COM-ON-AIR Dosch&Amand DECT
+	0010  QL5032 (PQFP208) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	0011  QL5032 (PBGA256) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	0012  QL5232 (PQFP208) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+		11e3 1204  Becker & Hickl SPC-130
+		11e3 1207  Becker & Hickl DDG-200
+		11e3 1209  Becker & Hickl SHM-180
+		11e3 120c  Becker & Hickl PMM-428
+	0013  QL5232 (PBGA456) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	0014  QL5030 (TQFP144) [QuickPCI] 33 MHz/32-bit PCI Target with Embedded Programmable Logic and Dual Port SRAM
+	0015  QL5130 (TQFP144) [QuickPCI] 33 MHz/32-bit PCI Target with Embedded Programmable Logic and Dual Port SRAM
+	0016  QL5130 (PQFP208) [QuickPCI] 33 MHz/32-bit PCI Target with Embedded Programmable Logic and Dual Port SRAM
+		11e3 120b  Becker & Hickl DEL-350
+	0017  QL5130 (PBGA256) [QuickPCI] 33 MHz/32-bit PCI Target with Embedded Programmable Logic and Dual Port SRAM
+	0019  QL5332 (PQFP208) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	001a  QL5332 (PBGA256) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	001b  QL5432 (PQFP208) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+		11e3 120d  Becker & Hickl SPC-140
+		11e3 1211  Becker & Hickl GVD-120
+		11e3 1212  Becker & Hickl DDG-210
+	001c  QL5432 (PBGA456) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	001d  QL5632 (PQFP208) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	001e  QL5632 (PBGA280) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	001f  QL5632 (PBGA484) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	0020  QL5632 (PBGA516) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	0021  QL5732 (PQFP208) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	0022  QL5732 (PBGA280) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	0023  QL5732 (PBGA484) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	0024  QL5732 (PBGA516) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	002d  QL5022 (TQFP144) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	002e  QL5022 (PQFP208) [QuickPCI] 33 MHz/32-bit PCI Master/Target with Embedded Programmable Logic and Dual Port SRAM
+	002f  QL5020 (TQFP144) [QuickPCI] 33 MHz/32-bit PCI Target with Embedded Programmable Logic and Dual Port SRAM
+	0030  QL5020 (PQFP208) [QuickPCI] 33 MHz/32-bit PCI Target with Embedded Programmable Logic and Dual Port SRAM
	0560  QL5064 Companion Design Demo Board
	5030  PC Watchdog
	8417  QL5064 [QuickPCI] PCI v2.2 bridge for SMT417 Dual TMS320C6416T PMC Module
@@ -16426,9 +16751,9 @@
	13f7  1394 OHCI Compliant Host Controller
	6729  OZ6729
	673a  OZ6730
-	6832  OZ6832/6833 CardBus Controller
-	6836  OZ6836/6860 CardBus Controller
-	6872  OZ6812 CardBus Controller
+	6832  OZ6832/6833 CardBus Controller [Saturn]
+	6836  OZ6836/6860 CardBus Controller [Mercury]
+	6872  OZ6812 CardBus Controller [Challenger]
	6925  OZ6922 CardBus Controller
	6933  OZ6933/711E1 CardBus/SmartCardBus Controller
		1025 1016  Travelmate 612 TX
@@ -16473,6 +16798,7 @@
	8331  O2 Flash Memory Card
	8520  SD/MMC Card Reader Controller
	8621  SD/MMC Card Reader Controller
+		17aa 5068  ThinkPad E480/E580
	8760  FORESEE E2M2 NVMe SSD
1218  Hybricon Corp.
1219  First Virtual Corporation
@@ -16602,6 +16928,7 @@
	00a3  VisionLink F4
	00a9  VisionLink CLS
	00ab  PCIe8g3 A5 10G
+	00b5  PCIe8 RFx SDR
123e  Simutech, Inc.
# nee C-Cube Microsystems / acquired by Magnum Semiconductor
123f  LSI Logic
@@ -16832,8 +17159,11 @@
	0820  SM820 Lynx3D
	0910  SM910
	2260  SM2260 NVMe SSD Controller
+	2261  SM2261XT x2 NVMe SSD Controller (DRAM-less)
	2262  SM2262/SM2262EN SSD Controller
	2263  SM2263EN/SM2263XT (DRAM-less) NVMe SSD Controllers
+	2269  SM2269XT (DRAM-less) NVMe SSD Controller
+	8366  SM8366 NVMe SSD Controller [MonTitan]
1270  Olympus Optical Co., Ltd.
1271  GW Instruments
1272  Telematics International
@@ -16902,7 +17232,7 @@
		1274 2000  Creative CT4810 [Sound Blaster AudioPCI 128]
		1274 2003  Creative SoundBlaster AudioPCI 128
		1274 5880  Creative CT4750 [Sound Blaster PCI 128]
-		1274 8001  Sound Blaster 16PCI 4.1ch
+		1274 8001  Creative CT4750 [Sound Blaster 16 PCI/PCI 128/4.1 Digital]
		1458 a000  5880 AudioPCI On Motherboard 6OXET
		1462 6880  5880 AudioPCI On Motherboard MS-6188 1.00
		270f 2001  5880 AudioPCI On Motherboard 6CTR
@@ -17106,6 +17436,7 @@
	1000  PXD1000
1296  Kofax Image Products
1297  Holco Enterprise Co, Ltd/Shuttle Computer
+	9602  RS780/RS880 PCI to PCI bridge (int gfx)
1298  Spellcaster Telecommunications Inc.
1299  Knowledge Technology Lab.
129a  VMetro, inc.
@@ -17152,7 +17483,8 @@
		10a9 8002  Acenic Gigabit Ethernet
		12ae 0002  Gigabit Ethernet-T (3C986-T)
	00fa  Farallon PN9100-T Gigabit Ethernet
-12af  TDK USA Corp
+12af  TDK Corporation
+	5831  GBDriver GX1 x2 NVMe SSD Controller (DRAM-less)
12b0  Jorge Scientific Corp
12b1  GammaLink
12b2  General Signal Networks
@@ -17262,6 +17594,7 @@
	0009  DAC64
	0018  Riva128
		1048 0c10  VICTORY Erazor
+		1048 0c15  VICTORY Erazor LT-8
		107b 8030  STB Velocity 128
		1092 0350  Viper V330
		1092 1092  Viper V330
@@ -17301,13 +17634,14 @@
# PI7C9X20508GP 5Port-8Lane PCI Express Switch GreenPacket Family
	0508  PI7C9X20508GP PCI Express Switch 5Port-8Lane
	2304  PI7C9X2G304 EL/SL PCIe2 3-Port/4-Lane Packet Switch
-	2308  PI7C9X2G308GP 8-lane PCI Express 2.0 Switch with 3 PCI Express ports
+	2308  PI7C9X2G308GP 3-Ports/8-lane PCIe 2.0 Switch
	2404  PI7C9X2G404 EL/SL PCIe2 4-Port/4-Lane Packet Switch
	2608  PI7C9X2G608GP PCIe2 6-Port/8-Lane Packet Switch
		ea50 cc10  RXi2-BP
-	400a  PI7C9X442SL PCI Express Bridge Port
-	400e  PI7C9X442SL USB OHCI Controller
-	400f  PI7C9X442SL USB EHCI Controller
+	400a  PI7C9X442SL PCIe Bridge Port
+	400c  PI7C9X440SL PCIe Bridge Port
+	400e  PI7C9X440SL/PI7C9X442SL USB OHCI Controller
+	400f  PI7C9X440SL/PI7C9X442SL USB EHCI Controller
	71e2  PI7C7300A/PI7C7300D PCI-to-PCI Bridge
	71e3  PI7C7300A/PI7C7300D PCI-to-PCI Bridge (Secondary Bus 2)
	8140  PI7C8140A PCI-to-PCI Bridge
@@ -17316,9 +17650,12 @@
	8152  PI7C8152A/PI7C8152B/PI7C8152BI PCI-to-PCI Bridge
	8154  PI7C8154A/PI7C8154B/PI7C8154BI PCI-to-PCI Bridge
	8619  PI7C9X2G1616PR PCIe2 16-Port/16-Lane Packet Switch
-	e110  PI7C9X110 PCI Express to PCI bridge
+	b404  PI7C9X2G404 EV/SV PCIe2 4-Port/4-Lane Packet Switch
+	e110  PI7C9X110 PCIe-to-PCI bridge
		1775 11cc  CC11/CL11 CompactPCI Bridge
	e111  PI7C9X111SL PCIe-to-PCI Reversible Bridge
+	e112  PI7C9X112SL PCIe-to-PCI Bridge
+	e113  PI7C9X113SL/PI7C9X118SL PCIe-to-PCI Bridge
	e130  PCI Express to PCI-XPI7C9X130 PCI-X Bridge
12d9  Aculab PLC
	0002  PCI Prosody
@@ -17461,11 +17798,18 @@
	0035  PCI-DAS64/M1/16
	0036  PCI-DAS64/M2/16
	0037  PCI-DAS64/M3/16
+	004b  PCI-MDB64
	004c  PCI-DAS1000
	004d  PCI-QUAD04
	0052  PCI-DAS4020/12
	0053  PCIM-DDA06/16
	0054  PCI-DIO96
+	0055  CPCI-DIO24H
+	0056  PCIM-DAS1602/16
+	0057  PCI-DAS3202/16
+	0059  PCI-QUAD-AC5
+	005a  CPCI-DIO96H
+	005b  CPCI-DIO48H
	005d  PCI-DAS6023
	005e  PCI-DAS6025
	005f  PCI-DAS6030
@@ -17478,10 +17822,23 @@
	0066  PCI-DAS6052
	0067  PCI-DAS6070
	0068  PCI-DAS6071
+	006e  PCI-CTR10
	006f  PCI-DAS6036
	0070  PCI-DAC6702
+	0071  PCI-DAC6703
+	0074  PCI-CTR20HD
+	0077  PCI-DIO24/LP
	0078  PCI-DAS6013
	0079  PCI-DAS6014
+	007b  PCIM-DAS16JR/16
+	007e  PCI-DIO24/S
+	00a5  PCI-2511
+	00a6  PCI-2513
+	00a7  PCI-2515
+	00a8  PCI-2517
+	00be  PCI-QUAD05
+	00da  PCIe-DIO96H
+	00db  PCIe-DIO24
	0115  PCIe-DAS1602/16
1308  Jato Technologies Inc.
	0001  NetCelerator Adapter
@@ -17643,6 +18000,26 @@
		1344 4000  3.2TB U.2
		1344 5000  6.4 TB U.2
		1344 6000  12.8TB U.2
+	51b7  7500 PRO NVMe SSD
+		1028 22e7  DC NVMe 7500 U.2 SED RI 15.36TB
+		1028 22e8  DC NVMe 7500 U.2 SED RI 7.68TB
+		1028 22e9  DC NVMe 7500 U.2 SED RI 3.84TB
+		1028 22ea  DC NVMe 7500 U.2 SED RI 1.92TB
+		1028 22eb  DC NVMe 7500 U.2 SED RI 960GB
+		1028 22ec  DC NVMe 7500 U.2 ISE RI 15.36TB
+		1028 22ed  DC NVMe 7500 U.2 ISE RI 7.68TB
+		1028 22ee  DC NVMe 7500 U.2 ISE RI 3.84TB
+		1028 22ef  DC NVMe 7500 U.2 ISE RI 1.92TB
+		1028 22f0  DC NVMe 7500 U.2 ISE RI 960GB
+	51b8  7500 MAX NVMe SSD
+		1028 22f1  DC NVMe 7500 U.2 ISE MU 12.8TB
+		1028 22f2  DC NVMe 7500 U.2 ISE MU 6.4TB
+		1028 22f3  DC NVMe 7500 U.2 ISE MU 3.2TB
+		1028 22f4  DC NVMe 7500 U.2 ISE MU 1.6TB
+		1028 22f5  DC NVMe 7500 U.2 ISE MU 800GB
+	51b9  6500 ION NVMe SSD
+		1028 22e6  Ent NVMe 6500 RI 30.72TB
+		1028 22f6  Ent NVMe 6500 RI FIPS 30.72TB
	51c0  7400 PRO NVMe SSD
		1028 2162  EC NVMe OPAL 7400 RI M.2 480GB
		1028 2163  EC NVMe OPAL 7400 RI M.2 960GB
@@ -17680,7 +18057,38 @@
		1344 4000  U.3 3200GB
		1344 5000  U.3 6400GB
	51c3  7450 PRO NVMe SSD
+		1028 226b  EC NVMe FIPS 7450 RI M.2 110 960GB
+		1028 226c  EC NVMe ISE 7450 RI M.2 80 480GB
+		1028 226d  EC NVMe ISE 7450 RI M.2 80 960GB
+		1028 226e  EC NVMe SED 7450 RI M.2 80 480GB
+		1028 226f  EC NVMe SED 7450 RI M.2 80 960GB
+		1028 2270  EC NVMe FIPS 7450 RI M.2 80 480GB
+		1028 2271  EC NVMe FIPS 7450 RI M.2 80 960GB
+		1028 2273  EC NVMe ISE 7450 RI M.2 110 960GB
+		1028 2274  EC NVMe ISE 7450 RI M.2 110 1920GB
+		1028 2275  EC NVMe ISE 7450 RI M.2 110 3840GB
+		1028 2278  DC NVMe ISE 7450 RI U.2 960GB
+		1028 2279  DC NVMe ISE 7450 RI U.2 1.92TB
+		1028 227a  DC NVMe ISE 7450 RI U.2 3.84TB
+		1028 227b  DC NVMe ISE 7450 RI U.2 7.68TB
+		1028 227c  DC NVMe ISE 7450 RI U.2 15.36TB
+		1028 227d  DC NVMe SED 7450 RI U.2 960GB
+		1028 227e  DC NVMe SED 7450 RI U.2 1.92TB
+		1028 227f  DC NVMe SED 7450 RI U.2 3.84TB
+		1028 2280  DC NVMe SED 7450 RI U.2 7.68TB
+		1028 2281  DC NVMe SED 7450 RI U.2 15.36TB
	51c4  7450 MAX NVMe SSD
+		1028 2272  EC NVMe ISE 7450 MU M.2 80 800GB
+		1028 228b  DC NVMe SED 7450 MU U.2 800GB
+		1028 228c  DC NVMe ISE 7450 MU U.2 800GB
+		1028 228d  DC NVMe SED 7450 MU U.2 1.6TB
+		1028 228e  DC NVMe ISE 7450 MU U.2 1.6TB
+		1028 228f  DC NVMe SED 7450 MU U.2 3.2TB
+		1028 2290  DC NVMe ISE 7450 MU U.2 3.2TB
+		1028 2291  DC NVMe SED 7450 MU U.2 6.4TB
+		1028 2292  DC NVMe ISE 7450 MU U.2 6.4TB
+		1028 2293  DC NVMe SED 7450 MU U.2 12.8TB
+		1028 2294  DC NVMe ISE 7450 MU U.2 12.8TB
		1344 3000  U.3 1600GB [MTFDKCB1T6TFS/MTFDKCC1T6TFS]
	5404  2210 NVMe SSD [Cobain]
	5405  2300 NVMe SSD [Santana]
@@ -17689,6 +18097,8 @@
	5411  2450 NVMe SSD [HendrixV] (DRAM-less)
	5413  2400 NVMe SSD (DRAM-less)
	5414  3460 NVMe SSD
+	5415  3500 NVMe SSD
+	5416  2550 NVMe SSD (DRAM-less)
	6001  2100AI NVMe SSD [Nitro]
1345  Arescom Inc
1347  Odetics
@@ -17727,8 +18137,97 @@
1355  Kratos Analytical Ltd
1356  The Logical Co
1359  Prisa Networks
-135a  Brain Boxes
-	0a61  UC-324 [VELOCITY RS422/485]
+135a  Brainboxes Ltd
+	0841  UC-268 4 port RS-232 card
+	0861  UC-257 2 port RS-232 + LPT card
+	0862  UC-257 2 port RS-232 + LPT card
+	0863  UC-257 2 port RS-232 + LPT card
+	0881  UC-279 8 port RS-232 card
+	08a1  UC-313 2 port RS-422/485 card
+	08a2  UC-313 2 port RS-422/485 card
+	08a3  UC-313 2 port RS-422/485 card
+	08c1  UC-310 2 port RS-422/485 Opto Isolated card
+	08e1  UC-302 2 port RS-232 card
+	08e2  UC-302 2 port RS-232 card
+	08e3  UC-302 2 port RS-232 card
+	0901  UC-431 3 port RS-232 card
+	0921  UC-420 3 + 1 port RS-232 card
+	0981  UC-475 1 + 1 port RS-232 + LPT card
+	0982  UC-475 1 + 1 port RS-232 + LPT card
+	09a1  UC-607 2 port RS-232 card
+	09a2  UC-607 2 port RS-232 card
+	09a3  UC-607 2 port RS-232 card
+	0a61  UC-324 1 port RS-422/485 card
+	0a81  UC-357 1 port RS-232 + 1 port RS-422/485 card
+	0a82  UC-357 1 port RS-232 + 1 port RS-422/485 card
+	0a83  UC-357 1 port RS-232 + 1 port RS-422/485 card
+	0aa1  UC-246 1 port RS-232 card
+	0aa2  UC-246 1 port RS-232 card
+	0ac1  UP-189 Powered 2 port RS-232 card
+	0ac2  UP-189 Powered 2 port RS-232 card
+	0ac3  UP-189 Powered 2 port RS-232 card
+	0b01  UC-346 4 port RS-422/485 card
+	0b02  UC-346 4 port RS-422/485 card
+	0b21  UP-200 Powered 2 port RS-232 card
+	0b22  UP-200 Powered 2 port RS-232 card
+	0b23  UP-200 Powered 2 port RS-232 card
+	0ba1  UC-101 1 + 1 port RS-232 card
+	0bc1  UC-203 1 + 1 port RS-232 + LPT card
+	0bc2  UC-203 1 + 1 port RS-232 + LPT card
+	0be1  UC-146 LPT card
+	0be2  UC-146 LPT card
+	0c01  UP-869 Powered 2 port RS-232 card
+	0c02  UP-869 Powered 2 port RS-232 card
+	0c03  UP-869 Powered 2 port RS-232 card
+	0c21  UP-880 Powered 2 port RS-232 card
+	0c22  UP-880 Powered 2 port RS-232 card
+	0c23  UP-880 Powered 2 port RS-232 card
+	0c41  UC-368 4 port RS-422/485 Opto Isolated card
+	0ca1  UC-253 2 port RS-232 card
+	0d21  UC-260 4 port RS-232 card
+	0d41  UC-836 4 port RS-232 card
+	0d60  IS-100 1 port RS-232 card
+	0d80  IS-200 2 port RS-232 card
+	0da0  IS-300 1 port RS-232 + LPT card
+	0dc0  IS-400 4 port RS-232 card
+	0de0  IS-500 LPT card
+	0e41  PX-279 8 port RS-232 card
+	0e61  UC-414 3 + 1 port RS-232 + LPT card
+	4000  PX-420 3 + 1 port RS-232 card
+	4001  PX-431 3 port RS-232 card
+	4002  PX-820 Powered 3 + 1 port RS-232 card
+	4003  PX-831 Powered 3 port RS-232 card
+	4004  PX-235 1 port RS-232 card
+	4005  PX-101 1 + 1 port RS-232 card
+	4006  PX-257 1 + 1 port RS-232 + LPT card (Serial port)
+	4007  PX-257 1 + 1 port RS-232 + LPT card (LPT port)
+	4008  PX-835 Powered 1 port RS-232 card
+	4009  PX-857 Powered 2 port RS-232 card
+	400a  PX-260 4 port RS-232 card
+	400b  PX-320 1 port RS-422/485 card
+	400c  PX-313 2 port RS-422/485 card
+	400e  PX-310 2 port RS-422/485 Opto Isolated card
+	400f  PX-346 4 port RS-422/485 card
+	4010  PX-368 4 port RS-422/485 Opto Isolated card
+	4011  PX-420 3 + 1 port RS-232 card
+	4012  PX-431 3 port RS-232 card
+	4013  PX-820 Powered 3 + 1 port RS-232 card
+	4014  PX-831 Powered 3 port RS-232 card
+	4015  PX-257 2 port RS-232 card
+	4016  PX-235 1 port RS-232 card
+	4017  PX-835 Powered 1 port RS-232 card
+	4018  PX-857 Powered 2 port RS-232 card
+	4019  PX-101 1 + 1 port RS-232 card
+	401c  PX-146 LPT card
+	401d  PX-475 1 port RS-232 + LPT card (Serial port)
+	401e  PX-803 Powered 1 + 1 port RS-232 card
+	401f  PX-475 1 port RS-232 + LPT card (LPT port)
+	4027  IX-100 1 port RS-232 card
+	4028  IX-200 2 port RS-232 card
+	4029  IX-400 4 port RS-232 card
+	402a  IX-500 LPT card
+	402c  PX-263 4 port RS-232 + LPT card
+	4100  PX-272 4 + 1 port RS-232 + LPT card
135b  Giganet Inc
135c  Quatech Inc
	0010  QSC-100
@@ -17738,12 +18237,19 @@
	0050  ESC-100D
	0060  ESC-100M
	00f0  MPAC-100 Synchronous Serial Card (Zilog 85230)
+	0120  QSCP-100
+	0130  DSCP-100
+	0140  QSCP-200/300
+	0150  DSCP-200/300
	0170  QSCLP-100
	0180  DSCLP-100
+	0181  DSC-100
	0190  SSCLP-100
	01a0  QSCLP-200/300
	01b0  DSCLP-200/300
+	01b1  DSC-200/300
	01c0  SSCLP-200/300
+	01e0  ESC(LP)-100
	0258  DSPSX-200/300
135d  ABB Network Partner AB
135e  Sealevel Systems Inc
@@ -18233,6 +18739,7 @@
13fc  Computer Peripherals International
13fd  Micro Science Inc
13fe  Advantech Co. Ltd
+	0071  PCIE-1761H, 8-ch Relay and 8-ch Isolated Digital Input Card
	1240  PCI-1240 4-channel stepper motor controller card
	1600  PCI-16xx series PCI multiport serial board (function 0)
# This board has two PCI functions, appears as two PCI devices
@@ -18448,6 +18955,9 @@
	580b  Secure Flash Controller (Xenon)
	580d  System Management Controller (Xenon)
	5811  Xenos GPU (Xenon)
+	5821  Xenos GPU (Zephyr/Falcon)
+	5831  Xenos GPU (Jasper)
+	5841  Xenos GPU (Slim)
1415  Oxford Semiconductor Ltd
	8401  OX9162 Mode 1 (8-bit bus)
	8403  OX9162 Mode 0 (parallel port)
@@ -19187,14 +19697,14 @@
	6002  T6225-SO-CR Unified Wire Ethernet Controller
	6003  T6425-CR Unified Wire Ethernet Controller
	6004  T6425-SO-CR Unified Wire Ethernet Controller
-	6005  T6225-OCP-SO Unified Wire Ethernet Controller
-	6006  T62100-OCP-SO Unified Wire Ethernet Controller
+	6005  T6225-SO-OCP3 Unified Wire Ethernet Controller
+	6006  T6225-OCP3 Unified Wire Ethernet Controller
	6007  T62100-LP-CR Unified Wire Ethernet Controller
	6008  T62100-SO-CR Unified Wire Ethernet Controller
	6009  T6210-BT Unified Wire Ethernet Controller
	600d  T62100-CR Unified Wire Ethernet Controller
	6011  T6225-LL-CR Unified Wire Ethernet Controller
-	6014  T61100-OCP-SO Unified Wire Ethernet Controller
+	6014  T62100-SO-OCP3 Unified Wire Ethernet Controller
	6015  T6201-BT Unified Wire Ethernet Controller
	6080  T6225-6080 Unified Wire Ethernet Controller
	6081  T62100-6081 Unified Wire Ethernet Controller
@@ -19213,14 +19723,14 @@
	6402  T6225-SO-CR Unified Wire Ethernet Controller
	6403  T6425-CR Unified Wire Ethernet Controller
	6404  T6425-SO-CR Unified Wire Ethernet Controller
-	6405  T6225-OCP-SO Unified Wire Ethernet Controller
-	6406  T62100-OCP-SO Unified Wire Ethernet Controller
+	6405  T6225-SO-OCP3 Unified Wire Ethernet Controller
+	6406  T6225-OCP3 Unified Wire Ethernet Controller
	6407  T62100-LP-CR Unified Wire Ethernet Controller
	6408  T62100-SO-CR Unified Wire Ethernet Controller
	6409  T6210-BT Unified Wire Ethernet Controller
	640d  T62100-CR Unified Wire Ethernet Controller
	6411  T6225-LL-CR Unified Wire Ethernet Controller
-	6414  T61100-OCP-SO Unified Wire Ethernet Controller
+	6414  T62100-SO-OCP3 Unified Wire Ethernet Controller
	6415  T6201-BT Unified Wire Ethernet Controller
	6480  T6225-6080 Unified Wire Ethernet Controller
	6481  T62100-6081 Unified Wire Ethernet Controller
@@ -19239,14 +19749,14 @@
	6502  T6225-SO-CR Unified Wire Storage Controller
	6503  T6425-CR Unified Wire Storage Controller
	6504  T6425-SO-CR Unified Wire Storage Controller
-	6505  T6225-OCP-SO Unified Wire Storage Controller
-	6506  T62100-OCP-SO Unified Wire Storage Controller
+	6505  T6225-SO-OCP3 Unified Wire Storage Controller
+	6506  T6225-OCP3 Unified Wire Storage Controller
	6507  T62100-LP-CR Unified Wire Storage Controller
	6508  T62100-SO-CR Unified Wire Storage Controller
	6509  T6210-BT Unified Wire Storage Controller
	650d  T62100-CR Unified Wire Storage Controller
	6511  T6225-LL-CR Unified Wire Storage Controller
-	6514  T61100-OCP-SO Unified Wire Storage Controller
+	6514  T62100-SO-OCP3 Unified Wire Storage Controller
	6515  T6201-BT Unified Wire Storage Controller
	6580  T6225-6080 Unified Wire Storage Controller
	6581  T62100-6081 Unified Wire Storage Controller
@@ -19264,14 +19774,14 @@
	6602  T6225-SO-CR Unified Wire Storage Controller
	6603  T6425-CR Unified Wire Storage Controller
	6604  T6425-SO-CR Unified Wire Storage Controller
-	6605  T6225-OCP-SO Unified Wire Storage Controller
-	6606  T62100-OCP-SO Unified Wire Storage Controller
+	6605  T6225-SO-OCP3 Unified Wire Storage Controller
+	6606  T6225-OCP3 Unified Wire Storage Controller
	6607  T62100-LP-CR Unified Wire Storage Controller
	6608  T62100-SO-CR Unified Wire Storage Controller
	6609  T6210-BT Unified Wire Storage Controller
	660d  T62100-CR Unified Wire Storage Controller
	6611  T6225-LL-CR Unified Wire Storage Controller
-	6614  T61100-OCP-SO Unified Wire Storage Controller
+	6614  T62100-SO-OCP3 Unified Wire Storage Controller
	6615  T6201-BT Unified Wire Storage Controller
	6680  T6225-6080 Unified Wire Storage Controller
	6681  T62100-6081 Unified Wire Storage Controller
@@ -19289,14 +19799,14 @@
	6802  T6225-SO-CR Unified Wire Ethernet Controller [VF]
	6803  T6425-CR Unified Wire Ethernet Controller [VF]
	6804  T6425-SO-CR Unified Wire Ethernet Controller [VF]
-	6805  T6225-OCP-SO Unified Wire Ethernet Controller [VF]
-	6806  T62100-OCP-SO Unified Wire Ethernet Controller [VF]
+	6805  T6225-SO-OCP3 Unified Wire Ethernet Controller [VF]
+	6806  T6225-OCP3 Unified Wire Ethernet Controller [VF]
	6807  T62100-LP-CR Unified Wire Ethernet Controller [VF]
	6808  T62100-SO-CR Unified Wire Ethernet Controller [VF]
	6809  T6210-BT Unified Wire Ethernet Controller [VF]
	680d  T62100-CR Unified Wire Ethernet Controller [VF]
	6811  T6225-LL-CR Unified Wire Ethernet Controller [VF]
-	6814  T61100-OCP-SO Unified Wire Ethernet Controller [VF]
+	6814  T62100-SO-OCP3 Unified Wire Ethernet Controller [VF]
	6815  T6201-BT Unified Wire Ethernet Controller [VF]
	6880  T6225-6080 Unified Wire Ethernet Controller [VF]
	6881  T62100-6081 Unified Wire Ethernet Controller [VF]
@@ -19391,9 +19901,10 @@
		144d a801  SM963 2.5" NVMe PCIe SSD
	a806  NVMe SSD SM0032L
	a808  NVMe SSD Controller SM981/PM981/PM983
-		144d a801  SSD 970 EVO
+# Used by different variants of SSD 970 EVO and PRO
+		144d a801  SSD 970 EVO/PRO
		1d49 403b  Thinksystem U.2 PM983 NVMe SSD
-	a809  NVMe SSD Controller 980
+	a809  NVMe SSD Controller 980 (DRAM-less)
	a80a  NVMe SSD Controller PM9A1/PM9A3/980PRO
		0128 215a  DC NVMe PM9A3 RI U.2 960GB
		0128 215b  DC NVMe PM9A3 RI U.2 1.92TB
@@ -19413,9 +19924,12 @@
		1028 2276  DC NVMe PM9A3 RI 110M.2 960GB
		1028 2277  DC NVMe PM9A3 RI 110M.2 1.92TB
		1028 512d  DC NVMe PM9A3 RI U.2 7.68TB
+		144d a801  SSD 980 PRO
		144d a813  General DC NVMe PM9A3
-	a80b  NVMe SSD Controller PM9B1
+# Actually 88SS1322 according to techpowerup
+	a80b  NVMe SSD Controller PM9B1 (DRAM-less)
	a80c  NVMe SSD Controller S4LV008[Pascal]
+	a80d  NVMe SSD Controller PM9C1a (DRAM-less)
	a820  NVMe SSD Controller 171X
		1028 1f95  Express Flash NVMe XS1715 SSD 400GB
		1028 1f96  Express Flash NVMe XS1715 SSD 800GB
@@ -19538,6 +20052,7 @@
		1028 225d  NVMe PM1745 MU U.2 6.4TB
		1028 225e  NVMe FIPS PM1745 MU U.2 12.8TB
		1028 225f  NVMe PM1745 MU U.2 12.8TB
+	a900  NVMe SSD Controller PM9DXa
	ecec  Exynos 8895 PCIe Root Complex
144e  OLITEC
144f  Askey Computer Corp.
@@ -19658,9 +20173,18 @@
14a2  Millennium Engineering Inc
14a3  Maverick Networks
14a4  Lite-On Technology Corporation
+	2100  CA1-8D128 NVMe SSD
+	2200  CX2-8B256, CX2-8B512 NVMe SSD
+	22a0  EP2-KB960 NVMe SSD
	22f1  M8Pe Series NVMe SSD
+	2300  CA3-8D256, CA3-8D512 NVMe SSD
+	23f1  M9PeG, M9PeGN, M9PeY NVMe SSD
+	2f00  CAZ-82512 NVMe SSD
+	3500  CA5-8D512 NVMe SSD
# Wrong vendor ID used
	4318  Broadcom BCM4318 [AirForce One 54g] 802.11g WLAN Controller
+	5100  CB1-SD256, CB1-SD512 NVMe SSD
+	9100  CL1-3D256, CL1-8D512 NVMe SSD (DRAM-less)
14a5  XIONICS Document Technologies Inc
14a6  INOVA Computers GmBH & Co KG
14a7  MYTHOS Systems Inc
@@ -19726,16 +20250,20 @@
14c3  MEDIATEK Corp.
	0608  MT7921K (RZ608) Wi-Fi 6E 80MHz
	0616  MT7922 802.11ax PCI Express Wireless Network Adapter
+	7603  MT7603E 802.11bgn PCI Express Wireless Network Adapter
	7612  MT7612E 802.11acbgn PCI Express Wireless Network Adapter
	7615  MT7615E 802.11ac PCI Express Wireless Network Adapter
	7630  MT7630e 802.11bgn Wireless Network Adapter
+	7650  MT7650 802.11ac
# MT7612E too?
	7662  MT7662E 802.11ac PCI Express Wireless Network Adapter
	7915  MT7915E 802.11ax PCI Express Wireless Network Adapter
	7916  MT7905D/MT7975
# WiFi 6E capable
	7922  MT7922 802.11ax PCI Express Wireless Network Adapter
+		1a3b 5300  ASUS PCE-AXE59BT
	7961  MT7921 802.11ax PCI Express Wireless Network Adapter
+	8650  MT7650 Bluetooth
14c4  IWASAKI Information Systems Co Ltd
14c5  Automation Products AB
14c6  Data Race Inc
@@ -19980,9 +20508,11 @@
		103c 3383  Ethernet 1Gb 4-port 331T Adapter
		14e4 1904  4-port 1Gb Ethernet Adapter
		14e4 1909  Broadcom NetXtreme 5719 Quad Port Gigabit NIC
-		14e4 d146  BCM95719-P41 4x1GBT Ethernet NIC
-		14e4 d346  BCM95719-N41 4x1GBT Ethernet NIC
+		14e4 d166  BCM95719-P41 4x1GBT Ethernet NIC
+		14e4 d366  BCM95719-N41 4x1GBT Ethernet NIC
		193d 1025  NIC-ETH330T-LP-4P
+# NIC-ETH330T-3S-4P 4xGE 1000Base-T for OCP3.0
+		193d 1086  NIC-ETH330T-3S-4P
	1659  NetXtreme BCM5721 Gigabit Ethernet PCI Express
		1014 02c6  eServer xSeries server mainboard
		1028 01e6  PowerEdge 860
@@ -20110,6 +20640,7 @@
		193d 1003  530F-B
		193d 1006  530F-L
		193d 100f  NIC-ETH522i-Mb-2x10G
+		4c52 9812  LREC9812AF Dual-port 10Gb Ethernet Server Adapter
	1690  NetXtreme BCM57760 Gigabit Ethernet PCIe
	1691  NetLink BCM57788 Gigabit Ethernet PCIe
		1028 04aa  XPS 8300
@@ -20137,6 +20668,7 @@
	16a1  BCM57840 NetXtreme II 10 Gigabit Ethernet
		1043 866e  PEB-10G/57840-2T 10GBase-T Network Adapter
		193d 100b  NIC-ETH521i-Mb-4x10G
+		4c52 9814  LREC9814AF Quad-port 10Gb Ethernet Server Adapter
	16a2  BCM57840 NetXtreme II 10/20-Gigabit Ethernet
		103c 1916  FlexFabric 20Gb 2-port 630FLB Adapter
		103c 1917  FlexFabric 20Gb 2-port 630M Adapter
@@ -20264,6 +20796,8 @@
		152d 8b22  BCM57412 NetXtreme-E 25Gb RDMA Ethernet Controller
# NIC-ETH531F-LP-2P BCM57412 2 x 10G SFP+ Ethernet PCIe Card
		193d 1024  NIC-ETH531F-LP-2P
+# NIC-ETH531F-3S-2P 2x10GbE SFP+ Adapter for OCP3.0
+		193d 1087  NIC-ETH531F-3S-2P
	16d7  BCM57414 NetXtreme-E 10Gb/25Gb RDMA Ethernet Controller
		117c 00cc  FastFrame N422 Dual-port 25Gb Ethernet Adapter
		14e4 1402  BCM957414A4142CC 10Gb/25Gb Ethernet PCIe
@@ -20353,7 +20887,10 @@
		14e4 5250  NetXtreme-E BCM57504 4x25G KR Mezz
		14e4 5425  NetXtreme-E Quad-port 25G SFP28 Ethernet OCP 3.0 Adapter (BCM957504-N425G)
		14e4 d142  NetXtreme-E P425D BCM57504 4x25G SFP28 PCIE
+		1590 0420  HPE Ethernet 25/50Gb 2-port 6310C Adapter
	1752  BCM57502 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet
+	1760  BCM57608 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet
+		14e4 d125  BCM57608 2x200G PCIe Ethernet NIC
	1800  BCM57502 NetXtreme-E Ethernet Partition
	1801  BCM57504 NetXtreme-E Ethernet Partition
	1802  BCM57508 NetXtreme-E Ethernet Partition
@@ -20371,6 +20908,7 @@
	1809  BCM5750X NetXtreme-E RDMA Virtual Function
		14e4 df24  BCM57508 NetXtreme-E NGM2100D 2x100G KR Mezz RDMA Virtual Function
	2711  BCM2711 PCIe Bridge
+	2712  BCM2712 PCIe Bridge
	3352  BCM3352
	3360  BCM3360
	4210  BCM4210 iLine10 HomePNA 2.0
@@ -20542,16 +21080,16 @@
	4360  BCM4360 802.11ac Wireless Network Adapter
	4365  BCM43142 802.11b/g/n
		1028 0016  Wireless 1704 802.11n + BT 4.0
-	43a0  BCM4360 802.11ac Wireless Network Adapter
-	43a1  BCM4360 802.11ac Wireless Network Adapter
-	43a2  BCM4360 802.11ac Wireless Network Adapter
+	43a0  BCM4360 802.11ac Dual Band Wireless Network Adapter
+	43a1  BCM4360 802.11ac 2.4G Wireless Network Adapter
+	43a2  BCM4360 802.11ac 5G Wireless Network Adapter
	43a3  BCM4350 802.11ac Wireless Network Adapter
# Manufactured by Foxconn for Lenovo
		17aa 075a  00JT494
	43a9  BCM43217 802.11b/g/n
	43aa  BCM43131 802.11b/g/n
	43ae  BCM43162 802.11ac Wireless Network Adapter
-	43b1  BCM4352 802.11ac Wireless Network Adapter
+	43b1  BCM4352 802.11ac Dual Band Wireless Network Adapter
		1043 85ba  PCE-AC56 Dual-Band Wireless PCI-E Adapter
	43ba  BCM43602 802.11ac Wireless LAN SoC
	43bb  BCM43602 802.11ac Wireless LAN SoC
@@ -20577,12 +21115,14 @@
	441f  BCM4361 802.11ac Dual-Band Wireless Network Controller
	4420  BCM4361 802.11ac 2.4 GHz Wireless Network Controller
	4421  BCM4361 802.11ac 5 GHz Wireless Network Controller
-	4425  BRCM4378 Wireless Network Adapter
+	4425  BCM4378 802.11ax Dual Band Wireless Network Adapter
	4430  BCM44xx CardBus iLine32 HomePNA 2.0
	4432  BCM4432 CardBus 10/100BaseT
+	4433  BCM4387 802.11ax Dual Band Wireless LAN Controller
	4464  BCM4364 802.11ac Wireless Network Adapter
# brcmfmac reports it as BCM4377/4 but macOS drivers call it BCM4377b
	4488  BCM4377b Wireless Network Adapter
+	449d  BCM43752 802.11ax Dual Band Wireless LAN Controller
	4610  BCM4610 Sentry5 PCI to SB Bridge
	4611  BCM4610 Sentry5 iLine32 HomePNA 1.0
	4612  BCM4610 Sentry5 V.90 56k Modem
@@ -20900,6 +21440,10 @@
		17de 08a6  KWorld/VStream XPert DVB-T
		17de 08b2  KWorld DVB-S 100
		17de a8a6  digitalnow DNTV Live! DVB-T
+		1805 0111  PICOLO Jet-X Video
+		1805 0112  PICOLO Jet-X Video
+		1805 0113  PICOLO Jet-X Video
+		1805 0114  PICOLO Jet-X Video
		1822 0025  digitalnow DNTV Live! DVB-T Pro
		185b e000  VideoMate X500
		18ac d500  FusionHDTV 5 Gold
@@ -20928,6 +21472,10 @@
		14f1 0187  Conexant DVB-T reference design
		17de 08a1  XPert DVB-T PCI BDA DVBT 23880 Transport Stream Capture
		17de 08a6  KWorld/VStream XPert DVB-T
+		1805 0111  PICOLO Jet-X Jpeg
+		1805 0112  PICOLO Jet-X Jpeg
+		1805 0113  PICOLO Jet-X Jpeg
+		1805 0114  PICOLO Jet-X Jpeg
		18ac d500  DViCO FusionHDTV5 Gold
		18ac d810  DViCO FusionHDTV3 Gold-Q
		18ac d820  DViCO FusionHDTV3 Gold-T
@@ -20940,6 +21488,10 @@
		0070 6902  WinTV HVR-4000-HD
		0070 9002  Nova-T DVB-T Model 909
		0070 9402  WinTV-HVR1100 DVB-T/Hybrid
+		1805 0111  PICOLO Jet-X Control
+		1805 0112  PICOLO Jet-X Control
+		1805 0113  PICOLO Jet-X Control
+		1805 0114  PICOLO Jet-X Control
		7063 5500  pcHDTV HD-5500
	8811  CX23880/1/2/3 PCI Video and Audio Decoder [Audio Port]
		0070 3400  WinTV 34604
@@ -21165,6 +21717,7 @@
	9290  FPGA Card
	9300  Universal Exhaust Gas Oxygen Sensor Simulator
	9310  Digital Programmable Resistor
+	9320  Arria 10 FPGA Card
	9350  Analog Input Card
1543  SILICON Laboratories
	3052  Intel 537 [Winmodem]
@@ -21200,6 +21753,7 @@
	be00  PCI Express Bridge
1557  MEDIASTAR Co Ltd
1558  CLEVO/KAPOK Computer
+	9602  RS780/RS880 PCI to PCI bridge (int gfx)
1559  SI LOGIC Ltd
155a  INNOMEDIA Inc
155b  PROTAC INTERNATIONAL Corp
@@ -21283,6 +21837,8 @@
	0001  Eagle Cluster Manager
	0002  Osprey Cluster Manager
	0003  Harrier Cluster Manager
+	0371  Cassini 2 [Slingshot 400Gb]
+	0372  Cassini 2 [Slingshot 400Gb] SR-IOV VF
	a01d  FC044X Fibre Channel HBA
1591  ARN
1592  Syba Tech Ltd
@@ -21373,6 +21929,10 @@
	021f  CX8 Family [ConnectX-8 Secure Flash Recovery]
	0220  BF4 Family Flash Recovery [BlueField-4 SoC Flash Recovery]
	0221  BF4 Family Secure Flash Recovery [BlueField-4 Secure Flash Recovery]
+	0222  CX8 PCIe Switch Family [ConnectX-8 PCIe Switch Flash Recovery]
+	0223  CX8 PCIe Switch Family [ConnectX-8 PCIe Switch Secure Flash Recovery-RMA]
+	0224  CX9 Family [ConnectX-9 Flash Recovery]
+	0225  CX9 Family [ConnectX-9 Secure Flash Recovery-RMA]
	024e  MT53100 [Spectrum-2, Flash recovery mode]
	024f  MT53100 [Spectrum-2, Secure Flash recovery mode]
	0250  Spectrum-3, Flash recovery mode
@@ -21392,16 +21952,23 @@
	0262  MT27710 [ConnectX-4 Lx Programmable] EN
	0263  MT27710 [ConnectX-4 Lx Programmable Virtual Function] EN
	0264  Innova-2 Flex Burn image
-	0270  Spectrum-4L, Flash recovery mode
+	0270  Spectrum-5 in Flash Recovery Mode
	0271  Spectrum-4L, RMA
-	0274  Spectrum-4C, Flash recovery mode
+	0274  Spectrum-6 in Flash Recovery Mode
	0275  Spectrum-4C RMA
	0277  Spectrum-4TOR RMA
+	0278  Quantum-4 in Flash Recovery Mode
+	0279  Quantum-4 RMA
	0281  NPS-600 Flash Recovery
	0282  ArcusE Flash recovery
	0283  ArcusE RMA
	0284  Sagitta
	0285  Sagitta RMA
+	0286  LibraE Flash Recovery
+	0287  LibraE RMA
+# Flash recovery
+	0288  Arcus2
+	0289  Arcus2 RMA
	1002  MT25400 Family [ConnectX-2 Virtual Function]
	1003  MT27500 Family [ConnectX-3]
		1014 04b5  PCIe3 40GbE RoCE Converged Host Bus Adapter for Power
@@ -21502,6 +22069,8 @@
	1020  MT28860
	1021  MT2910 Family [ConnectX-7]
	1023  CX8 Family [ConnectX-8]
+	1024  CX8 PCIe Switch Family [ConnectX-8 PCIe Switch]
+	1025  CX9 Family [ConnectX-9]
	1974  MT28800 Family [ConnectX-5 PCIe Bridge]
	1975  MT416842 Family [BlueField SoC PCIe Bridge]
	1976  MT28908 Family [ConnectX-6 PCIe Bridge]
@@ -21511,11 +22080,14 @@
	197a  MT43162 Family [BlueField-3 Lx SoC PCIe Bridge]
	197b  MT43244 Family [BlueField-3 SoC PCIe Bridge]
	197c  ConnectX/BlueField Family mlx5Gen PCIe Bridge [PCIe Bridge]
+	197d  CX8 Family [ConnectX-8 PCIe Bridge]
+	197e  CX9 Family [ConnectX-9 PCIe Bridge]
	2020  MT2892 Family [ConnectX-6 Dx Emulated PCIe Bridge]
	2021  MT42822 Family [BlueField-2 SoC Emulated PCIe Bridge]
	2023  MT2910 Family [ConnectX-7 Emulated PCIe Bridge]
	2024  MT43244 Family [BlueField-3 SoC Emulated PCIe Bridge]
	2025  ConnectX/BlueField Family mlx5Gen Emulated PCIe Bridge [Emulated PCIe Bridge]
+	2100  CX8 Family [CX8 Data Direct Interface]
	4117  MT27712A0-FDCF-AE
		1bd4 0039  SN10XMP2P25
		1bd4 003a  25G SFP28 SP EO251FM9 Adapter
@@ -21580,6 +22152,8 @@
	a2de  BF4 Family Crypto disabled [BlueField-4 SoC Crypto disabled]
	a2df  BF4 Family integrated network controller [BlueField-4 integrated network controller]
	b200  ArcusE
+	b201  LibraE
+	b202  Arcus2
	c2d1  BlueField DPU Family Auxiliary Communication Channel [BlueField Family]
	c2d2  MT416842 BlueField SoC management interfac
	c2d3  MT42822 BlueField-2 SoC Management Interface
@@ -21604,6 +22178,7 @@
	d2f2  Quantum-2 NDR (400Gbps) switch
	d2f4  Quantum-3
	d2f6  Quantum-3CPO
+	d2f8  Quantum-4
15b4  CCI/TRIAD
15b5  Cimetrics Inc
15b6  Texas Memory Systems Inc
@@ -21627,10 +22202,10 @@
	2001  Skyhawk Series NVME SSD
	5001  WD Black NVMe SSD
	5002  SanDisk Extreme Pro / WD Black 2018/SN750/PC SN720 NVMe SSD
-	5003  WD Blue SN500 / PC SN520 NVMe SSD
-	5004  PC SN520 NVMe SSD
-	5005  PC SN520 NVMe SSD
-	5006  WD Black SN750 / PC SN730 / Red SN700 NVMe SSD
+	5003  WD Blue SN500 / PC SN520 x2 M.2 2280 NVMe SSD
+	5004  PC SN520 x2 M.2 2230 NVMe SSD
+	5005  PC SN520 x2 M.2 2242 NVMe SSD
+	5006  SanDisk Extreme Pro / WD Black SN750 / PC SN730 / Red SN700 NVMe SSD
	5007  IX SN530 NVMe SSD (DRAM-less)
	5008  PC SN530 NVMe SSD (DRAM-less)
	5009  SanDisk Ultra 3D / WD Blue SN550 NVMe SSD
@@ -21639,19 +22214,23 @@
		1414 500b  Xbox Series X
	500d  WD Ultrastar DC SN340 NVMe SSD
	5011  WD PC SN810 / Black SN850 NVMe SSD
-	5014  WD Green SN350 NVMe SSD 1 TB (DRAM-less)
+	5014  WD PC SN540 / Green SN350 NVMe SSD 1 TB (DRAM-less)
	5015  PC SN740 NVMe SSD (DRAM-less)
	5016  WD PC SN740 NVMe SSD 512GB (DRAM-less)
	5017  WD Black SN770 / PC SN740 256GB / PC SN560 (DRAM-less) NVMe SSD
-	5019  WD Green SN350 NVMe SSD 240GB (DRAM-less)
-	501a  WD Blue SN570 NVMe SSD
+	5019  WD Green SN350 240GB (DRAM-less) / SN560E NVMe SSD
+	501a  SanDisk Ultra 3D / WD Blue SN570 NVMe SSD (DRAM-less)
	501d  WD Blue SN550 NVMe SSD 2TB (DRAM-less)
-	501e  PC SN735 NVMe SSD (DRAM-less)
+	501e  PC SN735 / WD_BLACK SN750 SE NVMe SSD (DRAM-less)
	501f  WD PC SN735 NVMe SSD 512GB (DRAM-less)
	5025  WD Blue SN570 NVMe SSD 2TB
	5026  WD PC SN735 NVMe SSD 1TB (DRAM-less)
+	5028  WD CH SN560 NVMe SSD
	5030  WD Black SN850X NVMe SSD
+	5034  WD PC SN5000S M.2 2230 NVMe SSD (DRAM-less)
+	5036  WD PC SN5000S M.2 2280 NVMe SSD (DRAM-less)
	5041  WD Blue SN580 NVMe SSD (DRAM-less)
+	5042  WD Black SN770M NVMe SSD (DRAM-less)
15b8  ADDI-DATA GmbH
	1001  APCI1516 SP controller (16 digi outputs)
	1003  APCI1032 SP controller (32 digi inputs w/ opto coupler)
@@ -21673,7 +22252,15 @@
		117c 0022  Celerity FC-42XS Fibre Channel Adapter
		117c 0025  Celerity FC-44ES Fibre Channel Adapter
		117c 0026  Celerity FC-42ES Fibre Channel Adapter
+	0500  Infiniium Memory Controller Interface
+	0501  Infiniium Acquisition System Interface
+	0507  Infiniium Acquisition System (80000 series)
+	0508  Infiniium Acquisition Support
+	0b01  82350B PCI GPIB
	1100  E8001-66442 PCI Express CIC
+	1218  82351A PCI Express GPIB
+	12d6  82350C PCI GPIB
+	12d7  82351B PCI Express GPIB
	2922  64 Bit, 133MHz PCI-X Exerciser & Protocol Checker
	2928  64 Bit, 66MHz PCI Exerciser & Analyzer
	2929  64 Bit, 133MHz PCI-X Analyzer & Exerciser
@@ -21709,7 +22296,6 @@
15d8  Cybernetics Technology Co Ltd
15d9  Super Micro Computer Inc
	1b64  SCC-B8SB80-B1
-	1b67  AOC-S3916L-H16iR-32DD
	1b9d  Supermicro AOC-S3816L-L16IR
	1c6e  Supermicro AOC-SLG4-2H8M2
15da  Cyberfirm Inc
@@ -21927,6 +22513,11 @@
165f  Linux Media Labs, LLC
	1020  LMLM4 MPEG-4 encoder
1661  Worldspace Corp.
+1665  EDAX Inc
+# P/N 4035.006.19720
+	1973  DPP-II FR2 Board
+# P/N 4035.065.20000
+	2000  SG-IIP Board
1668  Actiontec Electronics Inc
	0100  Mini-PCI bridge
# Formerly SiByte, Inc.
@@ -22363,6 +22954,9 @@
	7054  APA7-504 Reconfigurable Artix-7 52,160 Cell FPGA module 24 LVDS channels
	7072  AP731 Multi-function I/O Module with 12-bit DAC
	7073  AP730 Multi-function I/O Module 16 Digital I/O 8 Differential Analog In 4 Analog Out
+	7731  APZU-301 Zynq Ultrascale+ Module 28 TTL channels
+	7733  APZU-303 Zynq Ultrascale+ Module 20 TTL & 3 RS485/422 channels
+	7734  APZU-304 Zynq Ultrascale+ Module 14 LVDS channels
16da  Advantech Co., Ltd.
	0011  INES GPIB-PCI
16df  PIKA Technologies Inc.
@@ -22463,6 +23057,7 @@
	13c8  AEP SureWare Runner 1000V3
# nee Fujitsu Siemens Computers GmbH
1734  Fujitsu Technology Solutions
+	9602  RS780/RS880 PCI to PCI bridge (int gfx)
1735  Aten International Co. Ltd.
1737  Linksys
	0029  WPG54G ver. 4 PCI Card
@@ -22485,6 +23080,7 @@
1745  ViXS Systems, Inc.
	2020  XCode II Series
	2100  XCode 2100 Series
+		1043 48c9  My Cinema PE6200 Analog
1749  RLX Technologies
174b  PC Partner Limited / Sapphire Technology
174d  WellX Telecom SA
@@ -22741,6 +23337,7 @@
	0006  LENSE30512GMSP34MEAT3TA
	3181  ThinkCentre M75n IoT
	402b  Intel 82599ES 10Gb 2-port Server Adapter X520-2
+	9602  RS780/RS880 PCI to PCI bridge (int gfx)
17ab  Phillips Components
17af  Hightech Information System Ltd.
17b3  Hawking Technologies
@@ -22762,23 +23359,34 @@
	0002  AGN300 802.11 a/b/g True MIMO Wireless Card
		1385 6d00  WPNT511 RangeMax 240 Mbps Wireless CardBus Adapter
		1737 0054  WPC54GX4 v1 802.11g Wireless-G Notebook Adapter with SRX400
+	0104  APQ8096 PCIe Root Complex [Snapdragon 820]
	0105  MSM8998 PCIe Root Complex
+	0106  SDM850 PCIe Root Complex [Snapdragon 850]
+	0107  SDM850 PCIe Root Port [Snapdragon 850]
	0108  SM8150 PCIe Root Complex
	0109  SA8195P PCIe Root Complex
+	010b  SM8250 PCIe Root Complex [Snapdragon 865/870 5G]
+	010c  SM8350 PCIe Root Complex [Snapdragon 888]
	010e  SC8280XP PCI Express Root Port
+	0110  SM8475 PCIe Root Complex [Snapdragon 8+ Gen 1]
	0300  MDM9x35 LTE Modem [Snapdragon X7]
	0301  MDM9x45 LTE Modem [Snapdragon X12]
	0302  MDM9x55 LTE Modem [Snapdragon X16]
+	0304  SDX24 [Snapdragon X24 4G]
+	0306  SDX55 [Snapdragon X55 5G]
	0400  Datacenter Technologies QDF2432 PCI Express Root Port
	0401  Datacenter Technologies QDF2400 PCI Express Root Port
	1000  QCS405 PCIe Root Complex
	1101  QCA6390 Wireless Network Adapter
	1103  QCNFA765 Wireless Network Adapter
	1104  QCN6024/9024/9074 Wireless Network Adapter
-	1108  IPQ95xx/97xx PCI Express Root Port
+	1107  WCN785x Wi-Fi 7(802.11be) 320MHz 2x2 [FastConnect 7800]
+		105b e0f7  High Band Simultaneous Wireless Network Adapter
+	1108  IPQ95xx/97xx PCIe Root Port
	1109  QCN62xx/92xx Wireless Network Adapter
17cc  NetChip Technology, Inc
-	2280  USB 2.0
+	2280  NET2280 PCI to USB 2.0 Hi-Speed Peripheral Controller
+	2282  NET2282 PCI to USB 2.0 Hi-Speed Peripheral Controller
17cd  Cadence Design Systems, Inc.
17cf  Z-Com, Inc.
17d3  Areca Technology Corp.
@@ -22861,6 +23469,7 @@
		17d5 7831  X3120 Dual Port 10GBase-CR
17db  Cray Inc
	0101  XT Series [Seastar] 3D Toroidal Router
+	0501  Cassini 1 [Slingshot 200Gb]
17de  KWorld Computer Co. Ltd.
17df  Dini Group
	1864  Virtex4 PCI Board w/ QL5064 Bridge [DN7000K10PCI/DN8000K10PCI/DN8000K10PSX/NOTUS]
@@ -22978,6 +23587,35 @@
1804  Ralink corp. (wrong ID)
	3060  RT3060 Wireless 802.11n 1T/1R
1805  Euresys S.A.
+	0201  PICOLO Alert PCI
+	0202  PICOLO Diligent
+	0204  PICOLO Alert-RC
+	0205  PICOLO Alert PCIe
+	0206  PICOLO Diligent Plus PCIe
+	0207  PICOLO Alert-RC PCIe
+	0300  GRABLINK Expert 2
+	0301  GRABLINK Quickpack ColorScan
+	0302  GRABLINK Value cPCI
+	0303  GRABLINK Expert 2 cPCI
+	0305  GRABLINK Avenue
+	0306  GRABLINK Quickpack CFA
+	0307  GRABLINK Express
+	0308  GRABLINK Quickpack CFA PCIe
+	0309  GRABLINK Quickpack CFA PCIe (Recovery)
+	030a  GRABLINK Full
+	030b  GRABLINK Full (Recovery)
+	030c  GRABLINK DualBase
+	030d  GRABLINK DualBase (Recovery)
+	030e  GRABLINK Base
+	030f  GRABLINK Base (Recovery)
+	0310  GRABLINK Full XR
+	0311  GRABLINK Full XR (Recovery)
+	0401  DOMINO Iota
+	0402  DOMINO Alpha 2
+	0403  DOMINO Harmony
+	0404  DOMINO Melody
+	0407  DOMINO Symphony
+	0408  DOMINO Symphony PCIe
1809  Lumanate, Inc.
180c  IEI Integration Corp
1813  Ambient Technologies Inc
@@ -23042,6 +23680,7 @@
	0701  RT2760 Wireless 802.11n 1T/2R
		1737 0074  WMP110 v2 802.11n RangePlus Wireless PCI Adapter
	0781  RT2790 Wireless 802.11n 1T/2R PCIe
+		11ad 7600  HP WN7600R
		1814 2790  RT2790 Wireless 802.11n 1T/2R PCIe
	3060  RT3060 Wireless 802.11n 1T/1R
		1186 3c04  DWA-525 Wireless N 150 Desktop Adapter (rev.A1)
@@ -23094,6 +23733,7 @@
	08b0  MVC200-DC
1846  Alcatel-Lucent
1849  ASRock Incorporation
+	9602  RS780/RS880 PCI to PCI bridge (int gfx)
184a  Thales Computers
	1100  MAX II cPLD
1850  Advantest Corporation
@@ -23333,6 +23973,7 @@
	0013  SH7757 PCIe Switch [PS]
	0014  uPD720201 USB 3.0 Host Controller
	0015  uPD720202 USB 3.0 Host Controller
+		4c52 9a72  LRSU9A72 2-Port USB 3.0 Exchange Adapter
	001a  SH7758 PCIe-PCI Bridge [PPB]
	001b  SH7758 PCIe End-Point [PBI]
	001d  SH7758 PCIe Switch [PS]
@@ -23342,7 +23983,7 @@
	0100  A104d QUAD T1/E1 AFT card
	0300  A101 single-port T1/E1
	0400  A104u Quad T1/E1 AFT
-1924  Solarflare Communications
+1924  AMD Solarflare
	0703  SFC4000 rev A net [Solarstorm]
		10b8 0102  SMC10GPCIe-10BT (A2) [TigerCard]
		10b8 0103  SMC10GPCIe-10BT (A3) [TigerCard]
@@ -23495,6 +24136,8 @@
1942  ClearSpeed Technology plc
	e511  Advance X620 accelerator card
	e521  Advance e620 accelerator card
+1945  MERA
+	6200  PXI/PXIe measurement module
1947  C-guys, Inc.
	4743  CG200 Dual SD/SDIO Host controller device
1948  Alpha Networks Inc.
@@ -23592,6 +24235,7 @@
	7010  MPC8641 PCI Host Bridge
	7011  MPC8641D PCI Host Bridge
	7018  MPC8610
+	81c0  LS1046A PCI Express Bridge
	c006  MPC8308
		1a56 1201  Bigfoot Killer E2100 Gigabit Ethernet Controller
# PCIe interface for emulator
@@ -23642,10 +24286,12 @@
		1043 1477  N56VZ
	10a0  QCA8172 Fast Ethernet
	10a1  QCA8171 Gigabit Ethernet
+	2010  QCA8175 card reader controller
	2048  Attansic L2 Fast Ethernet
	2060  AR8152 v1.1 Fast Ethernet
	2062  AR8152 v2.0 Fast Ethernet
		1043 8468  Eee PC 1015PX
+	3010  QCA8175 SD controller
# E2200, E2201, E2205
	e091  Killer E220x Gigabit Ethernet Controller
	e0a1  Killer E2400 Gigabit Ethernet Controller
@@ -23657,8 +24303,11 @@
196d  Club-3D BV
196e  PNY
1971  AGEIA Technologies, Inc.
-	1011  Physics Processing Unit [PhysX]
+	0000  Physics Processing Unit [PhysX] 100 Series PCI Express Card
+# The PCI and PCIe versions have a different PID
+	1011  Physics Processing Unit [PhysX] 100 Series PCI Card
		1043 0001  PhysX P1
+	1021  Physics Processing Unit [PhysX] 200 Series PCI Express Card
# nee Eberspaecher Electronics
1974  Star Electronics GmbH & Co. KG
	0009  FlexCard PMC-II
@@ -23708,13 +24357,16 @@
	16ff  OX16C954 HOST-B
1987  Phison Electronics Corporation
	5007  E7 NVMe Controller
-	5008  E8 PCIe3 NVMe Controller
+	5008  E8 PCIe3 x2 NVMe Controller
	5012  E12 NVMe Controller
-	5013  PS5013 E13 NVMe Controller
+	5013  PS5013-E13 PCIe3 NVMe Controller (DRAM-less)
+	5015  PS5015-E15 PCIe3 NVMe Controller (DRAM-less)
	5016  E16 PCIe4 NVMe Controller
	5018  E18 PCIe4 NVMe Controller
	5019  PS5019-E19 PCIe4 NVMe Controller (DRAM-less)
	5021  PS5021-E21 PCIe4 NVMe Controller (DRAM-less)
+	5026  PS5026-E26 PCIe5 NVMe Controller
+	5027  PS5027-E27T PCIe4 NVMe Controller (DRAM-less)
1989  Montilio Inc.
	0001  RapidFile Bridge
	8001  RapidFile
@@ -24173,47 +24825,66 @@
	1050  Virtio 1.0 GPU
	1052  Virtio 1.0 input
	1053  Virtio 1.0 socket
+	1058  virtio-mem
	105a  Virtio file system
	1110  Inter-VM shared memory
		1af4 1100  QEMU Virtual Machine
1af5  Netezza Corp.
1afa  J & W Electronics Co., Ltd.
1b00  Montage Technology Co., Ltd.
+	c001  CXL Memory Expander Controller M88MX5891
+		1ff9 00a2  CXL Memory Expander
+		1ff9 00a4  CXL Memory Expander
1b03  Magnum Semiconductor, Inc,
	6100  DXT/DXTPro Multiformat Broadcast HD/SD Encoder/Decoder/Transcoder
	7000  D7 Multiformat Broadcast HD/SD Encoder/Decoder/Transcoder
1b08  MSC Technologies GmbH
1b0a  Pegatron
+	9602  RS780/RS880 PCI to PCI bridge (int gfx)
1b13  Jaton Corp
1b1a  K&F Computing Research Co.
	0e70  GRAPE
1b1c  Corsair
1b21  ASMedia Technology Inc.
-	0611  ASM1061 SATA IDE Controller
-	0612  ASM1062 Serial ATA Controller
+	0611  ASM1061 Serial ATA Controller
+	0612  ASM1061/ASM1062 Serial ATA Controller
		1849 0612  Motherboard
+	0622  ASM106x Serial ATA AHCI Controller
+		4c52 9661  LRST9661 2-port M.2 SATA3(6Gb/s) Raid Adapter
+	0624  ASM106x SATA/RAID Controller
	0625  106x SATA/RAID Controller
-	1040  ASM1040 XHCI Controller
+	1040  ASM1040 SuperSpeed USB Host Controller
+	1041  ASM1041 SuperSpeed USB Host Controller
	1042  ASM1042 SuperSpeed USB Host Controller
		1043 1059  K53SM motherboard
		1043 8488  P8B WS Motherboard
		1849 1042  Motherboard
+	1064  ASM1064 Serial ATA Controller
	1080  ASM1083/1085 PCIe to PCI Bridge
		1849 1080  Motherboard
	1142  ASM1042A USB 3.0 Host Controller
+	1164  ASM1164 Serial ATA AHCI Controller
	1166  ASM1166 Serial ATA Controller
	1182  ASM1182e 2-Port PCIe x1 Gen2 Packet Switch
		1b21 118f  ASM1182e 2-Port PCIe x1 Gen2 Packet Switch
	1184  ASM1184e 4-Port PCIe x1 Gen2 Packet Switch
		1849 1184  ASM1184e 4-Port PCIe x1 Gen2 Packet Switch
	1187  ASM1187e 7-Port PCIe x1 Gen2 Packet Switch
+	118f  ASM1187e 7-Port PCIe x1 Gen2 Packet Switch
	1242  ASM1142 USB 3.1 Host Controller
+		4c52 9a42  LRSU9A42 2-Port Type-A Exchange Adapter
	1343  ASM1143 USB 3.1 Host Controller
+	1806  ASM1806 4-Port PCIe x2 Gen2 Packet Switch
	1812  ASM1812 6-Port PCIe x4 Gen2 Packet Switch
+	1824  ASM1824 12-Port PCIe x8 Gen2 Packet Switch
	2142  ASM2142/ASM3142 USB 3.1 Host Controller
		1462 7a72  H270 PC MATE
+	2806  ASM2806 4-Port PCIe x2 Gen3 Packet Switch
+	2812  ASM2812 6-Port PCIe x4 Gen3 Packet Switch
	2824  ASM2824 PCIe Gen3 Packet Switch
	3042  ASM3042 USB 3.2 Gen 1 xHCI Controller
+	3142  ASM3142 USB 3.2 Gen 2x1 xHCI Controller
+	3241  ASM3241 USB 3.2 Gen 2 Host Controller
	3242  ASM3242 USB 3.2 Host Controller
1b26  Netcope Technologies, a.s.
	c132  COMBO-LXT155
@@ -24246,6 +24917,7 @@
	000c  QEMU PCIe Root port
	000d  QEMU XHCI Host Controller
	0010  QEMU NVM Express Controller
+	0011  QEMU PVPanic device
	0013  QEMU UFS Host Controller
	0100  QXL paravirtual graphic card
		1af4 1100  QEMU Virtual Machine
@@ -24290,9 +24962,14 @@
		1028 2113  BOSS-N1 Modular
		1028 2151  BOSS-N1 Modular ET
		1028 2196  ROR-N1
+		1028 2286  BOSS-N1 DC-MHS
+		1028 2287  BOSS-N1 Modular
		1b4b 2241  Santa Cruz NVMe Host Adapter
+		1b96 4000  WD_BLACK AN1500 NVMe SSD
		1d49 0306  ThinkSystem M.2 NVMe 2-Bay RAID Enablement Kit
		1d49 0307  ThinkSystem 7mm NVMe 2-Bay Rear RAID Enablement Kit
+		4c52 9541  LRNV9541 2-port M.2 NVMe Raid Adapter
+	2b42  88W8997 2.4/5 GHz Dual-Band 2x2 Wi-Fi® 5 (802.11ac) + Bluetooth® 5.3 Solution
	2b43  NXP 88W9098 Wi-Fi 6 (ax) MAC #1
	2b44  NXP 88W9098 Wi-Fi 6 (ax) MAC #2
	2b45  NXP 88W9098 Bluetooth 5.3
@@ -24300,6 +24977,7 @@
	9123  88SE9123 PCIe SATA 6.0 Gb/s controller
		dc93 600e  DC-6xxe series SATA 6G controller
	9125  88SE9125 PCIe SATA 6.0 Gb/s controller
+		4c52 9615  LRST9615 4-port SATA3(6Gb/s) Exchange Adapter
	9128  88SE9128 PCIe SATA 6 Gb/s RAID controller
	9130  88SE9128 PCIe SATA 6 Gb/s RAID controller with HyperDuo
		1043 8438  P8P67 Deluxe Motherboard
@@ -24328,6 +25006,7 @@
		1d49 0303  ThinkSystem SE350 M.2 SATA 4-Bay Data RAID Mirroring Enablement Kit
		1d49 0304  ThinkSystem M.2 SATA 2-Bay RAID Enablement Kit
		1d49 0305  ThinkSystem 7mm SATA 2-Bay Rear RAID Enablement Kit
+		4c52 9630  LRST9630 4-port SATA3(6Gb/s) Raid Adapter
	9235  88SE9235 PCIe 2.0 x2 4-port SATA 6 Gb/s Controller
	9445  88SE9445 PCIe 2.0 x4 4-Port SAS/SATA 6 Gbps RAID Controller
	9480  88SE9480 SAS/SATA 6Gb/s RAID controller
@@ -24341,6 +25020,22 @@
# 2xHDMI and 2xHD-SDI inputs
	e5f4  MPEG2 and H264 Encoder-Transcoder
	f1c4  Dual ASI-RX/TX-CI card
+1b5e  STAR-Dundee Ltd.
+	0001  SpaceWire PCI Mk2
+	0002  SpaceWire PCIe Mk1
+	0003  SpaceWire cPCI Mk2
+	0004  SpaceWire PXI Recorder Mk1
+	0005  SpaceWire PXI Interface Mk1
+	0006  SpaceWire PXI Interface Mk1 with RMAP Target
+	0008  SpaceWire PXI Router Mk1
+	000b  SpaceWire PXI Interface Mk2
+	000c  SpaceWire PXI Interface Mk2 with RMAP Target
+	000d  SpaceWire PXI Router Mk2
+	000e  SpaceWire PXI Recorder Mk2
+	0100  STAR-Ultra PCIe
+	0102  STAR-Ultra Single-Lane Router
+	0200  SpaceWire PCIe Mk2
+1b61  Byd Precision Manufacture Co.,Ltd
1b66  DELTACAST
	0007  DELTA-3G-elp-d
1b6f  Etron Technology, Inc.
@@ -24380,6 +25075,12 @@
	2404  Ultrastar DC SN640 NVMe SSD
	2500  Ultrastar DC SN840 NVMe SSD
	2600  Ultrastar DC ZN540 ZNS NVMe SSD
+	2700  Ultrastar DC SN650 NVMe SSD
+	2701  Ultrastar DC SN650 NVMe SSD
+	2702  Ultrastar DC SN650 NVMe SSD
+	2720  Ultrastar DC SN650 NVMe SSD
+	2721  Ultrastar DC SN650 NVMe SSD
+	2722  Ultrastar DC SN655 NVMe SSD
	3001  RapidFlex C2000 NVMe Initiator
	3714  PC SN730 NVMe SSD
	3734  PC SN730 NVMe SSD
@@ -24495,11 +25196,18 @@
		1bb1 0179  Nytro 5360S - E3.S
# Nytro 5360S (Rocinante Single Port) TCG - E3.S
		1bb1 0180  Nytro 5360S TCG - E3.S
+# Nytro 5060H (Rocinante High Performance) non-SED
+		1bb1 0181  Nytro 5060H
		1bb1 01a1  Nytro XP7102
	5012  FireCuda/IronWolf 510 SSD
	5013  BarraCuda Q5 NVMe SSD (DRAM-less)
	5016  FireCuda 520/IronWolf 525 SSD
	5018  FireCuda 530 SSD
+	5019  BarraCuda PCIe SSD (DRAM-less)
+# 2TB
+	5021  FireCuda 520 SSD
+# 1TB
+	5026  FireCuda 540 SSD
1bb3  Bluecherry
	4304  BC-04120A MPEG4 4 port video encoder / decoder
	4309  BC-08240A MPEG4 4 port video encoder / decoder
@@ -24559,17 +25267,40 @@
	100c  NS8510G1Uxxx, NS8610G1Uxxx NVME SSD
	100e  NS8500G2Uxxxx, NS8600G2Uxxxx NVME SSD
1bee  IXXAT Automation GmbH
+	0002  CAN-IB100/PCIe
	0003  CAN-IB200/PCIe
+	0004  CAN-IB120/PCIe Mini
+	0005  CAN-IB130/PCIe 104
+	0006  CAN-IB230/PCIe 104
+	000e  CAN-IB500/PCIe
+	000f  CAN-IB600/PCIe
+	0010  CAN-IB300/PCI
+	0011  CAN-IB400/PCI
+	0012  CAN-IB520/PCIe Mini
+	0016  CAN-IB410/PMC
+	001c  CAN-IB810/PMC
+	001e  INpact PCIe
+	001f  INpact PCIe Mini
+	0029  INpact M.2
+	002d  CAN-IB630/PCIe 104
+	002e  CAN-IB640/PCIe
1bef  Lantiq
	0011  MIPS SoC PCI Express Port
1bf4  VTI Instruments Corporation
	0001  SentinelEX
	7011  RX0xxx
1bf5  Greenliant
+	1000  G7200 series U.2 NVMe SSD
1bfc  Duagon AG
1bfd  EeeTOP
1c00  Nanjing Qinheng Microelectronics Co., Ltd.
+	2170  CH351 PCIe Parallel Port Adapter
+	2273  CH351 PCIe Dual Port Serial Adapter
+	3050  CH382L PCIe Parallel Port Adapter
+	3250  CH384 Dual Port Serial and Parallel Port Adapter
	3252  CH382 PCIe Dual Port Serial Adapter
+# Device ID reused: CH352 is for PCI bus, CH382 for PCIe.
+	3253  CH352/CH382 PCI/PCIe Dual Port Serial Adapter
1c09  CSP, Inc.
	4254  10G-PCIE3-8D-2S
	4255  10G-PCIE3-8D-Q
@@ -24596,6 +25327,7 @@
	0020  FD2110
	0021  FD722
	0022  FD788
+	0023  FD722-M2
1c28  Lite-On IT Corp. / Plextor
	0122  M6e PCI Express SSD [Marvell 88SS9183]
# previously Fiberblaze
@@ -24624,6 +25356,7 @@
	a000  FBC2CGG3 Capture 2x40Gb [Mango_02]
	a001  FBC2CGG3 Capture 2x100Gb [Mango_02]
	a003  FBC2CGG3 Capture 16x10Gb [Mango]
+	a006  FBC2CGG3 Capture 2x100Gb [Mango]
	a007  FBC2CGG3 Capture 2x40Gb [Mango]
	a008  FBC2CGG3 Capture 2x25Gb [Mango]
	a009  FBC2CGG3 Capture 16x10Gb [Mango]
@@ -24636,6 +25369,8 @@
	a013  FB2CGHH Capture 2x25Gb [Tivoli]
	a014  FB2CGHH Capture 8x10Gb [Tivoli]
	a015  FB2CGHH Capture 2x100Gb [Tivoli]
+	a016  FB2CG Capture 8x25Gb [Savona]
+	a017  FB2CGHH Capture 8x25Gb [Tivoli]
	a017
# Used on V120 VME Crate Controller
1c32  Highland Technology, Inc.
1c33  Daktronics, Inc
@@ -24658,6 +25393,7 @@
	0023  Ultrastar SN200 Series NVMe SSD
		1c58 8823  Ultrastar Memory (ME200)
1c5c  SK hynix
+	1069  PCB01 NVMe Solid State Drive
	1282  PC300 NVMe Solid State Drive 128GB
	1283  PC300 NVMe Solid State Drive 256GB
	1284  PC300 NVMe Solid State Drive 512GB
@@ -24677,6 +25413,7 @@
	2429  PE6011 NVMe Solid State Drive
	243b  PE6110 NVMe Solid State Drive
		1c5c 0100  PE6110 NVMe Solid State Drive
+	2527  PE4010 NVMe Solid State Drive
	2839  PE8000 Series NVMe Solid State Drive
		1028 2143  DC NVMe SED PE8010 RI U.2 960GB
		1028 2144  DC NVMe PE8010 RI U.2 960GB
@@ -24699,6 +25436,8 @@
		1c5c 0101  PE81x0 U.2/3 NVMe Solid State Drive
	284a  PE8110 Series NVMe Solid State Drive
	2a49  PE9110 Series NVMe Solid State Drive
+	2a59  PE9010 Series NVMe Solid State Drives
+	2b59  PS10x0 Series NVMe Solid State Drives
1c5f  Beijing Memblaze Technology Co. Ltd.
	000d  PBlaze5 520/526
		1c5f 0220  NVMe SSD PBlaze5 520 1920G AIC
@@ -24721,11 +25460,18 @@
		1c5f 0b40  NVMe SSD PBlaze6 6530 7680G AIC
		1c5f 0b41  NVMe SSD PBlaze6 6530 7680G 2.5" U.2
		1c5f 0b47  NVMe SSD PBlaze6 6630 7680G 2.5" U.2
+		1c5f 1320  NVMe SSD PBlaze6 6531 1920G AIC
		1c5f 1321  NVMe SSD PBlaze6 6531 1920G 2.5" U.2
+		1c5f 1330  NVMe SSD PBlaze6 6531 3840G AIC
		1c5f 1331  NVMe SSD PBlaze6 6531 3840G 2.5" U.2
+		1c5f 1340  NVMe SSD PBlaze6 6531 7680G AIC
		1c5f 1341  NVMe SSD PBlaze6 6531 7680G 2.5" U.2
+		1c5f 1421  NVMe SSD PBlaze6 6541 1920G 2.5" U.2
+		1c5f 1427  NVMe SSD PBlaze6 6641 1920G 2.5" U.2(dual port)
		1c5f 1431  NVMe SSD PBlaze6 6541 3840G 2.5" U.2
+		1c5f 1437  NVMe SSD PBlaze6 6641 3840G 2.5" U.2(dual port)
		1c5f 1441  NVMe SSD PBlaze6 6541 7680G 2.5" U.2
+		1c5f 1447  NVMe SSD PBlaze6 6641 7680G 2.5" U.2(dual port)
		1c5f 4b20  NVMe SSD PBlaze6 6536 1600G AIC
		1c5f 4b21  NVMe SSD PBlaze6 6536 1600G 2.5" U.2
		1c5f 4b25  NVMe SSD PBlaze6 6536 1600G E1.S
@@ -24737,11 +25483,18 @@
		1c5f 4b40  NVMe SSD PBlaze6 6536 6400G AIC
		1c5f 4b41  NVMe SSD PBlaze6 6536 6400G 2.5" U.2
		1c5f 4b47  NVMe SSD PBlaze6 6636 6400G 2.5" U.2
+		1c5f 5320  NVMe SSD PBlaze6 6537 1600G AIC
		1c5f 5321  NVMe SSD PBlaze6 6537 1600G 2.5" U.2
+		1c5f 5330  NVMe SSD PBlaze6 6537 3200G AIC
		1c5f 5331  NVMe SSD PBlaze6 6537 3200G 2.5" U.2
+		1c5f 5340  NVMe SSD PBlaze6 6537 6400G AIC
		1c5f 5341  NVMe SSD PBlaze6 6537 6400G 2.5" U.2
+		1c5f 5421  NVMe SSD PBlaze6 6547 1600G 2.5" U.2
+		1c5f 5427  NVMe SSD PBlaze6 6647 1600G 2.5" U.2(dual port)
		1c5f 5431  NVMe SSD PBlaze6 6547 3200G 2.5" U.2
+		1c5f 5437  NVMe SSD PBlaze6 6647 3200G 2.5" U.2(dual port)
		1c5f 5441  NVMe SSD PBlaze6 6547 6400G 2.5" U.2
+		1c5f 5447  NVMe SSD PBlaze6 6647 6400G 2.5" U.2(dual port)
	003d  PBlaze5 920/926
		1c5f 0a30  NVMe SSD PBlaze5 920 3840G AIC
		1c5f 0a31  NVMe SSD PBlaze5 920 3840G 2.5" U.2
@@ -24764,12 +25517,37 @@
		1c5f 4b51  NVMe SSD PBlaze6 6936 12800GB 2.5" U.3
		1c5f 4b61  NVMe SSD PBlaze6 6936 25600GB 2.5" U.3
	003f  PBlaze7 7940/7946 NVMe SSD
+		1c5f 0431  NVMe SSD PBlaze7 7940 3840G 2.5" U.2
+		1c5f 0c31  NVMe SSD PBlaze7 7940 3840G 2.5" U.2
+		1c5f 0c41  NVMe SSD PBlaze7 7940 7680G 2.5" U.2
+		1c5f 0c51  NVMe SSD PBlaze7 7940 15360G 2.5" U.2
+		1c5f 1430  NVMe SSD PBlaze7 7940 3840G AIC
		1c5f 1431  NVMe SSD PBlaze7 7940 3840G 2.5" U.2
+		1c5f 1435  NVMe SSD PBlaze7 7940 3840G E1.S
+		1c5f 1440  NVMe SSD PBlaze7 7940 7680G AIC
		1c5f 1441  NVMe SSD PBlaze7 7940 7680G 2.5" U.2
+		1c5f 1445  NVMe SSD PBlaze7 7940 7680G E1.S
+		1c5f 1450  NVMe SSD PBlaze7 7940 15360G AIC
		1c5f 1451  NVMe SSD PBlaze7 7940 15360G 2.5" U.2
+		1c5f 4c31  NVMe SSD PBlaze7 7946 3200G 2.5" U.2
+		1c5f 4c41  NVMe SSD PBlaze7 7946 6400G 2.5" U.2
+		1c5f 4c51  NVMe SSD PBlaze7 7946 12800G 2.5" U.2
+		1c5f 5430  NVMe SSD PBlaze7 7946 3200G AIC
		1c5f 5431  NVMe SSD PBlaze7 7946 3200G 2.5" U.2
+		1c5f 5435  NVMe SSD PBlaze7 7946 3200G E1.S
+		1c5f 5440  NVMe SSD PBlaze7 7946 6400G AIC
		1c5f 5441  NVMe SSD PBlaze7 7946 6400G 2.5" U.2
+		1c5f 5445  NVMe SSD PBlaze7 7946 6400G E1.S
+		1c5f 5450  NVMe SSD PBlaze7 7946 12800G AIC
		1c5f 5451  NVMe SSD PBlaze7 7946 12800G 2.5" U.2
+		1ea0 2100  TP2510 Series U.2 NVMe Datacenter SSD(3.84TB)
+		1ea0 2101  TP2511 Series U.2 NVMe Datacenter SSD(3.84TB)
+		1ea0 2110  TP2510 Series E3.S NVMe Datacenter SSD(3.84TB)
+		1ea0 2111  TP2511 Series E3.S NVMe Datacenter SSD(3.84TB)
+		1ea0 2200  TP2510 Series U.2 NVMe Datacenter SSD(7.68TB)
+		1ea0 2201  TP2511 Series U.2 NVMe Datacenter SSD(7.68TB)
+		1ea0 2210  TP2510 Series E3.S NVMe Datacenter SSD(7.68TB)
+		1ea0 2211  TP2511 Series E3.S NVMe Datacenter SSD(7.68TB)
	0540  PBlaze4 NVMe SSD
	0550  PBlaze5 700/900
	0555  PBlaze5 510/516
@@ -24810,19 +25588,32 @@
	0002  Clarett
1cb8  Dawning Information Industry Co., Ltd.
1cc1  ADATA Technology Co., Ltd.
+	1202  IM2P32A8 NVMe SSD (DRAM-less)
# SX6000LNP
	2263  XPG SX6000 Lite NVMe SSD (DRAM-less)
+	32a8  SM2P32A8 NVMe SSD (DRAM-less)
	33f3  IM2P33F3 NVMe SSD (DRAM-less)
-	33f8  IM2P33F8ABR1 NVMe SSD
-	5350  XPG GAMMIX S50 NVMe SSD
-	5762  FALCON NVMe SSD
-	5766  ADATA XPG GAMMIXS1 1L Media (256 GB SSD)
+	33f4  IM2P33F4 NVMe SSD (DRAM-less)
+	33f8  IM2P33F8 series NVMe SSD (DRAM-less)
+	41c3  SM2P41C3 NVMe SSD (DRAM-less)
+	41c8  SM2P41C8 NVMe SSD (DRAM-less)
+	5236  XPG GAMMIX S70 BLADE NVMe SSD
+	5350  XPG GAMMIX S50, S50 Lite NVMe SSD
+# PREMIUM NVMe SSD for PlayStation 5
+	5370  GAMMIX S70 BLADE, PS5 PREMIUM NVMe SSD
+	5762  FALCON, GAMMIX S41, SPECTRIX S40G NVMe SSD (DRAM-less)
+	5763  XPG GAMMIX S5 NVMe SSD (DRAM-less)
+	5766  XPG GAMMIXS1 1L, XPG GAMMIX S5, LEGEND 710 / 740, SWORDFISH NVMe SSD (DRAM-less)
+	5772  LEGEND 850 LITE NVMe SSD (DRAM-less)
	612a  LEGEND 750 NVMe SSD (DRAM-less)
-	613a  LEGEND 840 NVMe SSD (DRAM-less)
+	613a  ATOM 50, LEGEND 840 NVMe SSD (DRAM-less)
	621a  LEGEND 850 NVMe SSD (DRAM-less)
	622a  LEGEND 960 NVMe SSD
-	624a  LEGEND 700 NVMe SSD (DRAM-less)
-	627a  LEGEND 800 NVMe SSD
+	624a  LEGEND 700, XPG GAMMIX S20 NVMe SSD (DRAM-less)
+# 1TB
+	627a  LEGEND 800 NVMe SSD (DRAM-less)
+# 500GB
+	628a  LEGEND 800 NVMe SSD (DRAM-less)
	8201  XPG SX8200 Pro PCIe Gen3x4 M.2 2280 Solid State Drive
1cc4  Shenzhen Unionmemory Information System Ltd.
1203 NVMe SSD Controller UHXXXa series @@ -24841,9 +25632,10 @@ 17a9 RPITJ1TBVME2HWD NVMe SSD 1024GB 17aa AH631 PCIe 3.0 NVMe SSD 512GB 17ab AH631 PCIe 3.0 NVMe SSD 256GB - 2263 AM611 PCIe 3.0 NVMe SSD 256GB - 5008 AM610 PCIe 3.0 NVMe SSD 128GB + 2263 AM611 PCIe 3.0 x2 NVMe SSD 256GB + 5008 AM610 PCIe 3.0 x2 NVMe SSD 128GB, 256GB 5012 RPITJ512PED2OWX NVMe SSD 512GB + 5212 AM521 PCIe 3.0 NVMe SSD 256GB 6201 AM620 PCIe 3.0 NVMe SSD 128GB 6202 AM620 PCIe 3.0 NVMe SSD 256GB 6203 AM620 PCIe 3.0 NVMe SSD 512GB @@ -24851,14 +25643,37 @@ 6302 AM630 PCIe 4.0 NVMe SSD 256GB 6303 AM630 PCIe 4.0 x4 NVMe SSD Controller 6304 AM630 PCIe 4.0 NVMe SSD 1024GB + 6a02 AM6A0 PCIe 4.0 NVMe SSD 256GB 6a03 RPETJ512MKP1QDQ PCIe 4.0 NVMe SSD 512GB (DRAM-less) + 6a13 RPJYJ512MKN1QWQ PCIe 4.0 NVMe SSD 512GB (DRAM-less) 6a14 RPEYJ1T24MKN2QWY PCIe 4.0 NVMe SSD 1024GB (DRAM-less) + 8030 NVMe SSD Controller UH8X2X/UH7X2X series + 1cc4 1122 NVMe SSD UH812a U.2 1.92TB + 1cc4 1123 NVMe SSD UH812a U.2 3.84TB + 1cc4 1124 NVMe SSD UH812a U.2 7.68TB + 1cc4 1125 NVMe SSD UH812a U.2 15.36TB + 1cc4 1222 NVMe SSD UH812a E3.S 1.92TB + 1cc4 1223 NVMe SSD UH812a E3.S 3.84TB + 1cc4 1224 NVMe SSD UH812a E3.S 7.68TB + 1cc4 1225 NVMe SSD UH812a E3.S 15.36TB + 1cc4 2112 NVMe SSD UH832a U.2 1.6TB + 1cc4 2113 NVMe SSD UH832a U.2 3.2TB + 1cc4 2114 NVMe SSD UH832a U.2 6.4TB + 1cc4 2115 NVMe SSD UH832a U.2 12.8TB + 1cc4 2212 NVMe SSD UH832a E3.S 1.6TB + 1cc4 2213 NVMe SSD UH832a E3.S 3.2TB + 1cc4 2214 NVMe SSD UH832a E3.S 6.4TB + 1cc4 2215 NVMe SSD UH832a E3.S 12.8TB + 1cc4 3122 NVMe SSD UH712a U.2 1.92TB + 1cc4 3123 NVMe SSD UH712a U.2 3.84TB + 1cc4 3124 NVMe SSD UH712a U.2 7.68TB + 1cc4 3125 NVMe SSD UH712a U.2 15.36TB 1cc5 Embedded Intelligence, Inc. 0100 PCIe-CAN-02 Dual CAN bus (9-pin male). PCI Express x1. 0101 PCIe-CAN-01 Single CAN bus (9-pin male). PCI Express x1. 1cc7 Radian Memory Systems Inc. - 0200 RMS-200 - 0250 RMS-250 + 0200 RMS-200 PCIe NVMe SSD + 0250 RMS-250 U.2 NVMe SSD 1ccf Zoom Corporation 0001 TAC-2 Thunderbolt Audio Converter 1cd2 SesKion GmbH @@ -24871,6 +25686,7 @@ 0306 Simulyzer-RT CompactPCI Serial CAN-2 card (CAN-FD) 0307 Simulyzer-RT CompactPCI Serial DIO-2 card [Xilinx Zynq UltraScale+] 1cd7 Nanjing Magewell Electronics Co., Ltd. 
+ 0002 Pro Capture AIO 0010 Pro Capture Endpoint 0014 PRO CAPTURE AIO 4K PLUS 0017 PRO CAPTURE AIO 4K @@ -24905,6 +25721,7 @@ 7164 NeuronDevice (Trainium) 1d0f 0000 Trainium 7264 NeuronDevice (Inferentia2) + 7364 NeuronDevice (Trainium2) 8061 NVMe EBS Controller cd01 NVMe SSD Controller ec20 Elastic Network Adapter (ENA) @@ -24920,55 +25737,67 @@ 0714 ZX-100/ZX-200 PCI Express Root Port 0715 ZX-100/ZX-200 PCI Express Root Port 0716 ZX-D PCI Express Root Port - 0717 KX-5000/KX-6000/KX-6000G/KH-40000 Express Root Port - 0718 KX-5000/KX-6000/KX-6000G/KH-40000 Express Root Port - 0719 KX-5000/KX-6000/KX-6000G/KH-40000 Express Root Port - 071a KX-5000/KX-6000/KX-6000G/KH-40000 Express Root Port - 071b KX-5000/KX-6000/KX-6000G/KH-40000 Express Root Port - 071c KX-5000/KX-6000/KX-6000G/KH-40000 Express Root Port - 071d KX-5000/KX-6000/KX-6000G/KH-40000 Express Root Port - 071e KX-5000/KX-6000/KX-6000G/KH-40000 Express Root Port + 0717 KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 PCI Express Root Port + 0718 KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 PCI Express Root Port + 0719 KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 PCI Express Root Port + 071a KX-5000/KX-6000/KX-6000G/KH-40000 PCI Express Root Port + 071b KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 PCI Express Root Port + 071c KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 PCI Express Root Port + 071d KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 PCI Express Root Port + 071e KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 PCI Express Root Port 071f ZX-200 Upstream Port of PCI Express Switch 0720 ZX-200 PCIE RC6 controller 0721 ZX-200 Downstream Port of PCI Express Switch 0722 ZX-200 PCIE P2C bridge - 0731 KH-40000 Express Root Port - 0732 KH-40000 Express Root Port + 0731 KH-40000 PCI Express Root Port + 0732 KH-40000/KX-7000 PCI Express Root Port + 0733 KX-7000 PCI Express Root Port + 0734 KX-7000 PCI Express Root Port + 0735 KX-7000 PCI Express Root Port + 0736 KX-7000 PCI Express Root Port + 0737 KX-7000 PCI Express Root Port + 0738 KX-7000 PCI Express Root Port + 0739 KX-7000 PCI Express Root Port + 073a KX-7000 PCI Express Root Port + 073b KX-7000 PCI Express Root Port 1000 ZX-D Standard Host Bridge - 1001 ZX-D/ZX-E/KH-40000 Miscellaneous Bus + 1001 ZX-D/ZX-E/KH-40000/KX-7000 Miscellaneous Bus 1003 ZX-E Standard Host Bridge 1005 KH-40000 Standard Host Bridge 1006 KX-6000G Standard Host Bridge + 1007 KX-7000 Standard Host Bridge 3001 ZX-100 Standard Host Bridge 300a ZX-100 Miscellaneous Bus - 3038 ZX-100/ZX-200/KX-6000/KX-6000G/KH-40000 Standard Universal PCI to USB Host Controller - 3104 ZX-100/ZX-200/KX-6000/KX-6000G/KH-40000 Standard Enhanced PCI to USB Host Controller - 31b0 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000 Standard Host Bridge - 31b1 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000 Standard Host Bridge - 31b2 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000 DRAM Controller - 31b3 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000 Power Management Controller - 31b4 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000 I/O APIC - 31b5 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000 Scratch Device - 31b7 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000 Standard Host Bridge + 3038 ZX-100/ZX-200/KX-6000/KX-6000G/KH-40000/KX-7000 Standard Universal PCI to USB Host Controller + 3104 ZX-100/ZX-200/KX-6000/KX-6000G/KH-40000/KX-7000 Standard Enhanced PCI to USB Host Controller + 31b0 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 Standard Host Bridge + 31b1 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 Standard Host Bridge + 31b2 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 DRAM Controller + 
31b3 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 Power Management Controller + 31b4 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 I/O APIC + 31b5 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 Scratch Device + 31b7 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 Standard Host Bridge 31b8 ZX-100/ZX-D PCI to PCI Bridge - 3288 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000 High Definition Audio Controller + 3200 KX-7000 Host Bridge + 3288 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 High Definition Audio Controller 345b ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000 Miscellaneous Bus 3a02 ZX-100 C-320 GPU 3a03 ZX-D C-860 GPU - 3a04 ZX-E C-960 GPU + 3a04 KX-6000 C-960 GPU + 3a05 KX-7000 C-1190 GPU 3b01 KH-40000 OPI Controller 3b02 KH-40000 OPI Controller 3b03 KH-40000 OPI Controller 3b04 KH-40000 OPI Controller - 3c00 KH-40000 DRAM Controller + 3c00 KH-40000/KX-7000 DRAM Controller 3c02 KX-6000G DRAM Controller 3d01 KX-6000G C-1080 GPU - 9002 ZX-100/ZX-200/KH-40000 EIDE Controller + 9002 ZX-100/ZX-200/KH-40000/KX-7000 EIDE Controller 9003 ZX-100/KX-6000/KX-6000G EIDE Controller - 9043 KX-6000G/KH-40000 RAID Controller + 9043 KX-6000G/KH-40000/KX-7000 RAID Controller 9045 ZX-100/ZX-D/ZX-E RAID Accelerator 0 9046 ZX-D/ZX-E RAID Accelerator 1 - 9083 ZX-100/ZX-200/KX-6000/KX-6000G/KH-40000 StorX AHCI Controller + 9083 ZX-100/ZX-200/KX-6000/KX-6000G/KH-40000/KX-7000 StorX AHCI Controller 9084 ZX-100 StorX AHCI Controller 9100 ZX-200 Cross bus 9101 ZX-200 Traffic Controller @@ -24976,17 +25805,21 @@ 9142 ZX-D High Definition Audio Controller 9144 ZX-E High Definition Audio Controller 9145 KX-6000G High Definition Audio Controller + 9146 KX-7000 High Definition Audio Controller 9180 ZX-200 Networking Gigabit Ethernet Adapter 91c1 KH-40000 ZPI Controller 91c2 KH-40000 ZPI Controller 9202 ZX-100 USB eXtensible Host Controller 9203 ZX-200 USB eXtensible Host Controller - 9204 KX-6000/KX-6000G USB eXtensible Host Controller + 9204 KX-6000/KX-6000G/KX-7000 USB3 xHCI Host Controller 9205 KH-40000 USB eXtensible Host Controller + 9206 KX-7000 USB4 Controller 9286 ZX-D eMMC Host Controller - 9300 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000 eSPI Host Controller + 9300 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 eSPI Host Controller + 9500 KX-7000 I2S Controller + 9501 KX-7000 I2S Controller 95d0 ZX-100 Universal SD Host Controller - f410 ZX-100/KX-5000/KX-6000/KX-6000G PCI Com Port + f410 ZX-100/KX-5000/KX-6000/KX-6000G/KH-40000/KX-7000 PCI Com Port 1d18 RME 0001 Fireface UFX+ # acquired by Intel @@ -25022,7 +25855,6 @@ 1d44 DPT a400 PM2x24/PM3224 1d49 Lenovo - 0522 ThinkSystem RAID 5350-8i PCIe 12Gb Internal Adapter 1d4c Diamanti, Inc. 1d5c Fantasia Trading LLC 1d61 Technobox, Inc. @@ -25034,20 +25866,29 @@ 2440 CL2440 1d6a Aquantia Corp.
0001 AQC107 NBase-T/IEEE 802.3bz Ethernet Controller [AQtion] - 00b1 AQC100 10G Ethernet MAC controller [AQtion] - 07b1 AQC107 NBase-T/IEEE 802.3bz Ethernet Controller [AQtion] + 4c52 6880 LREC6880BT Single-port 10Gb Ethernet Network Adapter + 00b1 AQtion AQC100 NBase-T/IEEE 802.3an Ethernet Controller [Atlantic 10G] + 00c0 Antigua NBase-T/IEEE 802.3an Ethernet Controller - Engineering Sample + 04c0 AQtion AQC113 NBase-T/IEEE 802.3an Ethernet Controller [Antigua 10G] + 4c52 1053 LRES1053PT Quad-port 10Gb Ethernet Network Adapter + 4c52 2051 LRES2051PT Single-port 10Gb Ethernet Network Adapter + 07b1 AQtion AQC107 NBase-T/IEEE 802.3an Ethernet Controller [Atlantic 10G] # Older revision of QNAP QM2 M.2 2280 PCIe SSD & 10GbE Expansion Card 1baa 07b1 QM2-2P10G1TA [QXG 10GbE Network Adapter] # Newer revision of QNAP QM2 M.2 2280 PCIe SSD & 10GbE Expansion Card 1baa 07b2 QM2-2P10G1TA [QM2 Expansion Adapter] + 4c52 6880 LREC6880BT Single-port 10Gb Ethernet Network Adapter 08b1 AQC108 NBase-T/IEEE 802.3bz Ethernet Controller [AQtion] 11b1 AQC111 NBase-T/IEEE 802.3bz Ethernet Controller [AQtion] 12b1 AQC112 NBase-T/IEEE 802.3bz Ethernet Controller [AQtion] - 14c0 AQC113C NBase-T/IEEE 802.3bz Ethernet Controller [AQtion] - 87b1 AQC107 NBase-T/IEEE 802.3bz Ethernet Controller [AQtion] - 94c0 AQC113CS NBase-T/IEEE 802.3bz Ethernet Controller [AQtion] + 12c0 AQtion AQC115C NBase-T/IEEE 802.3bz Ethernet Controller [Antigua 2.5G] + 14c0 AQC113C NBase-T/IEEE 802.3an Ethernet Controller [Marvell Scalable mGig] + 80b1 AQtion AQC100S NBase-T/IEEE 802.3an Ethernet Controller [Atlantic 10G] + 87b1 AQtion AQC107S NBase-T/IEEE 802.3an Ethernet Controller [Atlantic 10G] + 93c0 AQtion AQC114CS NBase-T/IEEE 802.3bz Ethernet Controller [Antigua 5G] + 94c0 AQtion AQC113CS NBase-T/IEEE 802.3an Ethernet Controller [Antigua 10G] 1043 87f5 ProArt X570-CREATOR WIFI - d107 AQC107 NBase-T/IEEE 802.3bz Ethernet Controller [AQtion] + d107 AQtion AQC107 NBase-T/IEEE 802.3an Ethernet Controller [Atlantic 10G] 1043 8741 XG-C100C d108 AQC108 NBase-T/IEEE 802.3bz Ethernet Controller [AQtion] 1d6c Atomic Rules LLC @@ -25088,6 +25929,12 @@ 1021 AR-MGMT-PF [Board-Generic Management Physical Function] 1022 AR-ARKA-FX2 [Arkville 128B DPDK Data Mover for Agilex] 1023 AR-BD-FX1 [BufferDirect Zero-Bounce Data Motion Endpoint] + 1024 AR-TK242 [2x10GbE Packet Capture Device] + 1025 AR-TK242-FX2 [2x100GbE Gen5 Packet Capture Device] + 1026 AR-TK242-FX2 [1x200GbE Gen5 Packet Capture Device] + 1027 AR-P2P-DBG [P2P Debug Function] + 1028 AR-P2P-ATR [P2P Actor Function] + 1029 AR-P2P-UTL [P2P Utility Function] 4200 A5PL-E1-10GETI [10 GbE Ethernet Traffic Instrument] 1d72 Xiaomi 1d78 DERA Storage @@ -25153,10 +26000,10 @@ 1d78 7204 Aliflash V2 U.2 15mm 3.84TB NVMe SSD 1d78 7208 Aliflash V2 U.2 15mm 7.68TB NVMe SSD 1d79 Transcend Information, Inc. - 2262 NVMe PCIe SSD 220S - 2263 NVMe PCIe SSD 110S/112S/120S/MTE652T2 (DRAM-less) + 2262 NVMe PCIe SSD 220S/MTE662T2 + 2263 NVMe PCIe SSD 110S/112S/120S/MTE300S/MTE400S/MTE652T2 (DRAM-less) 2264 NVMe PCIe SSD 250H - 2267 NVMe PCIe SSD 240S/MTE710T + 2267 NVMe PCIe SSD 220S/240S/MTE710T 5766 NVMe PCIe SSD 110Q (DRAM-less) 1d7c Aerotech, Inc. # Fiber-optic HyperWire motion control bus from Aerotech. @@ -25214,10 +26061,15 @@ 0002 Colossus GC1 [S1] 1d97 Shenzhen Longsys Electronics Co., Ltd. 
1062 Lexar NM710 NVME SSD + 1160 FORESEE P900 BGA NVMe SSD (DRAM-less) + 1202 Lexar NM610 PRO NVME SSD (DRAM-less) + 12e4 ORCA 4836 Series eSSD + 1602 Lexar NM790 NVME SSD (DRAM-less) 1d97 Lexar NM620 NVME SSD (DRAM-less) - 2263 SM2263EN/SM2263XT-based OEM SSD - 2269 Lexar NM760 NVME SSD (DRAM-less) - 5216 Lexar NM620 NVME SSD (DRAM-less) + 2263 SM2263EN/SM2263XT-based OEM NVME SSD (DRAM-less) + 2269 FORESEE XP2000, Lexar NM760 NVME SSD (DRAM-less) + 5216 FORESEE XP1000 / Lexar Professional CFexpress Type B Gold series, NM620 PCIe NVME SSD (DRAM-less) + 5220 FORESEE XP2100 NVMe SSD (DRAM-less) 5236 Lexar NM800 PRO NVME SSD # nee Facebook, Inc. 1d9b Meta Platforms, Inc. @@ -25241,6 +26093,7 @@ 3800 Network Flow Processor 3800 3803 Network Flow Processor 3800 Virtual Function 1dad Fungible + 0108 FC50, FC100, FC200 DPU NVMeoF Adapters 1db2 ATP ELECTRONICS INC 1db7 Phytium Technology Co., Ltd. dc20 [X100 Series] @@ -25270,21 +26123,42 @@ 5216 NVMe SSD Controller IG5216 (DRAM-less) 5220 NVMe SSD Controller IG5220 (DRAM-less) 5236 NVMe SSD Controller IG5236 + 1dbe 4001 Dongting-B1 DC SSD M.2 480GB + 1dbe 4002 Dongting-B1 DC SSD M.2 960GB 5636 NVMe DC SSD IG5636 - 1dbe 0001 DONGTING-N1 DC SSD U.2 - 1dbe 1001 DONGHU-Z1 DC ZNS SSD U.2 + 1dbe 0001 Dongting-N1 DC SSD U.2 1600GB + 1dbe 0002 Dongting-N1 DC SSD U.2 1920GB + 1dbe 0003 Dongting-N1 DC SSD U.2 3200GB + 1dbe 0004 Dongting-N1 DC SSD U.2 3840GB + 1dbe 0005 Dongting-N1 DC SSD U.2 6400GB + 1dbe 0006 Dongting-N1 DC SSD U.2 7680GB + 1dbe 1001 Donghu-Z1 DC ZNS SSD U.2 4000GB + 1dbe 1002 Donghu-Z1 DC ZNS SSD U.2 8000GB 5638 NVMe DC SSD IG5638 - 1dbe 2001 DONGTING-N1 DC SSD U.2 - 1dbe 3001 DONGHU-Z1 DC ZNS SSD U.2 + 1dbe 2001 Dongting-N2 DC SSD U.2 1600GB + 1dbe 2002 Dongting-N2 DC SSD U.2 1920GB + 1dbe 2003 Dongting-N2 DC SSD U.2 3200GB + 1dbe 2004 Dongting-N2 DC SSD U.2 3840GB + 1dbe 2005 Dongting-N2 DC SSD U.2 6400GB + 1dbe 2006 Dongting-N2 DC SSD U.2 7680GB + 1dbe 3001 Donghu-Z2 DC ZNS SSD U.2 4000GB + 1dbe 3002 Donghu-Z2 DC ZNS SSD U.2 8000GB + 5666 NVMe SSD Controller IG5666 + 5668 NVMe SSD Controller IG5668 + 5669 NVMe SSD Controller IG5669 [Tacoma] 1dbf Guizhou Huaxintong Semiconductor Technology Co., Ltd 0401 StarDragon4800 PCI Express Root Port 1dc2 Alco Digital Devices Limited 1dc5 FADU Inc. + 4081 FC4121 PCIe 4.0 NVMe controller [DELTA] + 6150 FC3081 PCIe 3.0 NVMe controller [BRAVO] 1dcd Liqid Inc. 1dcf Beijing Sinead Technology Co., Ltd. 1dd3 Sage Microelectronics Corp. 1dd4 Swissbit AG 0010 N-10m2 NVMe SSD + 0016 N-16 + 0020 EN-20 BGA NVMe SSD (DRAM-less) 1dd8 AMD Pensando Systems 0002 DSC2 Elba Upstream Port 1dd8 100e Distributed Services Card @@ -25515,6 +26389,8 @@ 2020 DC-390 Series SCSI Adapter [AMD Am53C974] 690c 690c dc29 DC290 +1de4 Raspberry Pi Ltd + 0001 RP1 PCIe 2.0 South Bridge 1de5 Eideticom, Inc 1000 IO Memory Controller 2000 NoLoad Hardware Development Kit @@ -25531,6 +26407,7 @@ 1dee Biwin Storage Technology Co., Ltd. 2262 HP EX950 NVMe SSD 2263 HP EX900 NVMe SSD (DRAM-less) + 5216 KingSpec NX series NVMe SSD (DRAM-less) 1def Ampere Computing, LLC e005 eMAG PCI Express Root Port 0 e006 eMAG PCI Express Root Port 1 @@ -25607,6 +26484,7 @@ 1df3 0001 ENA2100RN 1df5 Shenzhen TIGO Semiconductor 1202 kimtigo NVMe SSD (DRAM-less) + 2263 kimtigo MG931K NVMe SSD (DRAM-less) 1df7 opencpi.org 0001 ml605 0002 alst4 @@ -25620,8 +26498,10 @@ 1df8 d100 M.2 NVMe SSD 1df8 d201 M.2 NVMe SSD 1df8 d600 M.2 NVMe SSD +1dfa Astera Labs, Inc. 
1dfc JSC NT-COM 1181 TDM 8 Port E1/T1/J1 Adapter +1e0d SambaNova Systems, Inc 1e0f KIOXIA Corporation 0001 NVMe SSD Controller BG4 (DRAM-less) 0007 NVMe SSD Controller Cx6 @@ -25683,6 +26563,7 @@ 1028 22b9 Ent NVMe CM7 FIPS E3.S MU 6.4TB 1028 22ba Ent NVMe CM7 FIPS E3.S MU 12.8TB 0018 Exceria Pro NVMe SSD + 001a NVMe SSD Controller BG6 (DRAM-less) 001f NVMe SSD Controller CD8 1028 2223 DC NVMe CD8 U.2 SED 15.36TB 1028 2224 DC NVMe CD8 U.2 SED 7.68TB @@ -25713,6 +26594,16 @@ 1028 223c Ent NVMe CM7 U.2 MU 6.4TB 1028 223d Ent NVMe CM7 U.2 MU 3.2TB 1028 223e Ent NVMe CM7 U.2 MU 1.6TB + 002a Exceria Plus G3 NVMe SSD (DRAM-less) + 002c NVMe SSD Controller CD8P EDSFF + 1028 22bf DC NVMe CD8P E3.S 15.36TB + 1028 22c0 DC NVMe CD8P E3.S 7.68TB + 1028 22c1 DC NVMe CD8P E3.S 3.84TB + 1028 22c2 DC NVMe CD8P E3.S 1.92TB + 1028 22c7 DC NVMe CD8P E3.S MU 12.8TB + 1028 22c8 DC NVMe CD8P E3.S MU 6.4TB + 1028 22c9 DC NVMe CD8P E3.S MU 3.2TB + 1028 22ca DC NVMe CD8P E3.S MU 1.6TB 1e17 Arnold & Richter Cine Technik GmbH & Co. Betriebs KG 1e18 Beijing GuangRunTong Technology Development Co.,Ltd 1e24 Squirrels Research Labs @@ -25744,16 +26635,21 @@ 8032 S6 [Enflame] # FHFL PCIe card, single slot, 3rd generation from Enflame c031 S30 [Enflame] -# FHFL PCIe card, single slot, 3rd generation from Enflame, 48GB device memory +# FHFL PCIe card, dual slot, 3rd generation from Enflame, 48GB device memory c032 S90 [Enflame] -# FHFL PCIe card, single slot, 3rd generation from Enflame, 48GB device memory - c033 S60 [Enflame] +# FHFL PCIe card, dual slot, 3rd generation from Enflame, 48GB device memory + c033 S60G [Enflame] +# FHFL PCIe card, dual slot, 3rd generation from Enflame, 48GB device memory + c035 S60 [Enflame] # nee Thinci, Inc 1e38 Blaize, Inc 0102 Xplorer X1600 1e39 MEDION AG +1e3a Cactus Technologies Limited + 2263 270PM6, 270PM7 series NVMe SSD 1e3b DapuStor Corporation 0600 NVMe SSD Controller DP600 + 1e3b 0006 Enterprise NVMe SSD U.2 ODP 7.68TB (J5001) 1e3b 0010 Enterprise NVMe SSD U.2 3.84TB (R5102) 1e3b 0013 Enterprise NVMe SSD U.2 3.20TB (R5302) 1e3b 0030 Enterprise NVMe SSD U.2 3.84TB (J5100) @@ -25788,12 +26684,49 @@ 1e3b 0069 Enterprise NVMe SSD U.2 3.20TB (R5301D) 1e3b 006c Enterprise NVMe SSD U.2 1.92TB (R5101) 1e3b 006d Enterprise NVMe SSD U.2 1.60TB (J5301) + 1e3b 00b9 Enterprise NVMe SSD U.2 ODP 25.60TB (R5301)/(J5301) + 1e3b 00be Enterprise NVMe SSD U.2 ODP 30.72TB (R5101)/(J5101) + 1e3b 00c1 Enterprise NVMe SSD U.2 ODP 25.60TB (R5301D)/(J5301D) + 1e3b 00c4 Enterprise NVMe SSD U.2 ODP 30.72TB (R5101D)/(J5101D) + 1e3b 00c7 Enterprise NVMe SSD U.2 ODP 25.60TB (J5300) + 1e3b 00c8 Enterprise NVMe SSD U.2 ODP 30.72TB (J5100) + 1e3b 00c9 Enterprise NVMe SSD U.2 ODP 15.36TB (J5001) + 1e3b 00ca Enterprise NVMe SSD U.2 ODP 3.84TB (J5102) + 1e3b 00cb Enterprise NVMe SSD U.2 ODP 7.68TB (J5102) + 1e3b 00cc Enterprise NVMe SSD U.2 QDP 3.84TB (J5101) + 1e3b 00cd Enterprise NVMe SSD U.2 ODP 7.68TB (J5101) + 1e3b 00ce Enterprise NVMe SSD U.2 QDP 3.84TB (J5101D) + 1e3b 00cf Enterprise NVMe SSD U.2 ODP 7.68TB (J5101D) + 1e3b 00dc Enterprise NVMe SSD U.2 ODP 30.72TB with SAMSUNG 32GB DRAM (J5001) + 1e3b 00dd Enterprise NVMe SSD U.2 ODP 30.72TB with MT 32GB DRAM(J5001) + 1e3b 00de Enterprise NVMe SSD U.2 ODP 15.36TB with SK 16GB DRAM(J5001D) + 1e3b 00df Enterprise NVMe SSD U.2 ODP 30.72TB with SAMSUNG 32GB DRAM(J5001) + 1e3b 00e7 Enterprise NVMe SSD U.2 ODP 30.72TB with MT 32GB DRAM(J5001D) + 1e3b 00e8 Enterprise NVMe SSD U.2 QDP 3.20TB (J5301) + 1e3b 00e9 Enterprise NVMe SSD U.2 ODP 6.40TB (J5301) + 1e3b 00ea 
Enterprise NVMe SSD U.2 QDP 3.20TB (J5301D) + 1e3b 00eb Enterprise NVMe SSD U.2 ODP 6.40TB (J5301D) + 1e3b 00ec Enterprise NVMe SSD U.2 ODP 30.72TB with MT 32GB DRAM(J5101) + 1e3b 00ed Enterprise NVMe SSD U.2 ODP 30.72TB with MT 32GB DRAM(R5101) + 1e3b 00ee Enterprise NVMe SSD U.2 ODP 15.36TB with SK 16GB DRAM(J5101) + 1e3b 00ef Enterprise NVMe SSD U.2 ODP 12.80TB with SK 16GB DRAM(J5301) 1e3b 00f0 Enterprise NVMe SSD U.2 0.40TB (X2900) 1e3b 00f1 Enterprise NVMe SSD U.2 0.80TB (X2900) 1e3b 00f2 Enterprise NVMe SSD U.2 1.60TB (X2900) 1e3b 00f3 Enterprise NVMe SSD U.2 3.20TB (X2900) 1e3b 00f5 Enterprise NVMe SSD U.2 0.40TB (X2900P) 1e3b 00f6 Enterprise NVMe SSD U.2 0.80TB (X2900P) + 0800 DP800 + 1e3b 0001 Enterprise NVMe SSD U.2 QDP 3.84TB (R6100) + 1e3b 0007 Enterprise NVMe SSD U.2 ODP 15.36TB (R6100) + 1e3b 000a Enterprise NVMe SSD U.2 3.20TB (R6300) + 1e3b 000d Enterprise NVMe SSD U.2 6.40TB (R6300) + 1e3b 0010 Enterprise NVMe SSD U.2 12.80TB (R6300) + 1e3b 0018 Enterprise NVMe SSD U.2 QDP 3.84TB (R6100C) + 1e3b 0019 Enterprise NVMe SSD U.2 ODP 7.68TB (R6100C) + 1e3b 001a Enterprise NVMe SSD U.2 3.20TB (R6300C) + 1e3b 001b Enterprise NVMe SSD U.2 6.40TB (R6300C) + 1e3b 001c Enterprise NVMe SSD U.2 ODP 7.68TB (R6100) 1098 Haishen3 NVMe SSD 1e3b 0001 Enterprise NVMe SSD U.2 0.8TB (H2100) 1e3b 0002 Enterprise NVMe SSD U.2 0.96TB (H2200) @@ -25844,24 +26777,32 @@ # YMTC 1001 PC005 NVMe SSD 1011 PC210 NVMe SSD -# YMTC PCIe/NVMe SSD - 1013 PC210 + 1013 PC210 NVMe SSD + 1031 PC300 NVMe SSD (DRAM-less) + 1033 PC300 NVMe SSD (DRAM-less) + 1071 PC411 NVMe SSD (DRAM-less) 1e4b MAXIO Technology (Hangzhou) Ltd. 1001 NVMe SSD Controller MAP1001 - 1002 NVMe SSD Controller MAP1002 + 1002 NVMe SSD Controller MAP1002 (DRAM-less) 1003 NVMe SSD Controller MAP1003 1201 NVMe SSD Controller MAP1201 - 1202 NVMe SSD Controller MAP1202 + 1202 NVMe SSD Controller MAP1202 (DRAM-less) 1601 NVMe SSD Controller MAP1601 - 1602 NVMe SSD Controller MAP1602 + 1602 NVMe SSD Controller MAP1602 (DRAM-less) + 1608 NVMe SSD Controller MAP1608 (DRAM-less) 1e4c GSI Technology 0010 Associative Processing Unit [Leda] 1e4c 0120 SE120 1e50 IP3 Tech (HK) Limited 1e52 Tenstorrent Inc 401e Wormhole + 1e52 0014 n300 + 1e52 0018 n150 b140 Blackhole faca Grayskull + 1e52 0003 e150 + 1e52 0007 e75 + 1e52 000a e300 1e57 Beijing Panyi Technology Co., Ltd 0100 The device has already been deleted. 0000 0100 PY8800 64GB Accelerator @@ -25875,6 +26816,7 @@ 1e67 Untether AI 0002 runAI200 AI Inference Accelerator 1e68 Jiangsu Xinsheng Intelligent Technology Co., Ltd + 8111 EP2000Pro PCIe 3 NVMe SSD (DRAM-less) 1e6b Axiado Corp. 1e7b Dataland 1e7c Brainchip Inc @@ -25882,6 +26824,8 @@ 1e7e Pliops 9034 Pliops Extreme Data Processor [XDP1.0] 1e7f Jiangsu Huacun Elec. Tech. Co., Ltd.
+ 6002 MMY MMSP350 PCIe 3 NVMe SSD (DRAM-less) + 6003 MMY HC512GP3KH2T PCIe 3 NVMe SSD (DRAM-less) 1e81 Ramaxel Technology(Shenzhen) Limited 1203 NVMe SSD Controller UHXXXa series 1e81 a121 NVMe SSD UHXXXa series U.2 960GB @@ -25904,11 +26848,17 @@ # aka SED Systems 1e94 Calian SED 1e95 Solid State Storage Technology Corporation + 1000 XA1-311024 NVMe SSD M.2 + 1001 CA6-8D512 NVMe SSD M.2 1002 NVMe SSD [3DNAND] 2.5" U.2 (LJ1) 1e95 1101 NVMe SSD [3DNAND] 2.5" U.2 (LJ1) 1ea0 5636 TP1500 Series U.2 NVMe Datacenter SSD 1003 CLR-8W512 NVMe SSD M.2 (DRAM-less) + 1005 PLEXTOR M10P(GN) NVMe SSD M.2 1007 CL4-8D512 NVMe SSD M.2 (DRAM-less) + 1008 CL5-8D512 NVMe SSD M.2 (DRAM-less) + 3500 CA5-8D256 NVMe SSD M.2 + 35f1 PLEXTOR M9PGN Plus NVMe SSD M.2 9100 CL1-3D256-Q11 NVMe SSD M.2 1e96 Drut Technologies Inc. 1e9f Lynxi Technologies Co., Ltd. @@ -25916,6 +26866,7 @@ 2a16 Cloud Intelligent Inference Controller 2a18 Video Transcode Controller 2a20 Cloud Intelligent Inference and Training Controller + 2a22 Cloud Intelligent Inference Controller 1ea7 Intelliprop, Inc 223a Typhon+ PCIe to Gen-Z Bridge 224a IPA-PE224A CXL to Gen-Z Bridge [Sphinx] @@ -25930,6 +26881,9 @@ 1001 Video Accelerator 1eb4 Quantum Nebula Microelectronics Technology Co.,Ltd. 3401 SSD Contoller +1eb6 Wuxi Stars Microsystem Technology Co., Ltd +1eb9 Senscomm Semiconductor, Inc + 2020 SCM2625 Wi-Fi6 Network Adapter 1ebd EMERGETECH Company Ltd. 0101 Seirios 2063 Video Codec 1ec6 Vastai Technologies @@ -25970,8 +26924,15 @@ 1ec9 Wingtech Group(HongKong)Limited 1eca Lightmatter 0000 Envise-B +1ed0 Hosin Global Electronics + 2283 Patriot P300 NVMe SSD (DRAM-less) 1ed2 FuriosaAI, Inc. 0000 Warboy + 1111 RNGD + 0000 1111 RNGD-S + 0000 2222 RNGD VF + 0000 3333 RNGD-S VF + 2222 RNGD-S 1ed3 Yeston 1ed5 Moore Threads Technology Co.,Ltd 0100 MTT S10 @@ -25988,18 +26949,21 @@ 0201 MTT S80 0202 MTT S70 0203 MTT S60 - 0211 MTT X200 + 0211 MTT X300 0221 G2S80 0222 MTT S3000 + 1ed5 0001 C3150 0223 G2S4 0251 G2N10 02ff MTT HDMI/DP Audio 0300 MTT S90 Engineering Sample 0301 MTT S90 0323 MTT S4000 + 0327 MTT S4000 03ff MTT HDMI/DP Audio 1ed8 Digiteq Automotive - 0101 FG4 PCIe Frame Grabber + 0101 FG4 PCIe Frame Grabber (T100) + 0201 FG4 PCIe Frame Grabber (T200) 1ed9 Myrtle.ai 1ee1 Suzhou Kuhan Information Technologies 0050 Aurora NVMe SSD Controller @@ -26022,14 +26986,21 @@ 1142 XDX120M 1144 XDX E1200 1150 XDX120S + 1160 XDX121 + 1170 XDX121S + 11e0 XDX130 11e4 XDX E1300 1320 XDX150 + 1323 XDX R1500 1324 XDX X1500 1330 XDX150S - 1340 XDX150T - 1350 XDX150U - 13c0 XDX160 + 1333 XDX R1510 + 1340 XDX151 + 1350 XDX151S + 1360 XDX151T + 13c0 XDX160T 13d0 XDX160S + 13d3 XDX R1610 1500 XDX180 1503 XDX R1800 1504 XDX X1800 @@ -26037,7 +27008,7 @@ 15a0 XDX190 15a3 XDX R1900 15a4 XDX X1900 - 15a5 XDX X1900M2 + 15a5 XDX X1900D 15b0 XDX190S 1810 XDX TJ01 Audio 1820 XDX TJ02 Audio @@ -26045,12 +27016,13 @@ 1ef6 GrAI Matter Labs 1ef7 Shenzhen Gunnir Technology Development Co., Ltd 1efb Flexxon Pte Ltd +1eff Rebellions Inc. 
1f02 Beijing Dayu Technology 1f03 Shenzhen Shichuangyi Electronics Co., Ltd - 1202 MAP1202-Based NVMe SSD + 1202 MAP1202-Based NVMe SSD (DRAM-less) 2262 SM2262EN-based OEM SSD 2263 SM2263XT-Base NVMe SSD - 5216 IG5216-based NVMe SSD + 5216 IG5216-based NVMe SSD (DRAM-less) 5220 IG5220-Based NVMe SSD 5236 IG5236-Based NVMe SSD 5636 IG5636-Based NVMe SSD @@ -26074,7 +27046,28 @@ 1a01 M16104 Family Virtual Function 1f0f 0001 M16104 Family Virtual Function 2022 D1055AS PCI Express Switch Upstream Port + 3403 M18110 Family + 3404 M18110 Lx Family + 3405 M18110 Family BASE-T + 3406 M18110 Lx Family BASE-T + 3407 M18110 Family OCP + 3408 M18110 Lx Family OCP + 3409 M18110 Family BASE-T OCP + 340a M18110 Lx Family BASE-T OCP + 340b M18120 Family + 340c M18120 Lx Family + 340d M18120 Family BASE-T + 340e M18120 Lx Family BASE-T + 340f M18120 Family OCP + 3410 M18120 Lx Family OCP + 3411 M18120 Family BASE-T OCP + 3412 M18120 Lx Family BASE-T OCP + 3413 M18100 Family Virtual Function 9088 D1055AS PCI Express Switch Downstream Port +1f16 XConn Technologies +# XConn XC50256 CXL2.0/PCIe5.0 switch + c500 XC50256 +1f17 Zettastone Technology 1f24 xFusion Digital Technologies Co., Ltd. 1058 EP500/EP600 NVMe SSD 1f24 1114 EP500 NVMe SSD(RI) @@ -26127,25 +27120,49 @@ 9032 SSSNIC SDI5.1 1f3f 00a1 Dual Port 100GE SDI5.1 1f40 Netac Technology Co.,Ltd - 2263 NVMe SSD (DRAM-less) + 0001 PCIe 4 NVMe SSD (DRAM-less) + 1202 PCIe 3 NVMe SSD (DRAM-less) + 1602 PCIe 4 NVMe SSD (DRAM-less) + 1f40 PCIe 4 NVMe SSD (DRAM-less) + 2263 PCIe 3 SM based NVMe SSD (DRAM-less) + 5216 PCIe 3 NVMe SSD (DRAM-less) + 5236 PCIe 4 INNOGRIT based NVMe SSD + 5765 PCIe 3 NVMe SSD (DRAM-less) 1f44 VVDN Technologies Private Limited +# YUSUR Technology Co., Ltd. +1f47 YUSUR Tech +# Network Accelerating Card + 2018 DPU Card +# Network Accelerating Card + 2020 DPU 1f4b Axera Semiconductor Co., Ltd 1f52 MangoBoost Inc. 1f56 SAPEON Inc. 1f60 Accelecom 0001 XELE-NIC 25K5 0054 XELE-NIC 25K5 -1f67 Yunsilicon Technology Co,. Ltd. +1f67 Yunsilicon Technology + 1011 metaConnect SmartNIC Physical Function + 1012 metaConnect SmartNIC Virtual Function + 1051 metaFusion DPU Physical Function + 1052 metaFusion DPU Virtual Function + 1059 metaFusion DPU SoC Network Interface + 1111 metaScale SmartNIC Physical Function + 1112 metaScale SmartNIC Virtual Function + 1151 metaVisor DPU Physical Function + 1152 metaVisor DPU Virtual Function 1faa Hexaflake (Shanghai) Information Technology Co., Ltd. 0c10 Compass C10 PF 0c11 Compass C10 VF 1fab Unifabrix Ltd. 
0000 Nexus Alpha IVPU 0100 NoX Gamma + 01fd Smart Memory Node Generic CXL Port (T1) # UnifabriX Smart Memory Node Generic CXL Port 01fe Smart Memory Node Generic CXL Port (T2) # UnifabriX Smart Memory Node Generic CXL Port 01ff Smart Memory Node Generic CXL Port (T3) + 1b00 MAX Host Device 1fb0 ICube Corporation Limited 1000 NF1000 Series GPU 1fb0 1001 NF1001 @@ -26249,15 +27266,46 @@ 1fe4 0076 Enterprise NVMe SSD U.2 7.68TB(HP610) 1fe4 0077 Enterprise NVMe SSD U.2 6.40TB(HP630) 1fe4 0078 Enterprise NVMe SSD U.2 3.20TB(HP630) +1fe9 MemryX +# LinkData Technology (Tianjin) Co., LTD +1ff2 Linkdata + 10a1 NIC1160 Ethernet Controller Family + 1ff2 0c11 10GE Ethernet Adapter 1160-2X + 10a2 NIC1160 Ethernet Controller Virtual Function Family + 20a1 IOC2110 Storage Controller + 1ff2 0a11 2120-16i SATA3/SAS3 HBA Adapter + 1ff2 0a12 2120-8i SATA3/SAS3 HBA Adapter + 20a2 IOC2250 Storage Controller + 1ff2 0a21 2230-18i Tri-mode HBA Adapter + 1ff2 0a22 2230-10i Tri-mode HBA Adapter + 1ff2 0a23 2230-16i Tri-mode HBA Adapter + 1ff2 0a24 2230-8i Tri-mode HBA Adapter + 1ff2 0a28 2233-16i Tri-mode HBA Adapter + 30a2 ROC3250 Storage Controller + 1ff2 0b21 3260-18i Tri-mode RAID Adapter + 1ff2 0b22 3260-10i Tri-mode RAID Adapter + 1ff2 0b23 3260-16i Tri-mode RAID Adapter + 1ff2 0b24 3260-8i Tri-mode RAID Adapter 1ff4 DEEPX Co., Ltd. 0000 DX_M1 0001 DX_M1A + 1000 DX_H1 +1ff8 Beijing Gengtu Technology Co.Ltd + 2000 GT6910 + 2010 GT6908 +1ff9 Inagile Electronic Technology Co., LTD 2000 Smart Link Ltd. 2800 SmartPCI2800 V.92 PCI Soft DFT 2001 Temporal Research Ltd 2003 Smart Link Ltd. 8800 LM-I56N 2004 Smart Link Ltd. +202c CAEN S.p.A. + 5818 A5818 +2036 Netforward Microelectronics Co., Ltd. + 1618 NF1618 PCI Express Ethernet Controller + 1619 NF1618 Family Virtual Function +2046 GXMICRO Technology (Shanghai) Co., Ltd. 2048 Beijing SpaceControl Technology Co.Ltd 20f4 TRENDnet 2116 ZyDAS Technology Corp. @@ -26266,30 +27314,41 @@ 22b8 Flex-Logix Technologies 22a0 Flex Logix InferX X1 Inference Accelerator 22db Missing Link Electronics, Inc. + 1200 NVMe Streamer EP ERD 2304 Colorgraphic Communications Corp. +2321 Bruker AXS Inc. 2348 Racore 2010 8142 100VG/AnyLAN +256c Graphics Technology (HK) Co., Ltd. + 006d HS610 2646 Kingston Technology Company, Inc. 
0010 HyperX Predator PCIe AHCI SSD -# KC2000 and KC2500 share the same DID - 2262 KC2000/KC2500 NVMe SSD - 2263 A2000 NVMe SSD - 5008 U-SNS8154P3 NVMe SSD - 500a DC1000B NVMe SSD - 500b DC1000M NVMe SSD + 2262 KC2000/KC2500 NVMe SSD SM2262EN + 2263 A2000 NVMe SSD SM2263EN + 5008 A1000/U-SNS8154P3 x2 NVMe SSD + 500a DC1000B NVMe SSD E12DC + 500b DC1000M NVMe SSD SM2270 500c OM8PCP Design-In PCIe 3 NVMe SSD (DRAM-less) 500d OM3PDP3 NVMe SSD - 500e NV1 NVMe SSD + 500e NV1 NVMe SSD E13T (DRAM-less) + 500f NV1 NVMe SSD SM2263XT (DRAM-less) 5010 OM8SBP NVMe PCIe SSD (DRAM-less) - 5012 DC1500M NVMe SSD -# KC3000 and Renegade share the same DID - 5013 KC3000/Renegade NVMe SSD - 5014 Design-In PCIe 4 NVMe SSD (TLC) + 5012 DC1500M NVMe SSD SM2270 + 5013 KC3000/FURY Renegade NVMe SSD E18 + 5014 OM8SEP4 Design-In PCIe 4 NVMe SSD (TLC) (DRAM-less) 5016 OM3PGP4 NVMe SSD + 5017 NV2 NVMe SSD SM2267XT (DRAM-less) + 5019 NV2 NVMe SSD E21T (DRAM-less) +# 128GB + 501a OM8PGP4 Design-In PCIe 4 NVMe SSD (TLC) (DRAM-less) 501b OM8PGP4 NVMe PCIe SSD (DRAM-less) - 501d NV2 NVMe PCIe 4 SSD 500GB (DRAM-less) + 501c NV2 NVMe SSD E19T (DRAM-less) + 501d NV2 NVMe SSD TC2200 (DRAM-less) 501f FURY Renegade NVMe SSD with heatsink - 5021 Design-In PCIe 4 NVMe SSD (QLC) + 5021 OM8SEP4 Design-In PCIe 4 NVMe SSD (QLC) (DRAM-less) + 5022 OM8PGP4 Design-In PCIe 4 NVMe SSD (QLC) (DRAM-less) + 5023 NV2 NVMe SSD SM2269XT (DRAM-less) + 5024 DC2000B NVMe SSD E18DC 270b Xantel Corporation 270f Chaintech Computer Co. Ltd 2711 AVID Technology Inc. @@ -26300,11 +27359,15 @@ 2a18 Video Transcode Controller 2bd8 ROPEX Industrie-Elektronik GmbH 3000 Hansol Electronics Inc. +3100 Dynabook Inc. 3112 Satelco Ingenieria S.A. 3130 AUDIOTRAK 3142 Post Impression Systems. 31ab Zonet 1faa ZEW1602 802.11b/g Wireless Adapter +328f Shenzhen EMEET Technology Co., Ltd. + 004c OfficeCore M1A + 2019 REC 600 HD Webcam 3388 Hint Corp 0013 HiNT HC4 PCI to ISDN bridge, Multimedia audio controller 0014 HiNT HC4 PCI to ISDN bridge, Network controller @@ -26340,6 +27403,7 @@ 3475 Arista Networks, Inc. 34ba Ice Lake-LP PCI Express Root Port #3 3513 ARCOM Control Systems Ltd +369a HighSecLabs, Ltd. 37d9 ITD Firm ltd. 1138 SCHD-PH-8 Phase detector 1140 VR-12-PCI 12-ch Relay Actuator Card @@ -26347,6 +27411,8 @@ 1142 PCI-CAN2 3842 eVga.com. Corp. 38ef 4Links +# Wrong ID: the board programmed the sub-device ID in place of the sub-vendor ID +393e Lenovo (wrong ID) 3d3d 3DLabs 0001 GLINT 300SX 0002 GLINT 500TX @@ -26662,9 +27728,22 @@ 5000 NV5000SC 4a14 5000 RT8029-Based Ethernet Adapter 4b10 Buslogic Inc. +4b43 KonteX Inc. 4c48 LUNG HWA Electronics 4c4d Liquid-Markets GmbH -4c52 LR-Link +# Dev versions of TaSR, not for production. + 9998 TaSR +# First versions of UberNIC, not for production. + 9999 UberNIC PoC/testing/dev +4c52 LR-LINK + 1001 Smart Network Adapter + 4c52 a008 LREG1008PT Single-port 1Gb Smart Ethernet Network Adapter + 1002 Smart Network Adapter + 4c52 a006 LREG1006PT Single-port 1.2Gb Network Security Isolation Adapter + 1003 Smart Network Adapter + 1004 Smart Network Adapter + 4c52 b010 LREG1010PF Single-port 10Gb FPGA Network Security Isolation Adapter + 4c52 b011 LREG1011PF Dual-port 10Gb FPGA Network Security Isolation Adapter 4c53 SBS Technologies 0000 PLUSTEST device 4c53 3000 PLUSTEST card (PC104+) @@ -26675,7 +27754,7 @@ 4d51 MediaQ Inc.
0200 MQ-200 4d54 Microtechnica Co Ltd -4d56 MATRIX VISION GmbH +4d56 Balluff MV GmbH 0000 [mvHYPERION-CLe/CLb] CameraLink PCI Express x1 Frame Grabber 0001 [mvHYPERION-CLf/CLm] CameraLink PCI Express x4 Frame Grabber 0010 [mvHYPERION-16R16/-32R16] 16 Video Channel PCI Express x4 Frame Grabber @@ -26712,6 +27791,7 @@ 0d10 SB-365x Motion Feedback Device 2f00 SB-3642 Motion Feedback Device 3000 SB-3644 Motion Feedback Device +4e4c NieL TechSolution 4e58 Nutanix, Inc. 0001 Virtual NVMe Controller 5045 University of Toronto @@ -26723,6 +27803,9 @@ 50b2 TerraTec Electronic GmbH 50ce System-on-Chip Engineering S.L. 0001 RELY-MIL-XMC-TSN-SWITCH + 0100 XMC_AV-Dual-ETH + 0101 XMC_AV-ETSN + 0102 XMC_AV-AFDX 5136 S S Technologies 5143 Qualcomm Inc 5145 Ensoniq (Old) @@ -26867,6 +27950,8 @@ 9043 Chrome 430 GT 9045 Chrome 430 ULP / 435 ULP / 440 GTX 9060 Chrome 530 GT +# Found in VIA Embedded uH4 graphics card + 9070 Chrome 5400EW 9102 86C410 [Savage 2000] 1092 5932 Viper II Z200 1092 5934 Viper II Z200 @@ -26907,6 +27992,7 @@ 0001 I-30xx Scanner Interface 5555 Genroco, Inc 0003 TURBOstor HFP-832 [HiPPI NIC] + 0004 Torrent QN16e [16-128 Channel MPEG QAM Modulator for DVB-C] 3b00 Epiphan DVI2PCIe video capture card 5646 Vector Fabrics BV 5654 VoiceTronix Pty Ltd @@ -26974,6 +28060,7 @@ 6903 TBS Technologies (wrong ID) 6909 TBS Technologies (wrong ID) 6910 TBS Technologies (wrong ID) +6f67 NOVAIUM Technology 7063 pcHDTV 2000 HD-2000 3000 HD-3000 @@ -26986,7 +28073,7 @@ e100 PTP3100 PCIe PTP Slave Clock 7470 TP-LINK Technologies Co., Ltd. 7526 HongQin (Beijing) Technology Co., Ltd. - 0082 HQ SSD 1TB + 0082 HQ SSD M.2 0083 HQ SSD 2TB M.2 NVMe 7604 O.N. Electronic Co Ltd. 78c0 Herrick Technology Laboratories, Inc. [HTL] @@ -27083,6 +28170,8 @@ 0100 2nd Generation Core Processor Family DRAM Controller 1028 04aa XPS 8300 1043 844d P8P67/P8H67 Series Motherboard + 1734 11b9 Esprimo P510 D3171 motherboard + 17aa 3070 ThinkCentre M91p 8086 200d DH61CR motherboard 0101 Xeon E3-1200/2nd Generation Core Processor Family PCI Express Root Port 1028 04b2 Vostro 3350 @@ -27091,6 +28180,8 @@ 0102 2nd Generation Core Processor Family Integrated Graphics Controller 1028 04aa XPS 8300 1043 0102 P8H67 Series Motherboard + 1734 11b9 G640 [Sandy Bridge, HD Graphics] on Esprimo P510 D3171 motherboard + 17aa 3070 ThinkCentre M91p 0104 2nd Generation Core Processor Family DRAM Controller 1028 04a3 Precision M4600 1028 04b2 Vostro 3350 @@ -27175,6 +28266,7 @@ 02b1 Comet Lake PCI Express Root Port #10 02b3 Comet Lake PCI Express Root Port #12 02b4 Comet Lake PCI Express Root Port #13 + 02b5 Comet Lake PCI Express Root Port #14 02b8 Comet Lake PCI Express Root Port #1 02bc Comet Lake PCI Express Root Port #5 02bf Comet Lake PCI Express Root Port #8 @@ -27197,10 +28289,13 @@ 02ef Comet Lake PCH-LP Shared SRAM 1028 09be Latitude 7410 02f0 Comet Lake PCH-LP CNVi WiFi - 8086 0034 Wireless-AC 9560 160MHz - 8086 0070 Wi-Fi 6 AX201 160MHz - 8086 0074 Wi-Fi 6 AX201 160MHz - 8086 4070 Wi-Fi 6 AX201 160MHz + 8086 0034 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9560 160MHz 2x2 [Jefferson Peak] + 8086 0070 Dual Band Wi-Fi 6(802.11ax) AX201 160MHz 2x2 [Harrison Peak] + 8086 0074 Dual Band Wi-Fi 6(802.11ax) AX201 160MHz 2x2 [Harrison Peak] + 8086 0234 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9560 80MHz 2x2 [Jefferson Peak] + 8086 0264 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9461 80MHz 1x1 [Jefferson Peak] + 8086 02a4 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9462 80MHz 1x1 [Jefferson Peak] + 8086 4070 Dual Band Wi-Fi 6(802.11ax) AX201 160MHz 2x2 
[Harrison Peak] 02f5 Comet Lake PCH-LP SCS3 02f9 Comet Lake Thermal Subsytem 1028 09be Latitude 7410 @@ -27318,10 +28413,12 @@ 06ed Comet Lake USB 3.1 xHCI Host Controller 06ef Comet Lake PCH Shared SRAM 06f0 Comet Lake PCH CNVi WiFi - 1a56 1651 Wi-Fi 6 AX1650s 160MHz (201D2W) [Killer] - 8086 0034 Wireless-AC 9560 - 8086 0074 Wi-Fi 6 AX201 160MHz - 8086 02a4 Wireless-AC 9462 + 1a56 1651 Dual Band Wi-Fi 6(802.11ax) Killer AX1650s 160MHz 2x2 [Cyclone Peak] + 1a56 1652 Dual Band Wi-Fi 6(802.11ax) Killer AX1650i 160MHz 2x2 [Cyclone Peak] + 8086 0034 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9560 160MHz 2x2 [Jefferson Peak] + 8086 0074 Dual Band Wi-Fi 6(802.11ax) AX201 160MHz 2x2 [Harrison Peak] + 8086 02a4 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9462 80MHz 1x1 [Jefferson Peak] + 8086 42a4 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9462 80MHz 1x1 [Jefferson Peak] 06f9 Comet Lake PCH Thermal Controller 06fb Comet Lake PCH Serial IO SPI Controller #2 0700 CE Media Processor A/V Bridge @@ -27699,7 +28796,10 @@ 8086 8d08 NVMe Datacenter SSD [3DNAND] VE 2.5" U.2 (P5316) 8086 8d1d NVMe Datacenter SSD [3DNAND] VE E1.L 9.5/18mm (P5316) 8086 c008 NVMe Datacenter SSD [3DNAND] SE U.2 15mm (P5530) + 0b69 Ponte Vecchio XT (2 Tile) [Data Center GPU Max 1450] + 0b6e Ponte Vecchio XT (1 Tile) [Data Center GPU Max 1100C] 0bd0 Ponte Vecchio XL (2 Tile) + 0bd4 Ponte Vecchio XT (2 Tile) [Data Center GPU Max 1550VG] 0bd5 Ponte Vecchio XT (2 Tile) [Data Center GPU Max 1550] 0bd6 Ponte Vecchio XT (2 Tile) [Data Center GPU Max 1550] 0bd7 Ponte Vecchio XT (2 Tile) [Data Center GPU Max 1350] @@ -27809,6 +28909,9 @@ 8086 0000 Ethernet Controller XXV710 Intel(R) FPGA Programmable Acceleration Card N3000 for Networking 8086 0001 Ethernet Controller XXV710 Intel(R) FPGA Programmable Acceleration Card N3000 for Networking 0d9f Ethernet Controller I225-IT + 0dc5 Ethernet Connection (23) I219-LM + 1028 0c06 Precision 3580 + 0dc6 Ethernet Connection (23) I219-V 0dcd Ethernet Connection C825-X 0dd2 Ethernet Network Adapter I710 1137 0000 I710T4LG 4x1 GbE RJ45 PCIe NIC @@ -28408,6 +29511,7 @@ 10a9 8028 UV-BaseIO dual-port GbE 13a3 0037 DS4100 Secure Multi-Gigabit Server Adapter with Compression 15d9 a811 H8DGU + 4c52 9212 LREC9212PT Dual-port 10Gb Ethernet Network Adapter 8086 a01c Gigabit ET Dual Port Server Adapter 8086 a03c Gigabit ET Dual Port Server Adapter 8086 a04c Gigabit ET Dual Port Server Adapter @@ -28425,6 +29529,7 @@ 15d9 0605 X8SIL 15d9 060a X7SPA-H/X7SPA-HF Motherboard 15d9 060d C7SIM-Q Motherboard + 4c52 2201 LRES2201PT Single-port 1Gb Ethernet Network Adapter 8086 0001 Gigabit CT2 Desktop Adapter 8086 3578 Server Board S1200BTLR 8086 357a Server Board S1200BTS @@ -28456,6 +29561,7 @@ 8086 a02f Gigabit EF Dual Port Server Adapter 10e7 82576 Gigabit Network Connection 103c 31ff NC362i Integrated Dual Port BL-c Gigabit Server Adapter + 4c52 9701 LREC9701EF Single-port 1Gb Ethernet Network Adapter 10e8 82576 Gigabit Network Connection 8086 a02b Gigabit ET Quad Port Server Adapter 8086 a02c Gigabit ET Quad Port Server Adapter @@ -28515,6 +29621,11 @@ 1bd4 002f 10G SFP+ DP EP102Fi4A Adapter 1bd4 0032 10G SFP+ DP EP102Fi4 Adapter 1bd4 0067 F102I82599 + 4c52 1024 LR-LINK LRES9804BF Quad-port 10Gb Ethernet Server Adapter + 4c52 3002 LRES3002PF Dual-port 10Gb Ethernet Server Adapter for OCP + 4c52 3012 LRES3012PF Dual-port 10Gb Ethernet Server Adapter for OCP + 4c52 9801 LREC9801BF Single-port 10Gb Ethernet Server Adapter + 4c52 9802 LREC9802BF Dual-port 10Gb Ethernet Server Adapter 8086 0002 Ethernet Server Adapter X520-DA2 
8086 0003 Ethernet Server Adapter X520-2 8086 0006 Ethernet Server Adapter X520-1 @@ -28764,10 +29875,27 @@ 125b Ethernet Controller I226-LM 125c Ethernet Controller I226-V 125d Ethernet Controller I226-IT + 12d1 Ethernet Controller E830-CC for backplane + 12d2 Ethernet Controller E830-CC for QSFP + 8086 0002 Ethernet Network Adapter E830-C-Q2 for OCP 3.0 + 8086 0004 Ethernet Network Adapter E830-CC-Q1 for OCP 3.0 + 12d3 Ethernet Controller E830-CC for SFP + 8086 0001 Ethernet Network Adapter E830-XXV-2 for OCP 3.0 + 8086 0003 Ethernet Network Adapter E830-XXV-2 + 8086 0004 Ethernet Network Adapter E830-XXV-4 for OCP 3.0 + 12d4 Ethernet Controller E830-CC for SFP-DD + 12d5 Ethernet Controller E830-C for backplane + 12d8 Ethernet Controller E830-C for QSFP + 12da Ethernet Controller E830-C for SFP + 12dc Ethernet Controller E830-XXV for backplane + 12dd Ethernet Controller E830-XXV for QSFP + 12de Ethernet Controller E830-XXV for SFP 1360 82806AA PCI64 Hub PCI Bridge 1361 82806AA PCI64 Hub Controller (HRes) 8086 1361 82806AA PCI64 Hub Controller (HRes) 8086 8000 82806AA PCI64 Hub Controller (HRes) + 1452 Infrastructure Data Path Function + 145c Infrastructure Data Path Function 1460 82870P2 P64H2 Hub PCI Bridge 1461 82870P2 P64H2 I/OxAPIC 15d9 3480 P4DP6 @@ -28777,6 +29905,7 @@ 1502 82579LM Gigabit Network Connection (Lewisville) 1028 04a3 Precision M4600 17aa 21ce ThinkPad T520 + 17aa 3070 ThinkCentre M91p 8086 3578 Server Board S1200BTLR 8086 357a Server Board S1200BTS 1503 82579V Gigabit Network Connection @@ -28858,6 +29987,20 @@ 1bd4 0066 F014I350 1bd4 008a F012I350 1bd4 008d ENFI1100-T4 + 4c52 0350 I350 1Gb 2-port Ethernet Network Adapter + 4c52 1350 LREC9222HT Dual-port 1Gb Ethernet Network Adapter + 4c52 2003 LRES2003PT Dual-port 1Gb Ethernet Network Adapter + 4c52 2005 LRES2005PT Quad-port 1Gb Ethernet Network Adapter + 4c52 2006 LRES2006PT Six-port 1Gb Ethernet Network Adapter + 4c52 2008 LRES2008PT Eight-port 1Gb Ethernet Network Adapter + 4c52 2018 LRES2018PT Twelve-port 1Gb Ethernet Network Adapter + 4c52 2202 LRES2202PT Dual-port 1Gb Ethernet Network Adapter + 4c52 2217 LRES2217PT Dual-port 1Gb Ethernet Network Adapter + 4c52 3010 LRES3010PF Dual-port 1Gb Ethernet Server Adapter for OCP + 4c52 3023 LRES3023PT Quad-port 1Gb Ethernet Server Adapter for OCP + 4c52 3041 LRES3041PT Dual-port 1Gb Ethernet Server Adapter for OCP + 4c52 4006 LRES4006MT Quad-port 1Gb Ethernet Network Adapter + 4c52 9712 LREC9712HT Dual-port 1Gb Ethernet Network Adapter 8086 0001 Ethernet Server Adapter I350-T4 8086 0002 Ethernet Server Adapter I350-T2 8086 0003 Ethernet Network Adapter I350-T4 for OCP NIC 3.0 @@ -28872,6 +30015,11 @@ 1522 I350 Gigabit Fiber Network Connection 108e 7b17 Quad Port GbE PCIe 2.0 ExpressModule, MMF 108e 7b19 Dual Port GbE PCIe 2.0 Low Profile Adapter, MMF + 4c52 1006 LRES1006PF Six-port 1Gb Ethernet Network Adapter + 4c52 2203 LRES2203PF Dual-port 1Gb Ethernet Network Adapter + 4c52 9710 LREC9710HF Single-port 1Gb Ethernet Network Adapter + 4c52 9712 LREC9712HF Dual-port 1Gb Ethernet Network Adapter + 4c52 9714 LREC9714HF Quad-port 1Gb Ethernet Network Adapter 8086 0002 Ethernet Server Adapter I350-T2 8086 0003 Ethernet Server Adapter I350-F4 8086 0004 Ethernet Server Adapter I350-F2 @@ -28886,6 +30034,7 @@ 103c 18d1 Ethernet 1Gb 2-port 361FLB Adapter 103c 1989 Ethernet 1Gb 2-port 363i Adapter 103c 339f Ethernet 1Gb 4-port 366M Adapter + 4c52 9714 LREC9714HT Quad-port 1Gb Ethernet Network Adapter 8086 1f52 1GbE 4P I350 Mezz 1524 I350 Gigabit Connection 1525 82567V-4 
Gigabit Network Connection @@ -28911,6 +30060,7 @@ 1bd4 001a 10G base-T DP ER102Ti3 Rack Adapter 1bd4 0033 10G base-T DP EP102Ti3 Adapter 1bd4 0034 10G base-T DP EP102Ti3A Adapter + 4c52 9802 LREC9802BT Dual-port 10Gb Ethernet Network Adapter 8086 0001 Ethernet Converged Network Adapter X540-T2 8086 0002 Ethernet Converged Network Adapter X540-T1 8086 001a Ethernet Converged Network Adapter X540-T2 @@ -28935,9 +30085,23 @@ 17aa 1100 ThinkServer Ethernet Server Adapter 17aa 1509 I210 Gigabit Network Connection 17aa 404d I210 PCIe 1Gb 1-Port RJ45 LOM + 17aa 407a I210 PCIe 1Gb 1-Port RJ45 LOM + 4c52 1051 LRES1051PT Dual-port 1Gb Ethernet Network Adapter + 4c52 1210 LREC9204CT Single-port 1Gb Ethernet Network Adapter + 4c52 2057 LRES2057PT Dual-port 1Gb Ethernet Network Adapter + 4c52 2206 LRES2206PT Single-port 1Gb Ethernet Network Adapter + 4c52 2210 LRES2210PT Single-port 1Gb Ethernet Network Adapter + 4c52 2211 LRES2211PT Single-port 1Gb Ethernet Network Adapter + 4c52 2214 LRES2214PT Single-port 1Gb Ethernet Network Adapter + 4c52 3002 LRES3002PT Dual-port 1Gb Ethernet Network Adapter + 4c52 3004 LRES3004PT Quad-port 1Gb Ethernet Network Adapter 8086 0001 Ethernet Server Adapter I210-T1 8086 0002 Ethernet Server Adapter I210-T1 1536 I210 Gigabit Fiber Network Connection + 4c52 2204 LRES2204PT Single-port 1Gb Ethernet Network Adapter + 4c52 2212 LRES2212PF Single-port 1Gb Ethernet Network Adapter + 4c52 2213 LRES2213PF Single-port 1Gb Ethernet Network Adapter + 4c52 6230 LREC6230PF Single-port 1Gb Ethernet Network Adapter 1537 I210 Gigabit Backplane Connection 1059 0110 T4005 1GbE interface 1059 0111 T4007 1GbE interface @@ -28981,6 +30145,7 @@ 8086 0001 Ethernet Server Bypass Adapter X520-SR2 8086 0002 Ethernet Server Bypass Adapter X520-LR2 1560 Ethernet Controller X540 + 4c52 9801 LREC9801BT Single-port 10Gb Ethernet Network Adapter 1563 Ethernet Controller X550 1028 1fa8 Ethernet 10G 4P X550/I350 rNDC 1028 1fa9 Ethernet 10G 4P X550 rNDC @@ -28995,6 +30160,8 @@ 193d 1008 560T-B 193d 1009 560T-L 193d 1011 UN-NIC-ETH563T-sL-2P + 4c52 1025 LRES1025PT Dual-port 10Gb Ethernet Network Adapter + 4c52 9812 LREC9812BT Dual-port 10Gb Ethernet Network Adapter 8086 0001 Ethernet Converged Network Adapter X550-T2 8086 001a Ethernet Converged Network Adapter X550-T2 8086 001b Ethernet Server Adapter X550-T2 for OCP @@ -29042,12 +30209,17 @@ 1bd4 0065 F102IX710 1bd4 0074 Ethernet Network Adapter X710-BM2 for lldp 1bd4 008b F102IX710 + 4c52 3003 LRES3003PF Quad-port 10Gb Ethernet Server Adapter for OCP + 4c52 3007 LRES3007PF Quad-port 10Gb Ethernet Server Adapter for OCP + 4c52 3039 LRES3039PF Dual-port 10Gb Ethernet Server Adapter for OCP + 4c52 9804 LREC9804BF Quad-port 10Gb Ethernet Server Adapter + 4c52 9812 LREC9812BF Dual-port 10Gb Ethernet Server Adapter 8086 0000 Ethernet Converged Network Adapter X710 8086 0001 Ethernet Converged Network Adapter X710-4 8086 0002 Ethernet Converged Network Adapter X710-4 8086 0004 Ethernet Converged Network Adapter X710-4 - 8086 0005 Ethernet 10G 4P X710 Adapter - 8086 0006 Ethernet 10G 2P X710 Adapter + 8086 0005 Ethernet Converged Network Adapter X710 + 8086 0006 Ethernet Converged Network Adapter X710 8086 0007 Ethernet Converged Network Adapter X710-2 8086 0008 Ethernet Converged Network Adapter X710-2 8086 0009 Ethernet Controller X710 for 10GbE SFP+ @@ -29096,6 +30268,8 @@ 108e 7b1d 10Gb/40Gb Ethernet Adapter 1137 0000 Ethernet Converged NIC XL710-QDA2 1137 013c Ethernet Converged NIC XL710-QDA2 + 4c52 3042 LRES3042PF Dual-port 40Gb Ethernet Server 
Adapter for OCP + 4c52 9902 LREC9902BF Dual-port 40Gb Ethernet Server Adapter 8086 0000 Ethernet Converged Network Adapter XL710-Q2 8086 0001 Ethernet Converged Network Adapter XL710-Q2 8086 0002 Ethernet Converged Network Adapter XL710-Q2 @@ -29103,6 +30277,7 @@ 8086 0004 Ethernet Server Adapter XL710-Q2OCP 8086 0006 Ethernet Converged Network Adapter XL710-Q2 1584 Ethernet Controller XL710 for 40GbE QSFP+ + 4c52 9901 LREC9901BF Single-port 40Gb Ethernet Server Adapter 8086 0000 Ethernet Converged Network Adapter XL710-Q1 8086 0001 Ethernet Converged Network Adapter XL710-Q1 8086 0002 Ethernet Converged Network Adapter XL710-Q1 @@ -29123,6 +30298,7 @@ 1589 Ethernet Controller X710/X557-AT 10GBASE-T 108e 0000 Quad Port 10GBase-T Adapter 108e 7b1c Quad Port 10GBase-T Adapter + 4c52 9804 LREC9804BT Quad-port 10Gb Ethernet Network Adapter 8086 0000 Ethernet Converged Network Adapter X710-T 8086 0001 Ethernet Converged Network Adapter X710-T4 8086 0002 Ethernet Converged Network Adapter X710-T4 @@ -29156,6 +30332,7 @@ 1374 023b Quad Port 25 Gigabit Ethernet PCI Express Server Adapter (PE31625G4I71LEU) 1590 0000 Ethernet Network Adapter XXV710-2 1590 0253 Ethernet 10/25/Gb 2-port 661SFP28 Adapter + 4c52 3017 LRES3017PF Dual-port 25Gb Ethernet Server Adapter for OCP 8086 0000 Ethernet Network Adapter XXV710 8086 0001 Ethernet Network Adapter XXV710-2 8086 0002 Ethernet Network Adapter XXV710-2 @@ -29175,6 +30352,8 @@ 1592 Ethernet Controller E810-C for QSFP 1137 02bf E810CQDA2 2x100 GbE QSFP28 PCIe NIC 193d 1050 NIC-ETH1060F-LP-2P 2x100GbE Ethernet PCIe Card + 4c52 1014 LRES1014PF Dual-port 100Gb Ethernet Server Adapter + 4c52 3026 LRES3026PF Dual-port 100Gb Ethernet Server Adapter for OCP 8086 0001 Ethernet Network Adapter E810-C-Q1 8086 0002 Ethernet Network Adapter E810-C-Q2 8086 0004 Ethernet Network Adapter E810-C-Q2 @@ -29191,10 +30370,13 @@ 8086 0011 Ethernet Network Adapter E810-C-Q1 for OCP3.0 8086 0012 Ethernet 100G 2P E810-C-st Adapter 8086 0013 Ethernet Network Adapter E810-C-Q1 for OCP 3.0 + 8086 0014 Ethernet 100G 2P E810-2C Adapter 1593 Ethernet Controller E810-C for SFP 1137 02c3 E810XXVDA4 4x25/10 GbE SFP28 PCIe NIC 1137 02e9 E810XXVDA4TG 4x25/10 GbE SFP28 PCIe NIC 1137 02ea E810XXVDA4T 4x25/10 GbE SFP28 PCIe NIC + 4c52 1023 LRES1023PF Quad-port 25Gb Ethernet Server Adapter + 4c52 3027 LRES3027PF Quad-port 25Gb Ethernet Server Adapter for OCP 8086 0002 Ethernet Network Adapter E810-L-2 8086 0005 Ethernet Network Adapter E810-XXV-4 8086 0006 Ethernet Network Adapter E810-XXV-4 @@ -29209,6 +30391,7 @@ 8086 0010 Ethernet 25G 4P E810-XXV-st Adapter 8086 4010 Ethernet Network Adapter E810-XXV-4 8086 4013 Ethernet Network Adapter E810-XXV-4 for OCP 3.0 + 8086 401c Ethernet Network Adapter E810-XXV-4 for OCP 3.0 1599 Ethernet Controller E810-XXV for backplane 8086 0001 Ethernet 25G 2P E810-XXV-k Mezz 159a Ethernet Controller E810-XXV for QSFP @@ -29222,6 +30405,8 @@ 1bd4 0083 Ethernet Network Adapter E810-XXVAM2 for lldp 1bd4 00a0 S252IE810 1eec 0102 VSE-225-41E Dual-port 10Gb/25Gb Etherent PCIe + 4c52 0003 LRES1021PF Dual-port 25Gb Ethernet Server Adapter + 4c52 3029 LRES3029PF Dual-port 25Gb Ethernet Server Adapter for OCP 8086 0001 Ethernet 25G 2P E810-XXV OCP 8086 0002 Ethernet 25G 2P E810-XXV Adapter 8086 0003 Ethernet Network Adapter E810-XXV-2 @@ -29253,6 +30438,7 @@ 15b6 DSL6540 USB 3.1 Controller [Alpine Ridge] 15b7 Ethernet Connection (2) I219-LM 15b8 Ethernet Connection (2) I219-V + 1462 7994 H110M ECO/GAMING 1462 7a72 H270 PC MATE 15b9 Ethernet Connection (3) I219-LM 15bb 
Ethernet Connection (7) I219-LM @@ -29274,6 +30460,7 @@ 8086 0001 Ethernet SDI Adapter FM10420-100GbE-QDA2 8086 0002 Ethernet SDI Adapter FM10840-MTP2 15d1 Ethernet Controller 10G X550T + 4c52 9811 LREC9811BT Single-port 10Gb Ethernet Network Adapter 8086 0002 Ethernet Converged Network Adapter X550-T1 8086 001b Ethernet Server Adapter X550-T1 for OCP 8086 0021 Ethernet Converged Network Adapter X550-T1 @@ -29310,9 +30497,11 @@ 15ef JHL7540 Thunderbolt 3 Bridge [Titan Ridge DD 2018] 15f0 JHL7540 Thunderbolt 3 USB Controller [Titan Ridge DD 2018] 15f2 Ethernet Controller I225-LM + 4c52 2031 LRES2031PT Single-port 2.5Gb Ethernet Network Adapter 8086 0001 Ethernet Network Adapter I225-T1 8086 0002 Ethernet Network Adapter I225-T1 15f3 Ethernet Controller I225-V + 4c52 2031 LRES2031PT Single-port 2.5Gb Ethernet Network Adapter 8086 0003 Intel(R) Ethernet Controller (3) I225-V 15f4 Ethernet Connection (15) I219-LM 15f5 Ethernet Connection (15) I219-V @@ -29323,6 +30512,7 @@ 15fc Ethernet Connection (13) I219-V 15ff Ethernet Controller X710 for 10GBASE-T 1014 0000 PCIe3 4-port 10GbE Base-T Adapter + 108e 7b1f Quad Port 10GBase-T Adapter - CP 1137 0000 X710TLG GbE RJ45 PCIe NIC 1137 02c1 X710T2LG 2x10 GbE RJ45 PCIe NIC 1137 02c2 X710T4LG 4x10 GbE RJ45 PCIe NIC @@ -29330,6 +30520,8 @@ 1137 02da Ethernet Network Adapter X710-T4L OCP 3.0 # NIC-ETH565T-3S-2P OCP3.0 2x10G Base-T Card 193d 1082 NIC-ETH565T-3S-2P + 4c52 1012 LRES1012PT Dual-port 10Gb Ethernet Network Adapter + 4c52 3021 LRES3021PT Dual-port 10Gb Ethernet Server Adapter for OCP 8086 0000 Ethernet Network Adapter X710-TL 8086 0001 Ethernet Network Adapter X710-T4L 8086 0002 Ethernet Network Adapter X710-T4L @@ -29405,10 +30597,18 @@ 1898 Ethernet Connection E822-L for SFP 1899 Ethernet Connection E822-L/X557-AT 10GBASE-T 189a Ethernet Connection E822-L 1GbE - 18a0 C4xxx Series QAT + 18a0 Atom Processor P5xxx Series QAT 18a1 C4XXX Series QAT Virtual Function + 18b3 Atom Processor P5xxx Series SATA Controller + 18d0 Atom Processor P5xxx Series USB xHCI Controller + 18d3 Atom Processor P5xxx Series MEI Controller + 18d6 Atom Processor P5xxx Series MEI Controller + 18df Atom Processor P5xxx Series SMBus Controller + 18e0 Atom Processor P5xxx Series SPI Controller + 18e1 Atom Processor P5xxx Series Trace Hub 18ee 200xx Series QAT 18ef 200xx Series QAT Virtual Function + 18f3 Atom Processor P5xxx Series SATA Controller 1900 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers 1901 6th-10th Gen Core Processor PCIe Controller (x16) 1902 HD Graphics 510 @@ -29442,6 +30642,7 @@ 1911 Xeon E3-1200 v5/v6 / E3-1500 v5 / 6th/7th/8th Gen Core Processor Gaussian Mixture Model 1028 0869 Vostro 3470 1028 09be Latitude 7410 + 1462 7994 H110M ECO/GAMING 1462 7a72 H270 PC MATE 17aa 2247 ThinkPad T570 17aa 224f ThinkPad X1 Carbon 5th Gen @@ -29572,6 +30773,9 @@ 1b48 82597EX 10GbE Ethernet Controller 8086 a01f PRO/10GbE LR Server Adapter 8086 a11f PRO/10GbE LR Server Adapter +# Also rebranded as Montage IOH M88IO3020 + 1bcd Emmitsburg (C740 Family) USB 3.2 Gen 1 xHCI Controller + 1bd4 00a5 RS0800I5H16i 1bd2 Sapphire Rapids SATA AHCI Controller 1bf2 Sapphire Rapids SATA AHCI Controller 1c00 6 Series/C200 Series Chipset Family Desktop SATA Controller (IDE mode, ports 0-3) @@ -29579,6 +30783,7 @@ 1c02 6 Series/C200 Series Chipset Family 6 port Desktop SATA AHCI Controller 1028 04aa XPS 8300 1043 844d P8 series motherboard + 17aa 3070 ThinkCentre M91p 8086 200d DH61CR motherboard 8086 7270 Server Board S1200BT Family 1c03 6 Series/C200 Series 
Chipset Family 6 port Mobile SATA AHCI Controller @@ -29630,6 +30835,7 @@ 1043 8418 P8P67 Deluxe Motherboard 1043 841b P8H67 Series Motherboard 17aa 21cf ThinkPad T520 + 17aa 3070 ThinkCentre M91p # Realtek ALC888 audio codec 8086 2008 DQ67SW board 8086 200d DH61CR motherboard @@ -29641,6 +30847,7 @@ 1028 04da Vostro 3750 1043 844d P8 series motherboard 17aa 21cf ThinkPad T520 + 17aa 3070 ThinkCentre M91p 8086 200d DH61CR motherboard 8086 7270 Server Board S1200BT Family / Apple MacBook Pro 8,1/8,2 1c24 6 Series/C200 Series Chipset Family Thermal Management Controller @@ -29652,6 +30859,7 @@ 1028 04da Vostro 3750 1043 844d P8 series motherboard 17aa 21cf ThinkPad T520 + 17aa 3070 ThinkCentre M91p 8086 200d DH61CR motherboard 8086 7270 Server Board S1200BT Family / Apple MacBook Pro 8,1/8,2 1c27 6 Series/C200 Series Chipset Family USB Universal Host Controller #1 @@ -29665,22 +30873,26 @@ 1028 04da Vostro 3750 1043 844d P8 series motherboard 17aa 21cf ThinkPad T520 + 17aa 3070 ThinkCentre M91p 8086 200d DH61CR motherboard 8086 7270 Server Board S1200BT Family / Apple MacBook Pro 8,1/8,2 1c33 6 Series/C200 Series Chipset Family LAN Controller 1c35 6 Series/C200 Series Chipset Family VECI Controller 1c3a 6 Series/C200 Series Chipset Family MEI Controller #1 + 1028 0493 Latitude E6420 1028 04a3 Precision M4600 1028 04aa XPS 8300 1028 04b2 Vostro 3350 1028 04da Vostro 3750 1043 844d P8 series motherboard 17aa 21cf ThinkPad T520 + 17aa 3070 ThinkCentre M91p 8086 200d DH61CR motherboard 8086 7270 Apple MacBookPro8,2 [Core i7, 15", 2011] 1c3b 6 Series/C200 Series Chipset Family MEI Controller #2 1c3c 6 Series/C200 Series Chipset Family IDE-r Controller 1c3d 6 Series/C200 Series Chipset Family KT Controller + 17aa 3070 ThinkCentre M91p 1c40 6 Series/C200 Series Chipset Family LPC Controller 1c41 Mobile SFF 6 Series Chipset Family LPC Controller 1c42 6 Series/C200 Series Chipset Family LPC Controller @@ -29702,6 +30914,7 @@ 1c4c Q65 Express Chipset LPC Controller 1c4d QS67 Express Chipset LPC Controller 1c4e Q67 Express Chipset LPC Controller + 17aa 3070 ThinkCentre M91p 1c4f QM67 Express Chipset LPC Controller 1028 04a3 Precision M4600 17aa 21cf ThinkPad T520 @@ -29808,10 +31021,12 @@ 1d74 C608/C606/X79 series chipset PCI Express Upstream Port 1d76 C600/X79 series chipset Multi-Function Glue 1e00 7 Series/C210 Series Chipset Family 4-port SATA Controller [IDE mode] + 1734 11d6 B75 [Ivy Bridge] chipset on Esprimo P510 D3171 motherboard 1e01 7 Series Chipset Family 4-port SATA Controller [IDE mode] 144d c652 NP300E5C series laptop 1e02 7 Series/C210 Series Chipset Family 6-port SATA Controller [AHCI mode] 1043 84ca P8 series motherboard + 1734 11d6 B75 [Ivy Bridge] chipset on Esprimo P510 D3171 motherboard 1849 1e02 Motherboard 1e03 7 Series Chipset Family 6-port SATA Controller [AHCI mode] 1043 108d VivoBook X202EV @@ -29834,6 +31049,7 @@ 1043 84ca P8H77-I Motherboard 10cf 16e9 LIFEBOOK E752 144d c652 NP300E5C series laptop + 1734 11d6 B75 [Ivy Bridge] chipset on Esprimo P510 D3171 motherboard 1849 1e10 Motherboard 1e12 7 Series/C210 Series Chipset Family PCI Express Root Port 2 1043 108d VivoBook X202EV @@ -29850,6 +31066,7 @@ 1043 84ca P8H77-I Motherboard 1849 1e18 Motherboard 1e1a 7 Series/C210 Series Chipset Family PCI Express Root Port 6 + 1734 11d6 B75 [Ivy Bridge] chipset on Esprimo P510 D3171 motherboard 1849 1e1a Motherboard 1e1c 7 Series/C210 Series Chipset Family PCI Express Root Port 7 1e1e 7 Series/C210 Series Chipset Family PCI Express Root Port 8 @@ -29864,6 +31081,7 @@ 1043 8445 
P8Z77-V LX Motherboard 10cf 1757 LIFEBOOK E752 144d c652 NP300E5C series laptop + 1734 11d8 B75 [Ivy Bridge] chipset CX20642 audio controller on Esprimo P510 D3171 motherboard 1849 1898 Z77 Extreme4 motherboard 1e22 7 Series/C216 Chipset Family SMBus Controller 1043 108d VivoBook X202EV @@ -29872,6 +31090,7 @@ 1043 84ca P8 series motherboard 10cf 16e6 LIFEBOOK E752 144d c652 NP300E5C series laptop + 1734 11d6 B75 [Ivy Bridge] chipset on Esprimo P510 D3171 motherboard 1849 1e22 Motherboard 1e24 7 Series/C210 Series Chipset Family Thermal Management Controller 1043 1517 Zenbook Prime UX31A @@ -29883,6 +31102,7 @@ 1043 84ca P8 series motherboard 10cf 16e8 LIFEBOOK E752 144d c652 NP300E5C series laptop + 1734 11d6 B75 [Ivy Bridge] chipset USB 2.0 controller on Esprimo P510 D3171 motherboard 1849 1e26 Motherboard 1e2d 7 Series/C216 Chipset Family USB Enhanced Host Controller #2 1043 108d VivoBook X202EV @@ -29891,6 +31111,7 @@ 1043 84ca P8 series motherboard 10cf 16e8 LIFEBOOK E752 144d c652 NP300E5C series laptop + 1734 11d6 B75 [Ivy Bridge] chipset USB 2.0 controller on Esprimo P510 D3171 motherboard 1849 1e2d Motherboard 1e31 7 Series/C210 Series Chipset Family USB xHCI Host Controller 103c 179b Elitebook 8470p @@ -29900,6 +31121,7 @@ 1043 1517 Zenbook Prime UX31A 1043 84ca P8 series motherboard 10cf 16ee LIFEBOOK E752 + 1734 11d6 B75 [Ivy Bridge] chipset USB 3.0 controller on Esprimo P510 D3171 motherboard 17aa 21f3 ThinkPad T430 1849 1e31 Motherboard 1e33 7 Series/C210 Series Chipset Family LAN Controller @@ -29910,6 +31132,7 @@ 1043 84ca P8 series motherboard 10cf 16ea LIFEBOOK E752 144d c652 NP300E5C series laptop + 1734 11d6 B75 [Ivy Bridge] chipset on Esprimo P510 D3171 motherboard 1849 1e3a Motherboard 1e3b 7 Series/C210 Series Chipset Family MEI Controller #2 1e3c 7 Series/C210 Series Chipset Family IDE-r Controller @@ -29925,6 +31148,7 @@ 1e47 Q77 Express Chipset LPC Controller 1e48 Q75 Express Chipset LPC Controller 1e49 B75 Express Chipset LPC Controller + 1734 11d6 Esprimo P510 D3171 motherboard 1e4a H77 Express Chipset LPC Controller 1043 84ca P8H77-I Motherboard 1e4b 7 Series Chipset Family LPC Controller @@ -30314,10 +31538,12 @@ 1028 0211 Optiplex 755 1028 02da OptiPlex 980 1028 04f7 PowerEdge R320 server + 103c 130a Z600 Workstation 103c 2a3b Pavilion A1512X 103c 2a6f Asus IPIBL-LB Motherboard 103c 31fe ProLiant DL140 G3 103c 330b ProLiant ML150 G6 Server + 1043 81ec P5B Motherboard # same ID possibly also on other ASUS boards 1043 8277 P5K PRO Motherboard 1043 844d P8 series motherboard @@ -30327,7 +31553,9 @@ 1462 7418 Wind PC MS-7418 15d9 060d C7SIM-Q Motherboard 15d9 9680 X7DBN Motherboard + 1734 11d6 B75 [Ivy Bridge] chipset on Esprimo P510 D3171 motherboard 1775 11cc CC11/CL11 + 17aa 3070 ThinkCentre M91p 8086 7270 Server Board S1200BTS 2450 82801E ISA Bridge (LPC) 2452 82801E USB Controller @@ -30839,7 +32067,18 @@ 8086 3806 Optane Memory 16GB 8086 3810 Optane Memory M10 16GB 2525 Optane NVME SSD P1600X Series - 2526 Wireless-AC 9260 + 2526 Wi-Fi 5(802.11ac) Wireless-AC 9x6x [Thunder Peak] + 8086 0014 Dual Band Wi-Fi 5 Wireless-AC 9260 160MHz 2x2 + 8086 0210 Dual Band Wi-Fi 5 Wireless-AC 9260 80MHz 2x2 + 8086 0214 Dual Band Wi-Fi 5 Wireless-AC 9260 80MHz 2x2 + 8086 0230 Dual Band Wi-Fi 5 Wireless-AC 9560 80MHz 2x2 + 8086 0234 Dual Band Wi-Fi 5 Wireless-AC 9560 80MHz 2x2 + 8086 0238 Dual Band Wi-Fi 5 Wireless-AC 9560 80MHz 2x2 + 8086 023c Dual Band Wi-Fi 5 Wireless-AC 9560 80MHz 2x2 + 8086 0260 Dual Band Wi-Fi 5 Wireless-AC 9461 80MHz 1x1 + 8086 0264 Dual Band Wi-Fi 5 
Wireless-AC 9461 80MHz 1x1 + 8086 02a0 Dual Band Wi-Fi 5 Wireless-AC 9462 80MHz 1x1 + 8086 02a4 Dual Band Wi-Fi 5 Wireless-AC 9462 80MHz 1x1 2530 82850 850 (Tehama) Chipset Host Bridge (MCH) 1028 00c7 Dimension 8100 147b 0507 TH7II-RAID @@ -31349,11 +32588,11 @@ 2723 Wi-Fi 6 AX200 1a56 1654 Killer Wi-Fi 6 AX1650x (AX200NGW) 8086 0084 Wi-Fi 6 AX200NGW - 2725 Wi-Fi 6 AX210/AX211/AX411 160MHz + 2725 Wi-Fi 6E(802.11ax) AX210/AX1675* 2x2 [Typhoon Peak] + 1a56 1673 Killer AX1675w 160MHz + 1a56 1674 Killer Wi-Fi 6E AX1675x 160MHz 8086 0020 Wi-Fi 6 AX210 160MHz 8086 0024 Wi-Fi 6 AX210 160MHz - 8086 0090 Wi-Fi 6 AX211 160MHz - 8086 00b0 Wi-Fi 6 AX411 160MHz 8086 0310 Wi-Fi 6 AX210 160MHz 8086 0510 Wi-Fi 6 AX210 160MHz 8086 0a10 Wi-Fi 6 AX210 160MHz @@ -31363,6 +32602,13 @@ 8086 6024 Wi-Fi 6 AX210 160MHz 8086 e020 Wi-Fi 6 AX210 160MHz 8086 e024 Wi-Fi 6 AX210 160MHz + 272b Wi-Fi 7(802.11be) AX1775*/AX1790*/BE20*/BE401/BE1750* 2x2 + 8086 00f0 BE200 320MHz [Gale Peak] + 8086 00f4 BE200 320MHz [Gale Peak] + 8086 02f4 BE202 160MHz [Misty Peak] + 8086 40f0 BE200 320MHz [Gale Peak] + 8086 42f4 BE202 160MHz [Misty Peak] + 8086 e0f4 BE200 320MHz [Gale Peak] 2770 82945G/GZ/P/PL Memory Controller Hub 1028 01ad OptiPlex GX620 103c 2a3b Pavilion A1512X @@ -31768,20 +33014,20 @@ 103c 2a6f Asus IPIBL-LB Motherboard 1043 8277 P5K PRO Motherboard: 82801IR [ICH9R] 1462 7345 MS-7345 Motherboard: Intel 82801I/IR [ICH9/ICH9R] - 2823 C610/X99 series chipset sSATA Controller [RAID mode] + 2823 sSATA Controller [RAID Mode] 2824 82801HB (ICH8) 4 port SATA Controller [AHCI mode] 1043 81ec P5B 2825 82801HR/HO/HH (ICH8R/DO/DH) 2 port SATA Controller [IDE mode] 1028 01da OptiPlex 745 1462 7235 P965 Neo MS-7235 mainboard - 2826 C600/X79 series chipset SATA RAID Controller + 2826 SATA Controller [RAID Mode] 1d49 0100 Intel RSTe SATA Software RAID 1d49 0101 Intel RSTe SATA Software RAID 1d49 0102 Intel RSTe SATA Software RAID 1d49 0103 Intel RSTe SATA Software RAID 1d49 0104 Intel RSTe SATA Software RAID 1d49 0105 Intel RSTe SATA Software RAID - 2827 C610/X99 series chipset sSATA Controller [RAID mode] + 2827 sSATA Controller [RAID Mode] 2828 82801HM/HEM (ICH8M/ICH8M-E) SATA Controller [IDE mode] 1028 01f3 Inspiron 1420 103c 30c0 Compaq 6710b @@ -31803,6 +33049,7 @@ 282a 82801 Mobile SATA Controller [RAID mode] 1028 040b Latitude E6510 e4bf 50c1 PC1-GROOVE + 282f tSATA Controller [RAID Mode] 2830 82801H (ICH8 Family) USB UHCI Controller #1 1025 0121 Aspire 5920G 1028 01da OptiPlex 745 @@ -31937,27 +33184,32 @@ 1028 01da OptiPlex 745 103c 30c1 Compaq 6910p 1043 1017 X58LE + 1043 81ec P5B 104d 902d VAIO VGN-NR120E 17aa 20ad ThinkPad T61/R61 17c0 4083 Medion WIM 2210 Notebook PC [MD96850] 2841 82801H (ICH8 Family) PCI Express Port 2 103c 30c1 Compaq 6910p 1043 1017 X58LE + 1043 81ec P5B 104d 902d VAIO VGN-NR120E 17aa 20ad ThinkPad T61/R61 17c0 4083 Medion WIM 2210 Notebook PC [MD96850] 2843 82801H (ICH8 Family) PCI Express Port 3 1043 1017 X58LE + 1043 81ec P5B 104d 902d VAIO VGN-NR120E 17aa 20ad ThinkPad T61/R61 17c0 4083 Medion WIM 2210 Notebook PC [MD96850] 2845 82801H (ICH8 Family) PCI Express Port 4 1043 1017 X58LE + 1043 81ec P5B 17aa 20ad ThinkPad T61/R61 17c0 4083 Medion WIM 2210 Notebook PC [MD96850] 2847 82801H (ICH8 Family) PCI Express Port 5 1028 01da OptiPlex 745 103c 30c1 Compaq 6910p + 1043 81ec P5B 17aa 20ad ThinkPad T61/R61 17c0 4083 Medion WIM 2210 Notebook PC [MD96850] 2849 82801H (ICH8 Family) PCI Express Port 6 @@ -32823,6 +34075,7 @@ 2ffc Xeon E7 v3/Xeon E5 v3/Core i7 System Address Decoder & Broadcast Registers 
2ffd Xeon E7 v3/Xeon E5 v3/Core i7 System Address Decoder & Broadcast Registers 2ffe Xeon E7 v3/Xeon E5 v3/Core i7 System Address Decoder & Broadcast Registers + 3101 Killer E3100X 2.5 Gigabit Ethernet Controller 3140 Easel/Monette Hill Image Processor [Pixel Visual Core] 3165 Wireless 3165 8086 4010 Dual Band Wireless AC 3165 [Stone Peak 1x1] @@ -33226,6 +34479,7 @@ 37d9 X722 Hyper-V Virtual Function 3882 Ice Lake LPC Controller 38a4 Ice Lake SPI Controller + 38c8 Ice Lake-LP Smart Sound Technology Audio Controller 38e0 Ice Lake Management Engine Interface 3a00 82801JD/DO (ICH10 Family) 4-port SATA IDE Controller 3a02 82801JD/DO (ICH10 Family) SATA AHCI Controller @@ -33800,6 +35054,8 @@ 4538 Elkhart Lake PCI-e Root Complex 4555 Elkhart Lake [UHD Graphics Gen11 16EU] 4571 Elkhart Lake [UHD Graphics Gen11 32EU] +# Seems to be different than ID 4602 + 4601 Alder Lake-U15 Host and DRAM Controller 4602 Alder Lake Host and DRAM Controller 460d 12th Gen Core Processor PCI Express x16 Controller #1 461d Alder Lake Innovation Platform Framework Processor Participant @@ -33822,8 +35078,10 @@ 4641 12th Gen Core Processor Host Bridge/DRAM Registers 1028 0b10 Precision 3571 464d 12th Gen Core Processor PCI Express x4 Controller #0 + 464e Alder Lake-N Thunderbolt 4 USB Controller 464f 12th Gen Core Processor Gaussian & Neural Accelerator 1028 0b10 Precision 3571 + 4650 12th Gen Core Processor Host Bridge 465d Alder Lake Imaging Signal Processor 4660 12th Gen Core Processor Host Bridge/DRAM Registers 4668 12th Gen Core Processor Host Bridge/DRAM Registers @@ -33848,6 +35106,7 @@ 46b0 AlderLake-P [Iris Xe Graphics] 46b1 AlderLake-P [Iris Xe Graphics] 46b3 Alder Lake-UP3 GT1 [UHD Graphics] + 1025 161d N22C6 [Extensa 15 EX215-55] 46b6 AlderLake-P [Iris Xe Graphics] 46b8 AlderLake-P [Iris Xe Graphics] 46ba AlderLake-P [Iris Xe Graphics] @@ -33857,6 +35116,8 @@ 46d0 Alder Lake-N [UHD Graphics] 46d1 Alder Lake-N [UHD Graphics] 46d2 Alder Lake-N [UHD Graphics] + 46d3 Alder Lake-N [Intel Graphics] + 46d4 Alder Lake-N [Intel Graphics] 4905 DG1 [Iris Xe MAX Graphics] 4906 DG1 [Iris Xe Pod] 4907 SG1 [Server GPU SG-18M] @@ -33864,8 +35125,13 @@ 4908 DG1 [Iris Xe Graphics] 4909 DG1 [Iris Xe MAX 100] 4940 4xxx Series QAT - 4942 4xxx Series QAT - 4944 4xxx Series QAT + 4941 4xxx Series QAT Virtual Function + 4942 401xx Series QAT + 4943 401xx Series QAT Virtual Function + 4944 402xx Series QAT + 4945 402xx Series QAT Virtual Function + 4946 420xx Series QAT + 4947 420xx Series QAT Virtual Function 4b00 Elkhart Lake eSPI Controller 4b23 Elkhart Lake SMBus Controller 4b24 Elkhart Lake SPI (Flash) Controller @@ -33885,21 +35151,45 @@ 4c8b RocketLake-S GT1 [UHD Graphics 730] 4c90 RocketLake-S GT1 [UHD Graphics P750] 4c9a RocketLake-S [UHD Graphics] + 4d87 Jasper Lake eSPI Controller 4da3 Jasper Lake SMBus 4da4 Jasper Lake SPI Controller + 4da8 Jasper Lake Serial IO UART Controller #0 + 4dab Jasper Lake Serial IO SPI Controller #1 + 4db8 Jasper Lake PCIe Root Port #1 + 4db9 Jasper Lake PCIe Root Port #2 + 4dbc Jasper Lake PCIe Root Port #5 + 4dbe Jasper Lake PCIe Root Port #7 + 4dc4 Jasper Lake eMMC Controller + 4dc5 Jasper Lake Serial IO I2C Host Controller #4 + 4dc6 Jasper Lake Serial IO I2C Host Controller #5 4dc8 Jasper Lake HD Audio + 4dd3 Jasper Lake SATA AHCI Controller 4de0 Management Engine Interface - 4de8 Serial IO I2C Host Controller - 4de9 Serial IO I2C Host Controller + 4de8 Jasper Lake Serial IO I2C Host Controller #0 + 4de9 Jasper Lake Serial IO I2C Host Controller #1 + 4dea Jasper Lake Serial IO I2C Host 
Controller #2 + 4deb Jasper Lake Serial IO I2C Host Controller #3 + 4ded Jasper Lake USB 3.1 xHCI Host Controller + 4def Jasper Lake Shared SRAM 4df0 Wi-Fi 6 AX201 160MHz + 4df8 Jasper Lake SD Controller 4e03 Dynamic Tuning service 4e19 JasperLake IPU 4e55 JasperLake [UHD Graphics] 4e61 JasperLake [UHD Graphics] 4e71 JasperLake [UHD Graphics] - 4f80 DG2 - 4f81 DG2 - 4f82 DG2 + 4f80 DG2 [Intel Xe Graphics] + 4f81 DG2 [Intel Xe Graphics] + 4f82 DG2 [Intel Xe Graphics] + 4f83 DG2 [Intel Xe Graphics] + 4f84 DG2 [Intel Xe Graphics] + 4f85 DG2 [Intel Xe Graphics] + 4f86 DG2 [Intel Xe Graphics] + 4f87 DG2 [Intel Xe Graphics] + 4f88 DG2 [Intel Xe Graphics] + 4f89 ACMP [Xe Graphics] + 4f8c ACMP [Xe Graphics] 4f90 DG2 Audio Controller 4f91 DG2 Audio Controller 4f92 DG2 Audio Controller @@ -33955,10 +35245,13 @@ 1028 0b10 Precision 3571 5187 Alder Lake LPC Controller 519d Raptor Lake LPC/eSPI Controller + 1028 0c06 Precision 3580 51a3 Alder Lake PCH-P SMBus Host Controller 1028 0b10 Precision 3571 + 1028 0c06 Precision 3580 51a4 Alder Lake-P PCH SPI Controller 1028 0b10 Precision 3571 + 1028 0c06 Precision 3580 51a8 Alder Lake PCH UART #0 51a9 Alder Lake PCH UART #1 51aa Alder Lake SPI Controller @@ -33966,12 +35259,14 @@ 51b0 Alder Lake PCI Express Root Port #9 51b1 Alder Lake PCI Express x1 Root Port #10 51bb Alder Lake-P PCH PCIe Root Port #4 + 51bd Alder Lake-P PCH PCIe Root Port #6 51bf Alder Lake PCH-P PCI Express Root Port #9 51c5 Alder Lake-P Serial IO I2C Controller #0 51c6 Alder Lake-P Serial IO I2C Controller #1 51c8 Alder Lake PCH-P High Definition Audio Controller 1028 0b10 Precision 3571 51ca Raptor Lake-P/U/H cAVS + 1028 0c06 Precision 3580 51cc Alder Lake Smart Sound Technology Audio Controller 51d3 Alder Lake-P SATA AHCI Controller 1028 0b10 Precision 3571 @@ -33979,33 +35274,65 @@ 51d9 Alder Lake-P Serial IO I2C Controller #3 51e0 Alder Lake PCH HECI Controller 1028 0b10 Precision 3571 + 1028 0c06 Precision 3580 51e3 Alder Lake AMT SOL Redirection 1028 0b10 Precision 3571 51e8 Alder Lake PCH Serial IO I2C Controller #0 1028 0b10 Precision 3571 + 1028 0c06 Precision 3580 51e9 Alder Lake PCH Serial IO I2C Controller #1 1028 0b10 Precision 3571 + 1028 0c06 Precision 3580 51ea Alder Lake PCH Serial IO I2C Controller #2 51eb Alder Lake PCH Serial IO I2C Controller #3 51ed Alder Lake PCH USB 3.2 xHCI Host Controller 1028 0b10 Precision 3571 + 1028 0c06 Precision 3580 51ef Alder Lake PCH Shared SRAM 1028 0b10 Precision 3571 + 1028 0c06 Precision 3580 51f0 Alder Lake-P PCH CNVi WiFi - 8086 0034 Wireless-AC 9560 160MHz - 8086 0070 Wi-Fi 6 AX201 160MHz - 8086 0074 Wi-Fi 6 AX201 160MHz - 8086 0094 Wi-Fi 6E AX211 160MHz - 8086 4070 Wi-Fi 6 AX201 160MHz - 8086 4090 Wi-Fi 6E AX211 160MHz + 1a56 1652 Dual Band Wi-Fi 6(802.11ax) Killer AX1650i 160MHz 2x2 [Cyclone Peak] + 1a56 1671 Dual Band Wi-Fi 6E(802.11ax) AX1675s 160MHz 2x2 [Garfield Peak] + 1a56 1672 Dual Band Wi-Fi 6E(802.11ax) AX1675i 160MHz 2x2 [Garfield Peak] + 1a56 1692 Simultaneous Dual Band(Double Connect) Wi-Fi 6E AX1690i 160MHz 2x2 [Garfield Peak] + 8086 0034 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9560 160MHz 2x2 [Jefferson Peak] + 8086 0070 Dual Band Wi-Fi 6(802.11ax) AX201 160MHz 2x2 [Harrison Peak] + 8086 0074 Dual Band Wi-Fi 6(802.11ax) AX201 160MHz 2x2 [Harrison Peak] + 8086 0094 Dual Band Wi-Fi 6E(802.11ax) AX211 160MHz 2x2 [Garfield Peak] + 8086 00b4 Simultaneous Dual Band(Double Connect) Wi-Fi 6E AX411 160MHz 2x2 [Garfield Peak] + 8086 0234 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9560 80MHz 2x2 [Jefferson Peak] + 8086 0244 
Single Band Wi-Fi 6(802.11ax) AX101 80MHz 1x1 [Harrison Peak] + 8086 0264 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9461 80MHz 1x1 [Jefferson Peak] + 8086 0274 Dual Band Wi-Fi E(802.11ax) AX203 80MHz 2x2 [Johnson Peak] + 8086 02a4 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9462 80MHz 1x1 [Jefferson Peak] + 8086 4070 Dual Band Wi-Fi 6(802.11ax) AX201 160MHz 2x2 [Harrison Peak] + 8086 4090 Dual Band Wi-Fi 6E(802.11ax) AX211 160MHz 2x2 [Garfield Peak] + 8086 42a4 Dual Band Wi-Fi 5(802.11ac) Wireless-AC 9462 80MHz 1x1 [Jefferson Peak] 51f1 Raptor Lake PCH CNVi WiFi + 8086 4090 Wi-Fi 6E AX211 160MHz 51fc Alder Lake-P Integrated Sensor Hub 1028 0b10 Precision 3571 + 1028 0c06 Precision 3580 5200 EtherExpress PRO/100 Intelligent Server PCI Bridge 5201 EtherExpress PRO/100 Intelligent Server Fast Ethernet Controller 8086 0001 EtherExpress PRO/100 Server Ethernet Adapter 530d 80310 (IOP) IO Processor + 5481 Alder Lake-N PCH eSPI Controller + 54a3 Alder Lake-N SMBus + 54a4 Alder Lake-N SPI (flash) Controller + 54a8 Alder Lake-N Serial IO UART Host Controller + 54b0 Alder Lake-N PCI Express Root Port #9 + 54b1 Alder Lake-N PCI Express Root Port #10 + 54b2 Alder Lake-N PCI Express Root Port #11 + 54b3 Alder Lake-N PCI Express Root Port #12 + 54c8 Alder Lake-N PCH High Definition Audio Controller + 54d3 Alder Lake-N SATA AHCI Controller + 54e0 Alder Lake-N PCH HECI Controller + 54ed Alder Lake-N PCH USB 3.2 xHCI Host Controller + 54ef Alder Lake-N PCH Shared SRAM 54f0 CNVi: Wi-Fi + 8086 0244 Wi-Fi 6 AX101NGW 5502 Ethernet Controller (2) I225-LMvP 1ab6 0225 TS4 On-Board 2.5GbE Ethernet Adaptor 5690 DG2 [Arc A770M] @@ -34031,8 +35358,15 @@ 56b1 DG2 [Arc Pro A40/A50] 56b2 DG2 [Arc Pro A60M] 56b3 DG2 [Arc Pro A60] + 56ba DG2 [Intel Graphics] + 56bb DG2 [Intel Graphics] + 56bc DG2 [Intel Graphics] + 56bd DG2 [Intel Graphics] + 56be DG2 [Arc Graphics A750E] + 56bf DG2 [Arc Graphics A580E] 56c0 ATS-M [Data Center GPU Flex 170] 56c1 ATS-M [Data Center GPU Flex 140] + 56c2 ATS-M [Data Center GPU Flex 170V] 5780 Thunderbolt 80/120G Bridge [Barlow Ridge Host 80G 2023] 5781 Thunderbolt 80/120G NHI [Barlow Ridge Host 80G 2023] 5782 Thunderbolt 80/120G USB Controller [Barlow Ridge Host 80G 2023] @@ -34046,8 +35380,12 @@ 579e Ethernet Connection E825-C for SFP 57a4 Thunderbolt Bridge [Barlow Ridge Hub 40G 2023] 57a5 Thunderbolt USB Controller [Barlow Ridge Hub 40G 2023] - 57b1 Ethernet Controller E610 1GBASE T + 57ae Ethernet Controller E610 Backplane + 57af Ethernet Controller E610 SFP + 57b0 Ethernet Controller E610 10GBASE T + 57b1 Ethernet Controller E610 2.5GBASE T 8086 0000 Ethernet Converged Network Adapter E610 + 57b2 Ethernet Controller E610 SGMII 5845 QEMU NVM Express Controller 1af4 1100 QEMU Virtual Machine 5900 Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers @@ -34063,11 +35401,15 @@ 590b HD Graphics 610 590c Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers 590f Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers + 1043 8694 H110I-PLUS Motherboard + 1462 7994 H110M ECO/GAMING 1462 7a68 B250 KRAIT GAMING (MS-7A68) 1462 7a72 H270 PC MATE 5910 Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers 5911 Xeon E3-1200 v6/7th Gen Core Processor Gaussian Mixture Model 5912 HD Graphics 630 + 1043 8694 H110I-PLUS Motherboard + 1462 7994 H110M ECO/GAMING 1462 7a72 H270 PC MATE 5914 Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers 17aa 225d ThinkPad T480 @@ -34123,6 +35465,10 @@ 5ae8 Celeron N3350/Pentium N4200/Atom E3900 Series Low Pin Count 
Interface 5aee Celeron N3350/Pentium N4200/Atom E3900 Series HSUART Controller #4 5af0 Celeron N3350/Pentium N4200/Atom E3900 Series Host Bridge + 6420 Lunar Lake [Intel Graphics] + 643e Lunar Lake NPU + 64a0 Lunar Lake [Intel Graphics] + 64b0 Lunar Lake [Intel Graphics] 65c0 5100 Chipset Memory Controller Hub 65e2 5100 Chipset PCI Express x4 Port 2 65e3 5100 Chipset PCI Express x4 Port 3 @@ -34412,8 +35758,30 @@ 10b4 202f Lightspeed 740 8086 0000 Terminator 2x/i 8086 0100 Intel740 Graphics Accelerator + 7a04 Raptor Lake LPC/eSPI Controller + 7a23 Raptor Lake-S PCH SMBus Controller + 7a24 Raptor Lake SPI (flash) Controller + 7a27 Raptor Lake-S PCH Shared SRAM + 7a30 Raptor Lake PCI Express Root Port #9 + 7a38 Raptor Lake PCI Express Root Port #1 + 7a3a Raptor Point-S PCH - PCI Express Root Port 3 + 7a3b Raptor Lake PCI Express Root Port #4 + 7a40 Raptor Lake PCI Express Root Port #17 + 7a44 Raptor Lake PCI Express Root Port #21 + 7a48 Raptor Lake PCI Express Root Port #25 + 7a4c Raptor Lake Serial IO I2C Host Controller #0 + 7a4d Raptor Lake Serial IO I2C Host Controller #1 + 7a4e Raptor Lake Serial IO I2C Host Controller #2 + 7a50 Raptor Lake High Definition Audio Controller + 7a60 Raptor Lake USB 3.2 Gen 2x2 (20 Gb/s) XHCI Host Controller + 7a62 Raptor Lake SATA AHCI Controller + 7a68 Raptor Lake CSME HECI #1 + 7a69 Raptor Lake CSME HECI #2 + 7a70 Raptor Lake-S PCH CNVi WiFi + 8086 0090 WiFi 6E AX211 160MHz # Unlike other PCH components. The eSPI controller is specific to each chipset model 7a84 Z690 Chipset LPC/eSPI Controller + 7a85 Alder Lake-S PCH PCI Express Root Port #????? 7aa3 Alder Lake-S PCH SMBus Controller 7aa4 Alder Lake-S PCH SPI Controller 7aa7 Alder Lake-S PCH Shared SRAM @@ -34423,9 +35791,11 @@ 7ab4 Alder Lake-S PCH PCI Express Root Port #13 7ab8 Alder Lake-S PCH PCI Express Root Port #1 7ab9 Alder Lake-S PCH PCI Express Root Port #2 + 7aba Alder Lake-S PCH PCI Express Root Port #3 7abc Alder Lake-S PCH PCI Express Root Port #5 7abd Alder Lake-S PCH PCI Express Root Port #6 7abf Alder Lake-S PCH PCI Express Root Port #8 + 7ac4 Alder Lake-S PCH PCI Express Root Port #21 7ac8 Alder Lake-S PCH PCI Express Root Port #25 7acc Alder Lake-S PCH Serial IO I2C Controller #0 7acd Alder Lake-S PCH Serial IO I2C Controller #1 @@ -34441,11 +35811,19 @@ 8086 0094 Wi-Fi 6 AX201 160MHz 7afc Alder Lake-S PCH Serial IO I2C Controller #4 7afd Alder Lake-S PCH Serial IO I2C Controller #5 + 7d03 Meteor Lake-P Dynamic Tuning Technology 7d0b Volume Management Device NVMe RAID Controller Intel Corporation + 7d0d Meteor Lake-P Platform Monitoring Technology + 7d19 Meteor Lake IPU + 7d1d Meteor Lake NPU 7d40 Meteor Lake-M [Intel Graphics] + 7d41 Arrow Lake-U [Intel Graphics] 7d45 Meteor Lake-P [Intel Graphics] + 7d51 Arrow Lake-P [Intel Graphics] 7d55 Meteor Lake-P [Intel Arc Graphics] 7d60 Meteor Lake-M [Intel Graphics] + 7d67 Arrow Lake-U [Intel Graphics] + 7dd1 Arrow Lake-P [Intel Graphics] 7dd5 Meteor Lake-P [Intel Graphics] 7e01 Meteor Lake-P LPC/eSPI Controller 7e22 Meteor Lake-P SMBus Controller @@ -34456,10 +35834,17 @@ 7e27 Meteor Lake-P Serial IO SPI Controller #0 7e28 Meteor Lake-P HD Audio Controller 7e30 Meteor Lake-P Serial IO SPI Controller #1 + 7e40 Meteor Lake PCH CNVi WiFi + 8086 0094 Wi-Fi 6E AX211 160MHz +# Refer from Intel Meteor Lake EDS (doc#640228) under its "Device IDs" section. 
+ 7e45 Meteor Lake-P Integrated Sensor Hub 7e46 Meteor Lake-P Serial IO SPI Controller #2 + 7e4c Meteor Lake-P Gaussian & Neural-Network Accelerator 7e50 Meteor Lake-P Serial IO I2C Controller #4 7e51 Meteor Lake-P Serial IO I2C Controller #5 7e52 Meteor Lake-P Serial IO UART Controller #2 + 7e70 Meteor Lake-P CSME HECI #1 + 7e73 Meteor Lake-P Keyboard and Text (KT) Redirection 7e78 Meteor Lake-P Serial IO I2C Controller #0 7e79 Meteor Lake-P Serial IO I2C Controller #1 7e7a Meteor Lake-P Serial IO I2C Controller #2 @@ -34848,6 +36233,7 @@ 1028 09be Latitude 7410 9b43 10th Gen Core Processor Host Bridge/DRAM Registers 9b44 10th Gen Core Processor Host Bridge/DRAM Registers + 9b51 10th Gen Core Processor Host Bridge/DRAM Registers 9b53 Comet Lake-S 6c Host Bridge/DRAM Controller 9b54 10th Gen Core Processor Host Bridge/DRAM Registers 9b61 Comet Lake-U v1 4c Host Bridge/DRAM Controller @@ -35196,6 +36582,8 @@ 8086 0244 Wi-Fi 6 AX101NGW a0fc Tiger Lake-LP Integrated Sensor Hub a102 Q170/Q150/B150/H170/H110/Z170/CM236 Chipset SATA Controller [AHCI Mode] + 1043 8694 H110I-PLUS Motherboard + 1462 7994 H110M ECO/GAMING a103 HM170/QM170 Chipset SATA Controller [AHCI Mode] 1028 06e4 XPS 15 9550 103c 825b OMEN-17-w001nv @@ -35208,11 +36596,14 @@ a112 100 Series/C230 Series Chipset Family PCI Express Root Port #3 a113 100 Series/C230 Series Chipset Family PCI Express Root Port #4 a114 100 Series/C230 Series Chipset Family PCI Express Root Port #5 + 1043 8694 H110I-PLUS Motherboard a115 100 Series/C230 Series Chipset Family PCI Express Root Port #6 a116 100 Series/C230 Series Chipset Family PCI Express Root Port #7 a117 100 Series/C230 Series Chipset Family PCI Express Root Port #8 a118 100 Series/C230 Series Chipset Family PCI Express Root Port #9 + 1043 8694 H110I-PLUS Motherboard a119 100 Series/C230 Series Chipset Family PCI Express Root Port #10 + 1043 8694 H110I-PLUS Motherboard a11a 100 Series/C230 Series Chipset Family PCI Express Root Port #11 a11b 100 Series/C230 Series Chipset Family PCI Express Root Port #12 a11c 100 Series/C230 Series Chipset Family PCI Express Root Port #13 @@ -35223,10 +36614,14 @@ a121 100 Series/C230 Series Chipset Family Power Management Controller 1028 06e4 XPS 15 9550 103c 825b OMEN-17-w001nv + 1043 8694 H110I-PLUS Motherboard + 1462 7994 H110M ECO/GAMING a122 Sunrise Point-H cAVS a123 100 Series/C230 Series Chipset Family SMBus 1028 06e4 XPS 15 9550 103c 825b OMEN-17-w001nv + 1043 8694 H110I-PLUS Motherboard + 1462 7994 H110M ECO/GAMING a124 100 Series/C230 Series Chipset Family SPI Controller a125 100 Series/C230 Series Chipset Family Gigabit Ethernet Controller a126 100 Series/C230 Series Chipset Family Trace Hub @@ -35237,15 +36632,20 @@ a12f 100 Series/C230 Series Chipset Family USB 3.0 xHCI Controller 1028 06e4 XPS 15 9550 103c 825b OMEN-17-w001nv + 1043 8694 H110I-PLUS Motherboard + 1462 7994 H110M ECO/GAMING a130 100 Series/C230 Series Chipset Family USB Device Controller (OTG) a131 100 Series/C230 Series Chipset Family Thermal Subsystem 1028 06e4 XPS 15 9550 103c 825b OMEN-17-w001nv + 1462 7994 H110M ECO/GAMING a133 Sunrise Point-H Northpeak ACPI Function a135 100 Series/C230 Series Chipset Family Integrated Sensor Hub a13a 100 Series/C230 Series Chipset Family MEI Controller #1 1028 06e4 XPS 15 9550 103c 825b OMEN-17-w001nv + 1043 8694 H110I-PLUS Motherboard + 1462 7994 H110M ECO/GAMING a13b 100 Series/C230 Series Chipset Family MEI Controller #2 a13c 100 Series/C230 Series Chipset Family IDE Redirection a13d 100 Series/C230 Series Chipset Family KT 
Redirection @@ -35254,6 +36654,8 @@ a141 Sunrise Point-H LPC Controller a142 Sunrise Point-H LPC Controller a143 H110 Chipset LPC/eSPI Controller + 1043 8694 H110I-PLUS Motherboard + 1462 7994 H110M ECO/GAMING a144 H170 Chipset LPC/eSPI Controller a145 Z170 Chipset LPC/eSPI Controller a146 Q170 Chipset LPC/eSPI Controller @@ -35299,6 +36701,8 @@ a170 100 Series/C230 Series Chipset Family HD Audio Controller 1028 06e4 XPS 15 9550 103c 825b OMEN-17-w001nv + 1043 86c7 H110I-PLUS Motherboard + 1462 f994 H110M ECO/GAMING a171 CM238 HD Audio Controller a182 C620 Series Chipset Family SATA Controller [AHCI mode] a186 C620 Series Chipset Family SATA Controller [RAID mode] @@ -35409,6 +36813,8 @@ a2ba 200 Series PCH CSME HECI #1 1462 7a72 H270 PC MATE a2bb 200 Series PCH CSME HECI #2 +# AMT serial over LAN + a2bd 200 Series Chipset Family KT Redirection a2c4 200 Series PCH LPC Controller (H270) 1462 7a72 H270 PC MATE a2c5 200 Series PCH LPC Controller (Z270) @@ -35439,6 +36845,7 @@ 1028 0869 Vostro 3470 a305 Z390 Chipset LPC/eSPI Controller a306 Q370 Chipset LPC/eSPI Controller + a308 300 Series Chipset Family LPC Controller a309 Cannon Point-LP LPC Controller a30c QM370 Chipset LPC/eSPI Controller a30d HM470 Chipset LPC/eSPI Controller @@ -35509,19 +36916,29 @@ a3eb Comet Lake PCI Express Root Port #21 a3f0 Comet Lake PCH-V cAVS a620 6400/6402 Advanced Memory Buffer (AMB) + a703 Raptor Lake-S Host Bridge/DRAM Controller + a706 Raptor Lake-P 6p+8e cores Host Bridge/DRAM Controller + 1028 0c06 Precision 3580 a707 Raptor Lake-P/U 4p+8e cores Host Bridge/DRAM Controller a708 Raptor Lake-P/U 2p+8e cores Host Bridge/DRAM Controller + a70d Raptor Lake PCI Express 5.0 Graphics Port (PEG010) a71d Raptor Lake Dynamic Platform and Thermal Framework Processor Participant + 1028 0c06 Precision 3580 a71e Raptor Lake-P Thunderbolt 4 USB Controller + 1028 0c06 Precision 3580 a720 Raptor Lake-P [UHD Graphics] a721 Raptor Lake-P [UHD Graphics] a72f Raptor Lake-P Thunderbolt 4 PCI Express Root Port #2 a73e Raptor Lake-P Thunderbolt 4 NHI #0 + 1028 0c06 Precision 3580 + a740 Raptor Lake-S 8+12 - Host Bridge/DRAM Controller a74d Raptor Lake PCIe 4.0 Graphics Port a74f GNA Scoring Accelerator module + 1028 0c06 Precision 3580 a76d Raptor Lake-P Thunderbolt 4 NHI #1 a76e Raptor Lake-P Thunderbolt 4 PCI Express Root Port #0 a77d Raptor Lake Crashlog and Telemetry + 1028 0c06 Precision 3580 a77f Volume Management Device NVMe RAID Controller Intel Corporation a780 Raptor Lake-S GT1 [UHD Graphics 770] a781 Raptor Lake-S UHD Graphics @@ -35532,6 +36949,7 @@ a78a Raptor Lake-S UHD Graphics a78b Raptor Lake-S UHD Graphics a7a0 Raptor Lake-P [Iris Xe Graphics] + 1028 0c06 Precision 3580 a7a1 Raptor Lake-P [Iris Xe Graphics] a7a8 Raptor Lake-P [UHD Graphics] a7a9 Raptor Lake-P [UHD Graphics] @@ -35539,8 +36957,38 @@ a7ab Raptor Lake-P [Intel Graphics] a7ac Raptor Lake-U [Intel Graphics] a7ad Raptor Lake-U [Intel Graphics] + a806 Lunar Lake-M LPC/eSPI Controller + a822 Lunar Lake-M SMbus Controller + a823 Lunar Lake-M SPI Controller + a824 Lunar Lake-M Trace Hub + a825 Lunar Lake-M Serial IO UART Controller #0 + a826 Lunar Lake-M Serial IO UART Controller #1 + a827 Lunar Lake-M Serial IO SPI Controller #0 + a828 Lunar Lake-M HD Audio Controller + a830 Lunar Lake-M Serial IO SPI Controller #1 + a831 Lunar Lake-M Thunderbolt 4 USB Controller + a833 Lunar Lake-M Thunderbolt 4 NHI #0 + a834 Lunar Lake-M Thunderbolt 4 NHI #1 + a838 Lunar Lake-M PCI Express Root Port #1 + a839 Lunar Lake-M PCI Express Root Port #2 + a83a Lunar Lake-M 
PCI Express Root Port #3 + a83b Lunar Lake-M PCI Express Root Port #4 + a83c Lunar Lake-M PCI Express Root Port #5 + a83d Lunar Lake-M PCI Express Root Port #6 + a845 Lunar Lake-M Integrated Sensor Hub + a847 Lunar Lake-M UFS Controller + a84e Lunar Lake-M Thunderbolt 4 PCI Express Root Port #0 + a84f Lunar Lake-M Thunderbolt 4 PCI Express Root Port #1 + a860 Lunar Lake-M Thunderbolt 4 PCI Express Root Port #2 + a878 Lunar Lake-M Serial IO I2C Controller #0 + a879 Lunar Lake-M Serial IO I2C Controller #1 + a87a Lunar Lake-M Serial IO I2C Controller #2 + a87b Lunar Lake-M Serial IO I2C Controller #3 + a87d Lunar Lake-M USB 3.2 Gen 2x1 xHCI Host Controller abc0 Omni-Path Fabric Switch Silicon 100 Series ad0b Volume Management Device NVMe RAID Controller Intel Corporation + ad1d Arrow Lake NPU + b03e Panther Lake NPU b152 21152 PCI-to-PCI Bridge 8086 b152 21152 PCI-to-PCI Bridge # observed, and documented in Intel revision note; new mask of 1011:0026 @@ -35577,6 +37025,11 @@ d156 Core Processor Semaphore and Scratchpad Registers d157 Core Processor System Control and Status Registers d158 Core Processor Miscellaneous Registers + e202 Battlemage G21 [Intel Graphics] + e20b Battlemage G21 [Intel Graphics] + e20c Battlemage G21 [Intel Graphics] + e20d Battlemage G21 [Intel Graphics] + e212 Battlemage G21 [Intel Graphics] f1a5 SSD 600P Series 8086 390a SSDPEKKW256G7 256GB f1a6 SSD DC P4101/Pro 7600p/760p/E 6100p Series @@ -35588,7 +37041,14 @@ 8088 Beijing Wangxun Technology Co., Ltd. 0100 WX1860AL-W Gigabit Ethernet Controller 0101 WX1860A2 Gigabit Ethernet Controller + 4c52 2024 LRES2024PT Dual-port 1Gb Ethernet Network Adapter + 4c52 2025 LRES2025PT Quad-port 1Gb Ethernet Network Adapter + 4c52 2027 LRES2027PF Dual-port 1Gb Ethernet Server Adapter + 4c52 3018 LRES3018PT Dual-port 1Gb Ethernet Server Adapter for OCP 8088 0201 Dual-Port Ethernet Network Adaptor SF200T + 8088 0501 Dual-Port Ethernet Network Adapter SF200T-C101 + 8088 0901 Dual-Port Ethernet Network Adapter SF200T-B401 + 8088 0b01 Dual-Port Ethernet Network Adapter SF200T-B402 8088 4201 Dual-Port Ethernet Network Adaptor SF200T (WOL) 8088 8201 Dual-Port Ethernet Network Adaptor SF200T (NCSI) 8088 c201 Dual-Port Ethernet Network Adaptor SF200T (WOL, NCSI) @@ -35596,8 +37056,12 @@ 8088 0210 Dual-Port Ethernet Network Adaptor SF200T-S 0103 WX1860A4 Gigabit Ethernet Controller 1bd4 009e ENPW2100-T4 + 4c52 2028 LRES2028PF Quad-port 1Gb Ethernet Server Adapter + 4c52 3019 LRES3019PT Quad-port 1Gb Ethernet Server Adapter for OCP 8088 0401 Qual-Port Ethernet Network Adaptor SF400T 8088 0440 Qual-Port Ethernet Network Adaptor SF400-OCP + 8088 0a01 Quad-Port Ethernet Network Adapter SF400T-B401 + 8088 0c01 Quad-Port Ethernet Network Adapter SF400T-B402 8088 4401 Quad-Port Ethernet Network Adapter SF400T (WOL) 8088 8103 Quad-Port Ethernet Network Adaptor SF400T (NCSI) 8088 8401 Quad-Port Ethernet Network Adapter SF400T (NCSI) @@ -35621,7 +37085,10 @@ 8088 0420 Qual-Port Ethernet Network Adaptor SF400HT-S 0109 WX1860-LC Gigabit Ethernet Controller 010a WX1860A1 Gigabit Ethernet Controller + 4c52 2026 LRES2026PF Single-port 1Gb Ethernet Network Adapter + 4c52 2034 LRES2034PT Single-port 1Gb Ethernet Network Adapter 010b WX1860AL1 Gigabit Ethernet Controller + 4c52 2215 LRES2215PT Single-port 1Gb Ethernet Network Adapter 8088 0102 Single-Port Ethernet Network Adaptor SF100HT 8088 4102 Single-Port Ethernet Network Adaptor SF100HT (WOL) 8088 8102 Single-Port Ethernet Network Adaptor SF100HT (NCSI) @@ -35633,15 +37100,18 @@ 0119 WX1860-LC Gigabit 
Ethernet Controller Virtual Function 011a WX1860A1 Gigabit Ethernet Controller Virtual Function 011b WX1860AL1 Gigabit Ethernet Controller Virtual Function - 1000 Ethernet Controller RP1000 Virtual Function for 10GbE SFP+ - 1001 Ethernet Controller RP1000 for 10GbE SFP+ + 1000 Ethernet Controller SP1000A Virtual Function for 10GbE SFP+ + 1001 Ethernet Controller SP1000A for 10GbE SFP+ 1bd4 0084 Ethernet Controller SP1000A for 10GbE SFP+(lldp) 1bd4 0085 Ethernet Controller SP1000A for 10GBASE-T + 4c52 1002 LRES1002PF Dual-port 10Gb Ethernet Server Adapter + 4c52 1003 LRES1003PF Single-port 10Gb Ethernet Server Adapter + 4c52 3001 LRES3001PF Dual-port 10Gb Ethernet Server Adapter for OCP 8088 0000 Ethernet Network Adaptor RP1000 for 10GbE SFP+ 8088 0300 Ethernet Network Adaptor RP1000-A03 for 10GbE SFP+ 8088 0400 Ethernet Network Adaptor RP1000-A04 for 10GbE SFP+ - 2000 Ethernet Controller RP2000 Virtual Function for 10GbE SFP+ - 2001 Ethernet Controller RP2000 for 10GbE SFP+ + 2000 Ethernet Controller WX1820AL Virtual Function for 10GbE SFP+ + 2001 Ethernet Controller WX1820AL for 10GbE SFP+ 8088 2000 Ethernet Network Adaptor RP2000 for 10GbE SFP+ 8088 2300 Ethernet Network Adaptor RP2000-A03 for 10GbE SFP+ 8088 2400 Ethernet Network Adaptor RP2000-A04 for 10GbE SFP+ @@ -35652,7 +37122,17 @@ 8384 SigmaTel 8401 TRENDware International Inc. 8510 Sietium Semiconductor Co., Ltd. - 0201 GenBu02 [GB2062-PCIe-C0] + 0201 GenBu02 Series GPU + 8510 0001 GB2062-PUB-LPDDR + 8510 0002 GB2062-PCIe-C0 + 8510 0003 GB2062-PCIe-C41 + 8510 0004 GB2062-PCIe-HIEILP4 + 8510 0005 CQ2040-PCIe-C21 + 8510 0007 GB2062-PCIe-C40 + 8510 0008 CQ2040-MXM-M60 + 8510 0009 GB2062-PCIe-C20 + 8510 000c CQ2040-PUB + 8510 0201 GB2062-PUB-DDR # nee ScaleMP 8686 SAP 1010 vSMP Foundation controller [vSMP CTL] @@ -35664,21 +37144,36 @@ # Wuxi Micro Innovation Integrated Circuit Design Co.,Ltd. 
8848 MUCSE 1000 Ethernet Controller N10 Series for 10GbE or 40GbE (Dual-port) + 4c52 3032 LRES3032PF Dual-port 10Gb Ethernet Server Adapter for OCP 8848 8410 Ethernet Network Adapter N10G-X2-DC for 10GbE SFP+ 2-port 1001 Ethernet Controller N400 Series for 1GbE (Dual-port) 1003 Ethernet Controller N400 Series for 10GbE (Single-port) + 4c52 1050 LRES1050PF Single-port 10Gb Ethernet Network Adapter 1020 Ethernet Controller N10 Series for 10GbE (Quad-port) + 4c52 1030 LRES1030PF Quad-port 10Gb Ethernet Server Adapter + 4c52 1031 LRES1031PF Dual-port 10Gb Ethernet Server Adapter + 4c52 3031 LRES3031PF Quad-port 10Gb Ethernet Server Adapter for OCP 8848 8451 Ethernet Network Adapter N10G-X4-QC for 10GbE SFP+ 4-port 1021 Ethernet Controller N400 Series for 1GbE (Quad-port) + 4c52 1032 LRES1032PF Quad-port 1Gb Ethernet Network Adapter + 4c52 1039 LRES1039PT Quad-port 1Gb Ethernet Network Adapter 1060 Ethernet Controller N10 Series for 1GbE or 10GbE (8-port) 1080 Ethernet Controller N10 Series Virtual Function 1081 Ethernet Controller N400 Series Virtual Function 1083 Ethernet Controller N400 Series Virtual Function 8308 Ethernet Controller N500 Series for 1GbE (Quad-port, Copper RJ45) +# NIC-ETH3M0T-3S-4P Quad-Port RJ45 Adapter for OCP 3.0 + 193d 1088 NIC-ETH3M0T-3S-4P + 4c52 1048 LRES1048PT Quad-port 1Gb Ethernet Network Adapter + 4c52 3044 LRES3044PT Quad-port 1Gb Ethernet Server Adapter for OCP 8309 Ethernet Controller N500 Series Virtual Function 8318 Ethernet Controller N500 Series for 1GbE (Dual-port, Copper RJ45) + 4c52 1049 LRES1049PT Dual-port 1Gb Ethernet Network Adapter + 4c52 3043 LRES3043PT Dual-port 1Gb Ethernet Server Adapter for OCP 8866 T-Square Design Inc. 8888 Silicon Magic +# 4 port HDMI capture card + 8504 AVMatrix VC42 8912 TRX # 8c4a is not Winbond but there is a board misprogrammed 8c4a Winbond @@ -36010,6 +37505,9 @@ 103c 1101 Smart Array P416ie-m SR G10 105b 1211 HBA 8238-16i 105b 1321 HBA 8242-24i + 1137 02f8 24G TriMode M1 RAID 4GB FBWC 32D + 1137 02f9 24G TriMode M1 RAID 4GB FBWC 16D + 1137 02fa 24G TriMode M1 HBA 16D 13fe 8312 SKY-9200 MIC-8312BridgeB 152d 8a22 QS-8204-8i 152d 8a23 QS-8238-16i @@ -36062,6 +37560,7 @@ 1d49 0220 ThinkSystem 4350-8i SAS/SATA 12Gb HBA 1d49 0221 ThinkSystem 4350-16i SAS/SATA 12Gb HBA 1d49 0520 ThinkSystem RAID 5350-8i PCIe 12Gb Adapter + 1d49 0522 ThinkSystem RAID 5350-8i PCIe 12Gb Internal Adapter 1d49 0620 ThinkSystem RAID 9350-8i 2GB Flash PCIe 12Gb Adapter 1d49 0621 ThinkSystem RAID 9350-8i 2GB Flash PCIe 12Gb Internal Adapter 1d49 0622 ThinkSystem RAID 9350-16i 4GB Flash PCIe 12Gb Adapter @@ -36168,7 +37667,13 @@ 1734 1011 PRIMERGY RX300 onboard SCSI 8080 ASC-29320A U320 w/HostRAID 8081 PMC-Sierra PM8001 SAS HBA [Series 6H] + 9005 0400 Adaptec SAS HBA 6405H + 9005 0800 Adaptec SAS HBA 6805H 8088 PMC-Sierra PM8018 SAS HBA [Series 7H] + 9005 0008 Adaptec SAS HBA 7085H + 9005 0016 Adaptec SAS HBA 70165H + 9005 0800 Adaptec SAS HBA 7805H + 9005 1600 Adaptec SAS HBA 71605H 8089 PMC-Sierra PM8019 SAS encryption HBA [Series 7He] 808f AIC-7901 U320 w/HostRAID 1028 0168 Precision Workstation 670 Mainboard @@ -36249,6 +37754,9 @@ 2001 STAR2000E NVMe SSD 2002 STAR2000C NVMe SSD 2003 STAR2000L NVMe SSD + 2004 EAST 2000K SSD + 2008 STAR2008 PCIE NVMe SSD Controller + 2010 STAR2010 PCIE NVMe Secure SSD Controller bb5b Asgard AN3+ NVMe SSD fc22 Asgard AN3+ NVMe SSD a000 Asix Electronics Corporation (Wrong ID) @@ -36344,6 +37852,9 @@ c0a9 Micron/Crucial Technology 5407 P5 Plus NVMe PCIe SSD 540a P2 [Nick P2] / P3 / P3 Plus NVMe PCIe SSD (DRAM-less) 
5412 P5 NVMe PCIe SSD[SlashP5] + 5415 T500 NVMe PCIe SSD + 5419 T700 NVMe PCIe SSD + 5421 P3 Plus NVMe PCIe SSD (DRAM-less) c0de Motorola c0fe Motion Engineering, Inc. ca01 I-TEK OptoElectronics Co., LTD. @@ -36608,6 +38119,11 @@ edd8 ARK Logic Inc f043 ASUSTeK Computer Inc. (Wrong ID) f05b Foxconn International, Inc. (Wrong ID) f111 Framework Computer Inc. +f117 Cerio + 1000 Emulated PCIe Switch + 1010 Placeholder Device + 1020 Pseudo-Device + 1030 Test Device f15e SiFive, Inc. 0000 FU740-C000 RISC-V SoC PCI Express x8 to AXI4 Bridge f1d0 AJA Video diff --git a/vendor/github.com/NVIDIA/go-nvlib/pkg/pciids/pciids.go b/vendor/github.com/NVIDIA/go-nvlib/pkg/pciids/pciids.go index 5f25c0048..343df08d4 100644 --- a/vendor/github.com/NVIDIA/go-nvlib/pkg/pciids/pciids.go +++ b/vendor/github.com/NVIDIA/go-nvlib/pkg/pciids/pciids.go @@ -11,42 +11,42 @@ import ( "strings" ) -// token is what the Lexer returns +// token is what the Lexer returns. type token int const ( - // ILLEGAL a token which the Lexer does not understand + // ILLEGAL a token which the Lexer does not understand. ILLEGAL token = iota - // EOF end of file + // EOF end of file. EOF - // WS whitespace + // WS whitespace. WS - // NEWLINE '\n' + // NEWLINE '\n'. NEWLINE - // COMMENT '# something' + // COMMENT '# something'. COMMENT - // VENDOR PCI vendor + // VENDOR PCI vendor. VENDOR - // SUBVENDOR PCI subvendor + // SUBVENDOR PCI subvendor. SUBVENDOR - // DEVICE PCI device + // DEVICE PCI device. DEVICE - // CLASS PCI class + // CLASS PCI class. CLASS - // SUBCLASS PCI subclass + // SUBCLASS PCI subclass. SUBCLASS - // PROGIF PCI programming interface + // PROGIF PCI programming interface. PROGIF ) -// literal values from the Lexer +// literal values from the Lexer. type literal struct { ID string name string SubName string } -// scanner a lexical scanner +// scanner a lexical scanner. type scanner struct { r *bufio.Reader isVendor bool @@ -58,7 +58,7 @@ func newScanner(r io.Reader) *scanner { } // Since the pci.ids file is line-based we're consuming a whole line rather than only -// a single rune/char +// a single rune/char. func (s *scanner) readline() []byte { ln, err := s.r.ReadBytes('\n') if err == io.EOF { @@ -107,7 +107,7 @@ func isSubVendor(ln []byte) bool { return isLeadingTwoTabs(ln) } func isDevice(ln []byte) bool { return isLeadingOneTab(ln) } func isNewline(ln []byte) bool { return (ln[0] == '\n') } -// List of known device classes, subclasses and programming interfaces +// List of known device classes, subclasses and programming interfaces. func isClass(ln []byte) bool { return (ln[0] == 'C') } func isProgIf(ln []byte) bool { return isLeadingTwoTabs(ln) } func isSubClass(ln []byte) bool { return isLeadingOneTab(ln) } @@ -162,7 +162,7 @@ func (s *scanner) scan() (tok token, lit literal) { return ILLEGAL, literal{ID: string(line)} } -// parser reads the tokens returned by the Lexer and constructs the AST +// parser reads the tokens returned by the Lexer and constructs the AST. type parser struct { s *scanner buf struct { @@ -173,7 +173,7 @@ type parser struct { } // Various locations of pci.ids for different distributions. These may be more -// up to date than the embedded pci.ids db +// up to date than the embedded pci.ids db.
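// A minimal, runnable sketch (not from the patched sources) of the line
// classification the scanner above implements: pci.ids is a line-based format,
// so each line's meaning is determined by its leading characters alone. The
// rules below follow the predicates visible in this hunk; the scanner's
// isVendor state presumably disambiguates the device and class hierarchies,
// which share the same tab indentation.
package main

import (
	"fmt"
	"strings"
)

func classify(ln string) string {
	switch {
	case ln == "" || ln[0] == '\n':
		return "NEWLINE"
	case ln[0] == '#':
		return "COMMENT"
	case ln[0] == 'C':
		return "CLASS"
	case strings.HasPrefix(ln, "\t\t"):
		return "SUBVENDOR (or PROGIF inside a class block)"
	case strings.HasPrefix(ln, "\t"):
		return "DEVICE (or SUBCLASS inside a class block)"
	default:
		return "VENDOR"
	}
}

func main() {
	// Sample lines in pci.ids layout; the entries are taken from this diff.
	for _, ln := range []string{
		"8086  Intel Corporation",
		"\t2526  Wi-Fi 5(802.11ac) Wireless-AC 9x6x [Thunder Peak]",
		"\t\t8086 0014  Dual Band Wi-Fi 5 Wireless-AC 9260 160MHz 2x2",
		"# comments start with a hash",
	} {
		fmt.Printf("%-45s <- %q\n", classify(ln), ln)
	}
}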
var defaultPCIdbPaths = []string{ "/usr/share/misc/pci.ids", // Ubuntu "/usr/local/share/pci.ids", // RHEL-like with manual update @@ -202,7 +202,7 @@ func NewDB(opts ...Option) Interface { return newParser(pcidbs).parse() } -// Option defines a function for passing options to the NewDB() call +// Option defines a function for passing options to the NewDB() call. type Option func(*pcidb) // WithFilePath provides an Option to set the file path @@ -216,7 +216,7 @@ func WithFilePath(path string) Option { } // newParser will attempt to read the pci.ids db from well-known places or fall -// back to an internal db +// back to an internal db. func newParser(pcidbs []string) *parser { for _, db := range pcidbs { @@ -229,7 +229,7 @@ func newParser(pcidbs []string) *parser { } // We're using go embed above to have the byte array // correctly initialized with the internal shipped db - // if we cannot find an up-to-date one in the filesystem + // if we cannot find an up-to-date one in the filesystem. return newParserFromReader(bufio.NewReader(bytes.NewReader(defaultPCIdb))) } @@ -252,13 +252,13 @@ func (p *parser) unscan() { p.buf.n = 1 } var _ Interface = (*pcidb)(nil) -// Interface returns the textual description of specific attributes of PCI devices +// Interface returns the textual description of specific attributes of PCI devices. type Interface interface { GetDeviceName(uint16, uint16) (string, error) GetClassName(uint32) (string, error) } -// GetDeviceName returns the textual description of the PCI device +// GetDeviceName returns the textual description of the PCI device. func (d *pcidb) GetDeviceName(vendorID uint16, deviceID uint16) (string, error) { vendor, ok := d.vendors[vendorID] if !ok { @@ -273,7 +273,7 @@ func (d *pcidb) GetDeviceName(vendorID uint16, deviceID uint16) (string, error) return device.name, nil } -// GetClassName returns the textual description of the PCI device class +// GetClassName returns the textual description of the PCI device class. func (d *pcidb) GetClassName(classID uint32) (string, error) { class, ok := d.classes[classID] if !ok { @@ -282,53 +282,53 @@ func (d *pcidb) GetClassName(classID uint32) (string, error) { return class.name, nil } -// pcidb The complete set of PCI vendors and PCI classes +// pcidb The complete set of PCI vendors and PCI classes. type pcidb struct { vendors map[uint16]vendor classes map[uint32]class path string } -// vendor PCI vendors/devices/subVendors/SubDevices +// vendor PCI vendors/devices/subVendors/SubDevices. type vendor struct { name string devices map[uint16]device } -// subVendor PCI subVendor +// subVendor PCI subVendor. type subVendor struct { SubDevices map[uint16]SubDevice } -// SubDevice PCI SubDevice +// SubDevice PCI SubDevice. type SubDevice struct { name string } -// device PCI device +// device PCI device. type device struct { name string subVendors map[uint16]subVendor } -// class PCI classes/subClasses/Programming Interfaces +// class PCI classes/subClasses/Programming Interfaces. type class struct { name string subClasses map[uint32]subClass } -// subClass PCI subClass +// subClass PCI subClass. type subClass struct { name string progIfs map[uint8]progIf } -// progIf PCI Programming Interface +// progIf PCI Programming Interface. type progIf struct { name string } -// parse parses a PCI IDS entry +// parse parses a PCI IDS entry.
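// A usage sketch for the option API above, assuming WithFilePath makes the
// given copy of pci.ids preferred over the default search paths before
// newParser falls back to the embedded database. The IDs come from entries
// earlier in this diff; the error handling is illustrative only.
package main

import (
	"fmt"

	"github.com/NVIDIA/go-nvlib/pkg/pciids"
)

func main() {
	db := pciids.NewDB(pciids.WithFilePath("/usr/share/misc/pci.ids"))

	// 0x8086 is Intel; 0x2526 is the Wireless-AC 9x6x [Thunder Peak] device.
	name, err := db.GetDeviceName(0x8086, 0x2526)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(name)
}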
func (p *parser) parse() Interface { db := &pcidb{ @@ -336,7 +336,7 @@ func (p *parser) parse() Interface { classes: map[uint32]class{}, } - // Used for housekeeping, breadcrumb for aggregated types + // Used for housekeeping, breadcrumb for aggregated types. var hkVendor vendor var hkDevice device @@ -349,8 +349,8 @@ func (p *parser) parse() Interface { for { tok, lit := p.scan() - // We're ignoring COMMENT, NEWLINE - // An EOF will break the loop + // We're ignoring COMMENT, NEWLINE. + // An EOF will break the loop. if tok == EOF { break } @@ -408,10 +408,10 @@ func (p *parser) parse() Interface { } hkSubClass = hkClass.subClasses[uint32(id)] - // Clear the last detected sub class + // Clear the last detected sub class. hkFullID = hkFullID & 0xFFFF0000 hkFullID = hkFullID | uint32(id)<<8 - // Clear the last detected prog iface + // Clear the last detected prog iface. hkFullID = hkFullID & 0xFFFFFF00 hkFullName[1] = fmt.Sprintf("%s (%02x)", lit.name, id) diff --git a/vendor/github.com/NVIDIA/k8s-kata-manager/api/v1alpha1/config/consts.go b/vendor/github.com/NVIDIA/k8s-kata-manager/api/v1alpha1/config/consts.go index 3a7681aaf..ff7121ce1 100644 --- a/vendor/github.com/NVIDIA/k8s-kata-manager/api/v1alpha1/config/consts.go +++ b/vendor/github.com/NVIDIA/k8s-kata-manager/api/v1alpha1/config/consts.go @@ -16,6 +16,25 @@ package config +// Runtime defines container runtime type +type Runtime string + const ( DefaultKataArtifactsDir = "/opt/nvidia-gpu-operator/artifacts/runtimeclasses" + DefaultCrioRuntime = "crun" + // CRIO runtime + CRIO Runtime = "crio" + // Containerd runtime + Containerd Runtime = "containerd" ) + +func (r Runtime) String() string { + switch r { + case CRIO: + return "crio" + case Containerd: + return "containerd" + default: + return "" + } +} diff --git a/vendor/github.com/NVIDIA/k8s-operator-libs/api/upgrade/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/NVIDIA/k8s-operator-libs/api/upgrade/v1alpha1/zz_generated.deepcopy.go index 38f524f73..9c2adde64 100644 --- a/vendor/github.com/NVIDIA/k8s-operator-libs/api/upgrade/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/NVIDIA/k8s-operator-libs/api/upgrade/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2022 NVIDIA diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/all.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/all.go index 7dcabf0e5..cafb8f9c7 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/all.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/all.go @@ -20,10 +20,11 @@ import ( "fmt" "path/filepath" + "github.com/NVIDIA/go-nvlib/pkg/nvpci" + "github.com/NVIDIA/nvidia-container-toolkit/internal/info/proc/devices" "github.com/NVIDIA/nvidia-container-toolkit/internal/logger" "github.com/NVIDIA/nvidia-container-toolkit/internal/nvcaps" - "gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci" ) type allPossible struct { diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/create-dev-char-symlinks.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/create-dev-char-symlinks.go index ed6455bdd..7d269b92f 100644 --- 
a/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/create-dev-char-symlinks.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/create-dev-char-symlinks.go @@ -24,11 +24,12 @@ import ( "strings" "syscall" + "github.com/fsnotify/fsnotify" + "github.com/urfave/cli/v2" + "github.com/NVIDIA/nvidia-container-toolkit/internal/logger" "github.com/NVIDIA/nvidia-container-toolkit/internal/system/nvdevices" "github.com/NVIDIA/nvidia-container-toolkit/internal/system/nvmodules" - "github.com/fsnotify/fsnotify" - "github.com/urfave/cli/v2" ) const ( @@ -86,7 +87,7 @@ func (m command) build() *cli.Command { Usage: "The path to the driver root. `DRIVER_ROOT`/dev is searched for NVIDIA device nodes.", Value: "/", Destination: &cfg.driverRoot, - EnvVars: []string{"DRIVER_ROOT"}, + EnvVars: []string{"NVIDIA_DRIVER_ROOT", "DRIVER_ROOT"}, }, &cli.BoolFlag{ Name: "watch", diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/existing.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/existing.go index a1af8b204..d022a98fe 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/existing.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/existing.go @@ -20,9 +20,10 @@ import ( "path/filepath" "strings" + "golang.org/x/sys/unix" + "github.com/NVIDIA/nvidia-container-toolkit/internal/logger" "github.com/NVIDIA/nvidia-container-toolkit/internal/lookup" - "golang.org/x/sys/unix" ) type nodeLister interface { @@ -63,20 +64,13 @@ func (m existing) DeviceNodes() ([]deviceNode, error) { if m.nodeIsBlocked(d) { continue } - var stat unix.Stat_t err := unix.Stat(d, &stat) if err != nil { m.logger.Warningf("Could not stat device: %v", err) continue } - deviceNode := deviceNode{ - path: d, - major: unix.Major(uint64(stat.Rdev)), - minor: unix.Minor(uint64(stat.Rdev)), - } - - deviceNodes = append(deviceNodes, deviceNode) + deviceNodes = append(deviceNodes, newDeviceNode(d, stat)) } return deviceNodes, nil diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/existing_linux.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/existing_linux.go new file mode 100644 index 000000000..4aab942af --- /dev/null +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/existing_linux.go @@ -0,0 +1,28 @@ +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+**/ + +package devchar + +import "golang.org/x/sys/unix" + +func newDeviceNode(d string, stat unix.Stat_t) deviceNode { + deviceNode := deviceNode{ + path: d, + major: unix.Major(stat.Rdev), + minor: unix.Minor(stat.Rdev), + } + return deviceNode +} diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/existing_other.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/existing_other.go new file mode 100644 index 000000000..9be96294b --- /dev/null +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks/existing_other.go @@ -0,0 +1,30 @@ +//go:build !linux + +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package devchar + +import "golang.org/x/sys/unix" + +func newDeviceNode(d string, stat unix.Stat_t) deviceNode { + deviceNode := deviceNode{ + path: d, + major: unix.Major(uint64(stat.Rdev)), + minor: unix.Minor(uint64(stat.Rdev)), + } + return deviceNode +} diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/info/proc/devices/builder.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/info/proc/devices/builder.go index ed93939a5..6da9a90de 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/info/proc/devices/builder.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/info/proc/devices/builder.go @@ -42,7 +42,6 @@ func New(opts ...Option) Devices { return devices } -// Option defines a functional option. type Option func(*builder) // WithDeviceToMajor specifies an explicit device name to major number map. diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/ldcache/ldcache.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/ldcache/ldcache.go index 7673a49a4..4daf95bce 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/ldcache/ldcache.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/ldcache/ldcache.go @@ -22,15 +22,12 @@ import ( "bytes" "encoding/binary" "errors" - "fmt" "os" "path/filepath" - "strings" "syscall" "unsafe" "github.com/NVIDIA/nvidia-container-toolkit/internal/logger" - "github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/symlinks" ) const ldcachePath = "/etc/ld.so.cache" @@ -81,9 +78,10 @@ type entry2 struct { } // LDCache represents the interface for performing lookups into the LDCache +// +//go:generate moq -rm -out ldcache_mock.go . LDCache type LDCache interface { List() ([]string, []string) - Lookup(...string) ([]string, []string) } type ldcache struct { @@ -187,7 +185,7 @@ type entry struct { } // getEntries returns the entires of the ldcache in a go-friendly struct. 
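// The existing_linux.go / existing_other.go pair added above splits
// newDeviceNode by build tag, presumably because x/sys/unix declares
// Stat_t.Rdev with platform-dependent integer types: on Linux it is already
// a uint64, so the uint64(...) conversion is redundant there, while on other
// platforms Rdev may be narrower and needs the explicit widening before the
// unix.Major/unix.Minor calls. A small self-contained demonstration of the
// Linux side; the device path is arbitrary:

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Stat_t
	if err := unix.Stat("/dev/null", &st); err != nil {
		panic(err)
	}
	// On Linux, st.Rdev is uint64 and can be passed to Major/Minor directly.
	fmt.Printf("major=%d minor=%d\n", unix.Major(st.Rdev), unix.Minor(st.Rdev))
}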
-func (c *ldcache) getEntries(selected func(string) bool) []entry { +func (c *ldcache) getEntries() []entry { var entries []entry for _, e := range c.entries { bits := 0 @@ -214,9 +212,6 @@ func (c *ldcache) getEntries(selected func(string) bool) []entry { c.logger.Debugf("Skipping invalid lib") continue } - if !selected(lib) { - continue - } value := bytesToString(c.libs[e.Value:]) if value == "" { c.logger.Debugf("Skipping invalid value for lib %v", lib) @@ -227,51 +222,19 @@ func (c *ldcache) getEntries(selected func(string) bool) []entry { bits: bits, value: value, } - entries = append(entries, e) } - return entries } -// List creates a list of libraires in the ldcache. +// List creates a list of libraries in the ldcache. // The 32-bit and 64-bit libraries are returned separately. func (c *ldcache) List() ([]string, []string) { - all := func(s string) bool { return true } - - return c.resolveSelected(all) -} - -// Lookup searches the ldcache for the specified prefixes. -// The 32-bit and 64-bit libraries matching the prefixes are returned. -func (c *ldcache) Lookup(libPrefixes ...string) ([]string, []string) { - c.logger.Debugf("Looking up %v in cache", libPrefixes) - - // We define a functor to check whether a given library name matches any of the prefixes - matchesAnyPrefix := func(s string) bool { - for _, p := range libPrefixes { - if strings.HasPrefix(s, p) { - return true - } - } - return false - } - - return c.resolveSelected(matchesAnyPrefix) -} - -// resolveSelected process the entries in the LDCach based on the supplied filter and returns the resolved paths. -// The paths are separated by bittage. -func (c *ldcache) resolveSelected(selected func(string) bool) ([]string, []string) { paths := make(map[int][]string) processed := make(map[string]bool) - for _, e := range c.getEntries(selected) { - path, err := c.resolve(e.value) - if err != nil { - c.logger.Debugf("Could not resolve entry: %v", err) - continue - } + for _, e := range c.getEntries() { + path := filepath.Join(c.root, e.value) if processed[path] { continue } @@ -282,29 +245,6 @@ func (c *ldcache) resolveSelected(selected func(string) bool) ([]string, []strin return paths[32], paths[64] } -// resolve resolves the specified ldcache entry based on the value being processed. -// The input is the name of the entry in the cache. -func (c *ldcache) resolve(target string) (string, error) { - name := filepath.Join(c.root, target) - - c.logger.Debugf("checking %v", string(name)) - - link, err := symlinks.Resolve(name) - if err != nil { - return "", fmt.Errorf("failed to resolve symlink: %v", err) - } - if link == name { - return name, nil - } - - // We return absolute paths for all targets - if !filepath.IsAbs(link) || strings.HasPrefix(link, ".") { - link = filepath.Join(filepath.Dir(target), link) - } - - return c.resolve(link) -} - // bytesToString converts a byte slice to a string. // This assumes that the byte slice is null-terminated func bytesToString(value []byte) string { diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/ldcache/ldcache_mock.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/ldcache/ldcache_mock.go new file mode 100644 index 000000000..5aa532351 --- /dev/null +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/ldcache/ldcache_mock.go @@ -0,0 +1,67 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package ldcache + +import ( + "sync" +) + +// Ensure, that LDCacheMock does implement LDCache. 
+// If this is not the case, regenerate this file with moq. +var _ LDCache = &LDCacheMock{} + +// LDCacheMock is a mock implementation of LDCache. +// +// func TestSomethingThatUsesLDCache(t *testing.T) { +// +// // make and configure a mocked LDCache +// mockedLDCache := &LDCacheMock{ +// ListFunc: func() ([]string, []string) { +// panic("mock out the List method") +// }, +// } +// +// // use mockedLDCache in code that requires LDCache +// // and then make assertions. +// +// } +type LDCacheMock struct { + // ListFunc mocks the List method. + ListFunc func() ([]string, []string) + + // calls tracks calls to the methods. + calls struct { + // List holds details about calls to the List method. + List []struct { + } + } + lockList sync.RWMutex +} + +// List calls ListFunc. +func (mock *LDCacheMock) List() ([]string, []string) { + if mock.ListFunc == nil { + panic("LDCacheMock.ListFunc: method is nil but LDCache.List was just called") + } + callInfo := struct { + }{} + mock.lockList.Lock() + mock.calls.List = append(mock.calls.List, callInfo) + mock.lockList.Unlock() + return mock.ListFunc() +} + +// ListCalls gets all the calls that were made to List. +// Check the length with: +// +// len(mockedLDCache.ListCalls()) +func (mock *LDCacheMock) ListCalls() []struct { +} { + var calls []struct { + } + mock.lockList.RLock() + calls = mock.calls.List + mock.lockList.RUnlock() + return calls +} diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/logger/api.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/logger/api.go index b8db97667..750c64c66 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/logger/api.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/logger/api.go @@ -24,4 +24,5 @@ type Interface interface { Infof(string, ...interface{}) Warning(...interface{}) Warningf(string, ...interface{}) + Tracef(string, ...interface{}) } diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/logger/lib.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/logger/lib.go index 300e925f0..ddb227bfd 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/logger/lib.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/logger/lib.go @@ -45,3 +45,6 @@ func (l *NullLogger) Warning(...interface{}) {} // Warningf is a no-op for the null logger func (l *NullLogger) Warningf(string, ...interface{}) {} + +// Tracef is a no-op for the null logger +func (l *NullLogger) Tracef(string, ...interface{}) {} diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/file.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/file.go index d6fb58259..8f3302731 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/file.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/file.go @@ -27,49 +27,55 @@ import ( // file can be used to locate file (or file-like elements) at a specified set of // prefixes. The validity of a file is determined by a filter function. type file struct { - logger logger.Interface - root string - prefixes []string - filter func(string) error - count int - isOptional bool + builder + prefixes []string } -// Option defines a function for passing options to the NewFileLocator() call -type Option func(*file) +// builder defines the builder for a file locator. 
+type builder struct {
+	logger      logger.Interface
+	root        string
+	searchPaths []string
+	filter      func(string) error
+	count       int
+	isOptional  bool
+}
+
+// Option defines a function for passing builder options to the NewFileLocator() call
+type Option func(*builder)
 
 // WithRoot sets the root for the file locator
 func WithRoot(root string) Option {
-	return func(f *file) {
+	return func(f *builder) {
 		f.root = root
 	}
 }
 
 // WithLogger sets the logger for the file locator
 func WithLogger(logger logger.Interface) Option {
-	return func(f *file) {
+	return func(f *builder) {
 		f.logger = logger
 	}
 }
 
 // WithSearchPaths sets the search paths for the file locator.
 func WithSearchPaths(paths ...string) Option {
-	return func(f *file) {
-		f.prefixes = paths
+	return func(f *builder) {
+		f.searchPaths = paths
 	}
 }
 
 // WithFilter sets the filter for the file locator
 // The filter is called for each candidate file and candidates that return nil are considered.
 func WithFilter(assert func(string) error) Option {
-	return func(f *file) {
+	return func(f *builder) {
 		f.filter = assert
 	}
 }
 
 // WithCount sets the maximum number of candidates to discover
 func WithCount(count int) Option {
-	return func(f *file) {
+	return func(f *builder) {
 		f.count = count
 	}
 }
@@ -77,32 +83,42 @@ func WithCount(count int) Option {
 // WithOptional sets the optional flag for the file locator
 // If the optional flag is set, the locator will not return an error if the file is not found.
 func WithOptional(optional bool) Option {
-	return func(f *file) {
+	return func(f *builder) {
 		f.isOptional = optional
 	}
 }
 
-// NewFileLocator creates a Locator that can be used to find files with the specified options.
-func NewFileLocator(opts ...Option) Locator {
-	return newFileLocator(opts...)
-}
-
-func newFileLocator(opts ...Option) *file {
-	f := &file{}
+func newBuilder(opts ...Option) *builder {
+	o := &builder{}
 	for _, opt := range opts {
-		opt(f)
+		opt(o)
+	}
+	if o.logger == nil {
+		o.logger = logger.New()
 	}
-	if f.logger == nil {
-		f.logger = logger.New()
+	if o.filter == nil {
+		o.filter = assertFile
 	}
-	if f.filter == nil {
-		f.filter = assertFile
+	return o
+}
+
+func (o builder) build() *file {
+	f := file{
+		builder: o,
+		// Since the `Locate` implementations rely on the root already being specified we update
+		// the prefixes to include the root.
+		prefixes: getSearchPrefixes(o.root, o.searchPaths...),
 	}
-	// Since the `Locate` implementations rely on the root already being specified we update
-	// the prefixes to include the root.
-	f.prefixes = getSearchPrefixes(f.root, f.prefixes...)
+	return &f
+}
+
+// NewFileLocator creates a Locator that can be used to find files with the specified options.
+func NewFileLocator(opts ...Option) Locator {
+	return newFileLocator(opts...)
+}
 
-	return f
+func newFileLocator(opts ...Option) *file {
+	return newBuilder(opts...).build()
 }
 
 // getSearchPrefixes generates a list of unique paths to be searched by a file locator.
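As an editorial aside: the refactor above keeps the functional-options call shape while splitting mutable configuration (`builder`) from the immutable locator (`file`). A minimal sketch of how a caller inside the toolkit module might exercise this surface is shown below. Note that `internal/lookup` is not importable from outside the nvidia-container-toolkit module, and the root and search paths here are invented for illustration.

```go
package main

import (
	"errors"
	"fmt"
	"log"

	// NOTE: an internal package; this only compiles from within the
	// nvidia-container-toolkit module itself. Shown for illustration.
	"github.com/NVIDIA/nvidia-container-toolkit/internal/lookup"
)

func main() {
	// Each With* option mutates the shared builder; build() then freezes
	// the computed search prefixes into an immutable file locator.
	locator := lookup.NewFileLocator(
		lookup.WithRoot("/run/nvidia/driver"), // hypothetical driver root
		lookup.WithSearchPaths("/usr/lib64", "/usr/lib/x86_64-linux-gnu"),
		lookup.WithCount(1), // stop after the first match
	)

	// Locate matches the pattern under each computed prefix; with the
	// ErrNotFound sentinel introduced in this change, callers can use
	// errors.Is instead of string matching.
	candidates, err := locator.Locate("libcuda.so.*")
	if errors.Is(err, lookup.ErrNotFound) {
		log.Fatal("no libcuda found under the configured prefixes")
	} else if err != nil {
		log.Fatalf("lookup failed: %v", err)
	}
	fmt.Println(candidates[0])
}
```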
@@ -144,6 +160,7 @@ var _ Locator = (*file)(nil)
 func (p file) Locate(pattern string) ([]string, error) {
 	var filenames []string
 
+	p.logger.Debugf("Locating %q in %v", pattern, p.prefixes)
 visit:
 	for _, prefix := range p.prefixes {
 		pathPattern := filepath.Join(prefix, pattern)
@@ -168,7 +185,7 @@ visit:
 	}
 
 	if !p.isOptional && len(filenames) == 0 {
-		return nil, fmt.Errorf("pattern %v not found", pattern)
+		return nil, fmt.Errorf("pattern %v %w", pattern, ErrNotFound)
 	}
 	return filenames, nil
 }
diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/ldcache.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/ldcache.go
new file mode 100644
index 000000000..677dafaa6
--- /dev/null
+++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/ldcache.go
@@ -0,0 +1,118 @@
+/**
+# Copyright 2024 NVIDIA CORPORATION
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+**/
+
+package lookup
+
+import (
+	"fmt"
+	"path/filepath"
+	"slices"
+
+	"github.com/NVIDIA/nvidia-container-toolkit/internal/ldcache"
+)
+
+type ldcacheLocator struct {
+	*builder
+	resolvesTo map[string]string
+}
+
+var _ Locator = (*ldcacheLocator)(nil)
+
+func NewLdcacheLocator(opts ...Option) Locator {
+	b := newBuilder(opts...)
+
+	cache, err := ldcache.New(b.logger, b.root)
+	if err != nil {
+		b.logger.Warningf("Failed to load ldcache: %v", err)
+		if b.isOptional {
+			return &null{}
+		}
+		return &notFound{}
+	}
+
+	chain := NewSymlinkChainLocator(WithOptional(true))
+
+	resolvesTo := make(map[string]string)
+	_, libs64 := cache.List()
+	for _, library := range libs64 {
+		if _, processed := resolvesTo[library]; processed {
+			continue
+		}
+		candidates, err := chain.Locate(library)
+		if err != nil {
+			b.logger.Errorf("error processing library %s from ldcache: %v", library, err)
+			continue
+		}
+
+		if len(candidates) == 0 {
+			resolvesTo[library] = library
+			continue
+		}
+
+		// candidates represents a symlink chain.
+		// The first element represents the start of the chain and the last
+		// element the final target.
+		target := candidates[len(candidates)-1]
+		for _, candidate := range candidates {
+			resolvesTo[candidate] = target
+		}
+	}
+
+	return &ldcacheLocator{
+		builder:    b,
+		resolvesTo: resolvesTo,
+	}
+}
+
+// Locate finds the specified library name.
+// If the input is a library name, the ldcache is searched; otherwise the
+// provided path is resolved as a symlink.
+func (l ldcacheLocator) Locate(libname string) ([]string, error) { + var matcher func(string, string) bool + + if filepath.IsAbs(libname) { + matcher = func(p string, c string) bool { + m, _ := filepath.Match(filepath.Join(l.root, p), c) + return m + } + } else { + matcher = func(p string, c string) bool { + m, _ := filepath.Match(p, filepath.Base(c)) + return m + } + } + + var matches []string + seen := make(map[string]bool) + for name, target := range l.resolvesTo { + if !matcher(libname, name) { + continue + } + if seen[target] { + continue + } + seen[target] = true + matches = append(matches, target) + } + + slices.Sort(matches) + + if len(matches) == 0 && !l.isOptional { + return nil, fmt.Errorf("%s: %w", libname, ErrNotFound) + } + + return matches, nil +} diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/library.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/library.go index 0b5b7937b..6c403d084 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/library.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/library.go @@ -16,54 +16,40 @@ package lookup -import ( - "fmt" - "strings" - - "github.com/NVIDIA/nvidia-container-toolkit/internal/ldcache" - "github.com/NVIDIA/nvidia-container-toolkit/internal/logger" -) - -type library struct { - logger logger.Interface - symlink Locator - cache ldcache.LDCache -} - -var _ Locator = (*library)(nil) - -// NewLibraryLocator creates a library locator using the specified logger. -func NewLibraryLocator(logger logger.Interface, root string) (Locator, error) { - cache, err := ldcache.New(logger, root) - if err != nil { - return nil, fmt.Errorf("error loading ldcache: %v", err) - } - - l := library{ - logger: logger, - symlink: NewSymlinkLocator(WithLogger(logger), WithRoot(root)), - cache: cache, - } - - return &l, nil -} - -// Locate finds the specified libraryname. -// If the input is a library name, the ldcache is searched otherwise the -// provided path is resolved as a symlink. -func (l library) Locate(libname string) ([]string, error) { - if strings.Contains(libname, "/") { - return l.symlink.Locate(libname) - } - - paths32, paths64 := l.cache.Lookup(libname) - if len(paths32) > 0 { - l.logger.Warningf("Ignoring 32-bit libraries for %v: %v", libname, paths32) - } - - if len(paths64) == 0 { - return nil, fmt.Errorf("64-bit library %v not found", libname) +// NewLibraryLocator creates a library locator using the specified options. +func NewLibraryLocator(opts ...Option) Locator { + b := newBuilder(opts...) + + // If search paths are already specified, we return a locator for the specified search paths. + if len(b.searchPaths) > 0 { + return NewSymlinkLocator( + WithLogger(b.logger), + WithSearchPaths(b.searchPaths...), + WithRoot("/"), + ) } - return paths64, nil + opts = append(opts, + WithSearchPaths([]string{ + "/", + "/usr/lib64", + "/usr/lib/x86_64-linux-gnu", + "/usr/lib/aarch64-linux-gnu", + "/usr/lib/x86_64-linux-gnu/nvidia/current", + "/usr/lib/aarch64-linux-gnu/nvidia/current", + "/lib64", + "/lib/x86_64-linux-gnu", + "/lib/aarch64-linux-gnu", + "/lib/x86_64-linux-gnu/nvidia/current", + "/lib/aarch64-linux-gnu/nvidia/current", + }...), + ) + // We construct a symlink locator for expected library locations. + symlinkLocator := NewSymlinkLocator(opts...) 
+ + l := First( + symlinkLocator, + NewLdcacheLocator(opts...), + ) + return l } diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/locator.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/locator.go index 871e1b025..73ade2322 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/locator.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/locator.go @@ -16,9 +16,14 @@ package lookup +import "errors" + //go:generate moq -stub -out locator_mock.go . Locator // Locator defines the interface for locating files on a system. type Locator interface { Locate(string) ([]string, error) } + +// ErrNotFound indicates that a specified pattern or file could not be found. +var ErrNotFound = errors.New("not found") diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/merge.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/merge.go new file mode 100644 index 000000000..fa20b5125 --- /dev/null +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/merge.go @@ -0,0 +1,53 @@ +/** +# Copyright 2023 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package lookup + +import "errors" + +type first []Locator + +// First returns a locator that returns the first non-empty match +func First(locators ...Locator) Locator { + var f first + for _, l := range locators { + if l == nil { + continue + } + f = append(f, l) + } + return f +} + +// Locate returns the results for the first locator that returns a non-empty non-error result. +func (f first) Locate(pattern string) ([]string, error) { + var allErrors []error + for _, l := range f { + if l == nil { + continue + } + candidates, err := l.Locate(pattern) + if err != nil { + allErrors = append(allErrors, err) + continue + } + if len(candidates) > 0 { + return candidates, nil + } + } + + return nil, errors.Join(allErrors...) +} diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/null.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/null.go new file mode 100644 index 000000000..938e481b4 --- /dev/null +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/null.go @@ -0,0 +1,36 @@ +/** +# Copyright 2024 NVIDIA CORPORATION +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package lookup + +import "fmt" + +// A null locator always returns an empty response. 
+type null struct { +} + +// Locate always returns empty for a null locator. +func (l *null) Locate(string) ([]string, error) { + return nil, nil +} + +// A notFound locator always returns an ErrNotFound error. +type notFound struct { +} + +func (l *notFound) Locate(s string) ([]string, error) { + return nil, fmt.Errorf("%s: %w", s, ErrNotFound) +} diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/symlinks.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/symlinks.go index 002783cbe..c9bab069d 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/symlinks.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/symlinks.go @@ -62,6 +62,7 @@ func (p symlinkChain) Locate(pattern string) ([]string, error) { return candidates, nil } + var filenames []string found := make(map[string]bool) for len(candidates) > 0 { candidate := candidates[0] @@ -70,6 +71,7 @@ func (p symlinkChain) Locate(pattern string) ([]string, error) { continue } found[candidate] = true + filenames = append(filenames, candidate) target, err := symlinks.Resolve(candidate) if err != nil { @@ -88,11 +90,6 @@ func (p symlinkChain) Locate(pattern string) ([]string, error) { candidates = append(candidates, target) } } - - var filenames []string - for f := range found { - filenames = append(filenames, f) - } return filenames, nil } @@ -103,14 +100,19 @@ func (p symlink) Locate(pattern string) ([]string, error) { if err != nil { return nil, err } - if len(candidates) != 1 { - return nil, fmt.Errorf("failed to uniquely resolve symlink %v: %v", pattern, candidates) - } - target, err := filepath.EvalSymlinks(candidates[0]) - if err != nil { - return nil, fmt.Errorf("failed to resolve link: %v", err) + var targets []string + seen := make(map[string]bool) + for _, candidate := range candidates { + target, err := filepath.EvalSymlinks(candidate) + if err != nil { + return nil, fmt.Errorf("failed to resolve link: %w", err) + } + if seen[target] { + continue + } + seen[target] = true + targets = append(targets, target) } - - return []string{target}, err + return targets, err } diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/symlinks/symlink.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/symlinks/symlink.go index 991d47cb6..f9151a2f2 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/symlinks/symlink.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/lookup/symlinks/symlink.go @@ -25,7 +25,7 @@ import ( func Resolve(filename string) (string, error) { info, err := os.Lstat(filename) if err != nil { - return filename, fmt.Errorf("failed to get file info: %v", info) + return filename, fmt.Errorf("failed to get file info: %w", err) } if info.Mode()&os.ModeSymlink == 0 { return filename, nil @@ -33,3 +33,18 @@ func Resolve(filename string) (string, error) { return os.Readlink(filename) } + +// ForceCreate creates a specified symlink. +// If a file (or empty directory) exists at the path it is removed. 
+func ForceCreate(target string, link string) error { + _, err := os.Lstat(link) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to get file info: %w", err) + } + if !os.IsNotExist(err) { + if err := os.Remove(link); err != nil { + return fmt.Errorf("failed to remove existing file: %w", err) + } + } + return os.Symlink(target, link) +} diff --git a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/system/nvdevices/mknod.go b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/system/nvdevices/mknod.go index e5990ea01..88a7aa441 100644 --- a/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/system/nvdevices/mknod.go +++ b/vendor/github.com/NVIDIA/nvidia-container-toolkit/internal/system/nvdevices/mknod.go @@ -17,8 +17,9 @@ package nvdevices import ( - "github.com/NVIDIA/nvidia-container-toolkit/internal/logger" "golang.org/x/sys/unix" + + "github.com/NVIDIA/nvidia-container-toolkit/internal/logger" ) //go:generate moq -stub -out mknod_mock.go . mknoder diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b78..33c88305c 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c9..78bddf1ce 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. 
+func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a40..78f95f256 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bba..118e49e81 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5fd..05f5e7dfe 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd8..cf9d42aed 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go index ceceb21f5..31bbe4124 100644 --- a/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -25,12 +25,12 @@ import ( "fmt" "io" "os" + "os/exec" "strconv" "sync" "github.com/containerd/containerd/log" "github.com/klauspost/compress/zstd" - exec "golang.org/x/sys/execabs" ) type ( diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go index 5404109a6..147005413 100644 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -332,3 +332,14 @@ func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) { } return } + +// Exists returns whether an attempt to access the content would not error out +// with an ErrNotFound error. It will return an encountered error if it was +// different than ErrNotFound. 
+func Exists(ctx context.Context, provider InfoProvider, desc ocispec.Descriptor) (bool, error) { + _, err := provider.Info(ctx, desc.Digest) + if errdefs.IsNotFound(err) { + return false, nil + } + return err == nil, err +} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index 45767163c..c01bc57e8 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.7.11+unknown" + Version = "1.7.12+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go new file mode 100644 index 000000000..0ec4b12c7 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go @@ -0,0 +1,62 @@ +package md2man + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +func fmtListFlags(flags blackfriday.ListType) string { + knownFlags := []struct { + name string + flag blackfriday.ListType + }{ + {"ListTypeOrdered", blackfriday.ListTypeOrdered}, + {"ListTypeDefinition", blackfriday.ListTypeDefinition}, + {"ListTypeTerm", blackfriday.ListTypeTerm}, + {"ListItemContainsBlock", blackfriday.ListItemContainsBlock}, + {"ListItemBeginningOfList", blackfriday.ListItemBeginningOfList}, + {"ListItemEndOfList", blackfriday.ListItemEndOfList}, + } + + var f []string + for _, kf := range knownFlags { + if flags&kf.flag != 0 { + f = append(f, kf.name) + flags &^= kf.flag + } + } + if flags != 0 { + f = append(f, fmt.Sprintf("Unknown(%#x)", flags)) + } + return strings.Join(f, "|") +} + +type debugDecorator struct { + blackfriday.Renderer +} + +func depth(node *blackfriday.Node) int { + d := 0 + for n := node.Parent; n != nil; n = n.Parent { + d++ + } + return d +} + +func (d *debugDecorator) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + fmt.Fprintf(os.Stderr, "%s%s %v %v\n", + strings.Repeat(" ", depth(node)), + map[bool]string{true: "+", false: "-"}[entering], + node, + fmtListFlags(node.ListFlags)) + var b strings.Builder + status := d.Renderer.RenderNode(io.MultiWriter(&b, w), node, entering) + if b.Len() > 0 { + fmt.Fprintf(os.Stderr, ">> %q\n", b.String()) + } + return status +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go index 42bf32aab..62d91b77d 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -1,16 +1,23 @@ package md2man import ( + "os" + "strconv" + "github.com/russross/blackfriday/v2" ) // Render converts a markdown document into a roff formatted document. func Render(doc []byte) []byte { renderer := NewRoffRenderer() + var r blackfriday.Renderer = renderer + if v, _ := strconv.ParseBool(os.Getenv("MD2MAN_DEBUG")); v { + r = &debugDecorator{Renderer: r} + } return blackfriday.Run(doc, []blackfriday.Option{ - blackfriday.WithRenderer(renderer), + blackfriday.WithRenderer(r), blackfriday.WithExtensions(renderer.GetExtensions()), }...) 
 }
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
index 4b19188d9..9d6c473fd 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
@@ -1,6 +1,7 @@
 package md2man
 
 import (
+	"bufio"
 	"bytes"
 	"fmt"
 	"io"
@@ -13,68 +14,72 @@ import (
 // roffRenderer implements the blackfriday.Renderer interface for creating
 // roff format (manpages) from markdown text
 type roffRenderer struct {
-	extensions   blackfriday.Extensions
 	listCounters []int
 	firstHeader  bool
-	firstDD      bool
 	listDepth    int
 }
 
 const (
-	titleHeader      = ".TH "
-	topLevelHeader   = "\n\n.SH "
-	secondLevelHdr   = "\n.SH "
-	otherHeader      = "\n.SS "
-	crTag            = "\n"
-	emphTag          = "\\fI"
-	emphCloseTag     = "\\fP"
-	strongTag        = "\\fB"
-	strongCloseTag   = "\\fP"
-	breakTag         = "\n.br\n"
-	paraTag          = "\n.PP\n"
-	hruleTag         = "\n.ti 0\n\\l'\\n(.lu'\n"
-	linkTag          = "\n\\[la]"
-	linkCloseTag     = "\\[ra]"
-	codespanTag      = "\\fB"
-	codespanCloseTag = "\\fR"
-	codeTag          = "\n.EX\n"
-	codeCloseTag     = "\n.EE\n"
-	quoteTag         = "\n.PP\n.RS\n"
-	quoteCloseTag    = "\n.RE\n"
-	listTag          = "\n.RS\n"
-	listCloseTag     = "\n.RE\n"
-	dtTag            = "\n.TP\n"
-	dd2Tag           = "\n"
-	tableStart       = "\n.TS\nallbox;\n"
-	tableEnd         = ".TE\n"
-	tableCellStart   = "T{\n"
-	tableCellEnd     = "\nT}\n"
+	titleHeader       = ".TH "
+	topLevelHeader    = "\n\n.SH "
+	secondLevelHdr    = "\n.SH "
+	otherHeader       = "\n.SS "
+	crTag             = "\n"
+	emphTag           = "\\fI"
+	emphCloseTag      = "\\fP"
+	strongTag         = "\\fB"
+	strongCloseTag    = "\\fP"
+	breakTag          = "\n.br\n"
+	paraTag           = "\n.PP\n"
+	hruleTag          = "\n.ti 0\n\\l'\\n(.lu'\n"
+	linkTag           = "\n\\[la]"
+	linkCloseTag      = "\\[ra]"
+	codespanTag       = "\\fB"
+	codespanCloseTag  = "\\fR"
+	codeTag           = "\n.EX\n"
+	codeCloseTag      = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least as blackfriday gives them to us).
+	quoteTag          = "\n.PP\n.RS\n"
+	quoteCloseTag     = "\n.RE\n"
+	listTag           = "\n.RS\n"
+	listCloseTag      = ".RE\n"
+	dtTag             = "\n.TP\n"
+	dd2Tag            = "\n"
+	tableStart        = "\n.TS\nallbox;\n"
+	tableEnd          = ".TE\n"
+	tableCellStart    = "T{\n"
+	tableCellEnd      = "\nT}\n"
+	tablePreprocessor = `'\" t`
 )
 
 // NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
 // from markdown
 func NewRoffRenderer() *roffRenderer { // nolint: golint
-	var extensions blackfriday.Extensions
-
-	extensions |= blackfriday.NoIntraEmphasis
-	extensions |= blackfriday.Tables
-	extensions |= blackfriday.FencedCode
-	extensions |= blackfriday.SpaceHeadings
-	extensions |= blackfriday.Footnotes
-	extensions |= blackfriday.Titleblock
-	extensions |= blackfriday.DefinitionLists
-	return &roffRenderer{
-		extensions: extensions,
-	}
+	return &roffRenderer{}
 }
 
 // GetExtensions returns the list of extensions used by this renderer implementation
-func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
-	return r.extensions
+func (*roffRenderer) GetExtensions() blackfriday.Extensions {
+	return blackfriday.NoIntraEmphasis |
+		blackfriday.Tables |
+		blackfriday.FencedCode |
+		blackfriday.SpaceHeadings |
+		blackfriday.Footnotes |
+		blackfriday.Titleblock |
+		blackfriday.DefinitionLists
 }
 
 // RenderHeader handles outputting the header at document start
 func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
+	// We need to walk the tree to check if there are any tables.
+	// If there are, we need to enable the roff table preprocessor.
+ ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + if node.Type == blackfriday.Table { + out(w, tablePreprocessor+"\n") + return blackfriday.Terminate + } + return blackfriday.GoToNext + }) + // disable hyphenation out(w, ".nh\n") } @@ -91,7 +96,23 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering switch node.Type { case blackfriday.Text: - escapeSpecialChars(w, node.Literal) + // Special case: format the NAME section as required for proper whatis parsing. + // Refer to the lexgrog(1) and groff_man(7) manual pages for details. + if node.Parent != nil && + node.Parent.Type == blackfriday.Paragraph && + node.Parent.Prev != nil && + node.Parent.Prev.Type == blackfriday.Heading && + node.Parent.Prev.FirstChild != nil && + bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) { + before, after, found := bytes.Cut(node.Literal, []byte(" - ")) + escapeSpecialChars(w, before) + if found { + out(w, ` \- `) + escapeSpecialChars(w, after) + } + } else { + escapeSpecialChars(w, node.Literal) + } case blackfriday.Softbreak: out(w, crTag) case blackfriday.Hardbreak: @@ -129,14 +150,25 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering case blackfriday.Document: break case blackfriday.Paragraph: - // roff .PP markers break lists - if r.listDepth > 0 { - return blackfriday.GoToNext - } if entering { - out(w, paraTag) + if r.listDepth > 0 { + // roff .PP markers break lists + if node.Prev != nil { // continued paragraph + if node.Prev.Type == blackfriday.List && node.Prev.ListFlags&blackfriday.ListTypeDefinition == 0 { + out(w, ".IP\n") + } else { + out(w, crTag) + } + } + } else if node.Prev != nil && node.Prev.Type == blackfriday.Heading { + out(w, crTag) + } else { + out(w, paraTag) + } } else { - out(w, crTag) + if node.Next == nil || node.Next.Type != blackfriday.List { + out(w, crTag) + } } case blackfriday.BlockQuote: if entering { @@ -199,6 +231,10 @@ func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, enteri func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { openTag := listTag closeTag := listCloseTag + if (entering && r.listDepth == 0) || (!entering && r.listDepth == 1) { + openTag = crTag + closeTag = "" + } if node.ListFlags&blackfriday.ListTypeDefinition != 0 { // tags for definition lists handled within Item node openTag = "" @@ -227,23 +263,25 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering } else if node.ListFlags&blackfriday.ListTypeTerm != 0 { // DT (definition term): line just before DD (see below). out(w, dtTag) - r.firstDD = true } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { // DD (definition description): line that starts with ": ". // // We have to distinguish between the first DD and the // subsequent ones, as there should be no vertical // whitespace between the DT and the first DD. 
- if r.firstDD { - r.firstDD = false - } else { - out(w, dd2Tag) + if node.Prev != nil && node.Prev.ListFlags&(blackfriday.ListTypeTerm|blackfriday.ListTypeDefinition) == blackfriday.ListTypeDefinition { + if node.Prev.Type == blackfriday.Item && + node.Prev.LastChild != nil && + node.Prev.LastChild.Type == blackfriday.List && + node.Prev.LastChild.ListFlags&blackfriday.ListTypeDefinition == 0 { + out(w, ".IP\n") + } else { + out(w, dd2Tag) + } } } else { out(w, ".IP \\(bu 2\n") } - } else { - out(w, "\n") } } @@ -322,6 +360,28 @@ func out(w io.Writer, output string) { } func escapeSpecialChars(w io.Writer, text []byte) { + scanner := bufio.NewScanner(bytes.NewReader(text)) + + // count the number of lines in the text + // we need to know this to avoid adding a newline after the last line + n := bytes.Count(text, []byte{'\n'}) + idx := 0 + + for scanner.Scan() { + dt := scanner.Bytes() + if idx < n { + idx++ + dt = append(dt, '\n') + } + escapeSpecialCharsLine(w, dt) + } + + if err := scanner.Err(); err != nil { + panic(err) + } +} + +func escapeSpecialCharsLine(w io.Writer, text []byte) { for i := 0; i < len(text); i++ { // escape initial apostrophe or period if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md new file mode 100644 index 000000000..7436896e1 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md @@ -0,0 +1,138 @@ +# Changelog # +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/) +and this project adheres to [Semantic Versioning](http://semver.org/). + +## [Unreleased] ## + +## [0.3.1] - 2024-07-23 ## + +### Changed ### +- By allowing `Open(at)InRoot` to opt-out of the extra work done by `MkdirAll` + to do the necessary "partial lookups", `Open(at)InRoot` now does less work + for both implementations (resulting in a many-fold decrease in the number of + operations for `openat2`, and a modest improvement for non-`openat2`) and is + far more guaranteed to match the correct `openat2(RESOLVE_IN_ROOT)` + behaviour. +- We now use `readlinkat(fd, "")` where possible. For `Open(at)InRoot` this + effectively just means that we no longer risk getting spurious errors during + rename races. However, for our hardened procfs handler, this in theory should + prevent mount attacks from tricking us when doing magic-link readlinks (even + when using the unsafe host `/proc` handle). Unfortunately `Reopen` is still + potentially vulnerable to those kinds of somewhat-esoteric attacks. + + Technically this [will only work on post-2.6.39 kernels][linux-readlinkat-emptypath] + but it seems incredibly unlikely anyone is using `filepath-securejoin` on a + pre-2011 kernel. + +### Fixed ### +- Several improvements were made to the errors returned by `Open(at)InRoot` and + `MkdirAll` when dealing with invalid paths under the emulated (ie. + non-`openat2`) implementation. Previously, some paths would return the wrong + error (`ENOENT` when the last component was a non-directory), and other paths + would be returned as though they were acceptable (trailing-slash components + after a non-directory would be ignored by `Open(at)InRoot`). + + These changes were done to match `openat2`'s behaviour and purely is a + consistency fix (most users are going to be using `openat2` anyway). 
+ +[linux-readlinkat-emptypath]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=65cfc6722361570bfe255698d9cd4dccaf47570d + +## [0.3.0] - 2024-07-11 ## + +### Added ### +- A new set of `*os.File`-based APIs have been added. These are adapted from + [libpathrs][] and we strongly suggest using them if possible (as they provide + far more protection against attacks than `SecureJoin`): + + - `Open(at)InRoot` resolves a path inside a rootfs and returns an `*os.File` + handle to the path. Note that the handle returned is an `O_PATH` handle, + which cannot be used for reading or writing (as well as some other + operations -- [see open(2) for more details][open.2]) + + - `Reopen` takes an `O_PATH` file handle and safely re-opens it to upgrade + it to a regular handle. This can also be used with non-`O_PATH` handles, + but `O_PATH` is the most obvious application. + + - `MkdirAll` is an implementation of `os.MkdirAll` that is safe to use to + create a directory tree within a rootfs. + + As these are new APIs, they may change in the future. However, they should be + safe to start migrating to as we have extensive tests ensuring they behave + correctly and are safe against various races and other attacks. + +[libpathrs]: https://github.com/openSUSE/libpathrs +[open.2]: https://www.man7.org/linux/man-pages/man2/open.2.html + +## [0.2.5] - 2024-05-03 ## + +### Changed ### +- Some minor changes were made to how lexical components (like `..` and `.`) + are handled during path generation in `SecureJoin`. There is no behaviour + change as a result of this fix (the resulting paths are the same). + +### Fixed ### +- The error returned when we hit a symlink loop now references the correct + path. (#10) + +## [0.2.4] - 2023-09-06 ## + +### Security ### +- This release fixes a potential security issue in filepath-securejoin when + used on Windows ([GHSA-6xv5-86q9-7xr8][], which could be used to generate + paths outside of the provided rootfs in certain cases), as well as improving + the overall behaviour of filepath-securejoin when dealing with Windows paths + that contain volume names. Thanks to Paulo Gomes for discovering and fixing + these issues. + +### Fixed ### +- Switch to GitHub Actions for CI so we can test on Windows as well as Linux + and MacOS. + +[GHSA-6xv5-86q9-7xr8]: https://github.com/advisories/GHSA-6xv5-86q9-7xr8 + +## [0.2.3] - 2021-06-04 ## + +### Changed ### +- Switch to Go 1.13-style `%w` error wrapping, letting us drop the dependency + on `github.com/pkg/errors`. + +## [0.2.2] - 2018-09-05 ## + +### Changed ### +- Use `syscall.ELOOP` as the base error for symlink loops, rather than our own + (internal) error. This allows callers to more easily use `errors.Is` to check + for this case. + +## [0.2.1] - 2018-09-05 ## + +### Fixed ### +- Use our own `IsNotExist` implementation, which lets us handle `ENOTDIR` + properly within `SecureJoin`. + +## [0.2.0] - 2017-07-19 ## + +We now have 100% test coverage! + +### Added ### +- Add a `SecureJoinVFS` API that can be used for mocking (as we do in our new + tests) or for implementing custom handling of lookup operations (such as for + rootless containers, where work is necessary to access directories with weird + modes because we don't have `CAP_DAC_READ_SEARCH` or `CAP_DAC_OVERRIDE`). 
+ +## 0.1.0 - 2017-07-19 + +This is our first release of `github.com/cyphar/filepath-securejoin`, +containing a full implementation with a coverage of 93.5% (the only missing +cases are the error cases, which are hard to mocktest at the moment). + +[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...HEAD +[0.3.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.0...v0.3.1 +[0.3.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.5...v0.3.0 +[0.2.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.4...v0.2.5 +[0.2.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.3...v0.2.4 +[0.2.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.2...v0.2.3 +[0.2.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.1...v0.2.2 +[0.2.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.0...v0.2.1 +[0.2.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.1.0...v0.2.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE index bec842f29..cb1ab88da 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/LICENSE +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE @@ -1,5 +1,5 @@ Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -Copyright (C) 2017 SUSE LLC. All rights reserved. +Copyright (C) 2017-2024 SUSE LLC. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md index 4eca0f235..253956f86 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/README.md +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -2,31 +2,24 @@ [![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml) -An implementation of `SecureJoin`, a [candidate for inclusion in the Go -standard library][go#20126]. The purpose of this function is to be a "secure" -alternative to `filepath.Join`, and in particular it provides certain -guarantees that are not provided by `filepath.Join`. - -> **NOTE**: This code is *only* safe if you are not at risk of other processes -> modifying path components after you've used `SecureJoin`. If it is possible -> for a malicious process to modify path components of the resolved path, then -> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There -> are some Linux kernel patches I'm working on which might allow for a better -> solution.][lwn-obeneath] -> -> In addition, with a slightly modified API it might be possible to use -> `O_PATH` and verify that the opened path is actually the resolved one -- but -> I have not done that yet. I might add it in the future as a helper function -> to help users verify the path (we can't just return `/proc/self/fd/` -> because that doesn't always work transparently for all users). - -This is the function prototype: +### Old API ### -```go -func SecureJoin(root, unsafePath string) (string, error) -``` +This library was originally just an implementation of `SecureJoin` which was +[intended to be included in the Go standard library][go#20126] as a safer +`filepath.Join` that would restrict the path lookup to be inside a root +directory. 
+ +The implementation was based on code that existed in several container +runtimes. Unfortunately, this API is **fundamentally unsafe** against attackers +that can modify path components after `SecureJoin` returns and before the +caller uses the path, allowing for some fairly trivial TOCTOU attacks. + +`SecureJoin` (and `SecureJoinVFS`) are still provided by this library to +support legacy users, but new users are strongly suggested to avoid using +`SecureJoin` and instead use the [new api](#new-api) or switch to +[libpathrs][libpathrs]. -This library **guarantees** the following: +With the above limitations in mind, this library guarantees the following: * If no error is set, the resulting string **must** be a child path of `root` and will not contain any symlink path components (they will all be @@ -47,7 +40,7 @@ This library **guarantees** the following: A (trivial) implementation of this function on GNU/Linux systems could be done with the following (note that this requires root privileges and is far more opaque than the implementation in this library, and also requires that -`readlink` is inside the `root` path): +`readlink` is inside the `root` path and is trustworthy): ```go package securejoin @@ -70,9 +63,105 @@ func SecureJoin(root, unsafePath string) (string, error) { } ``` -[lwn-obeneath]: https://lwn.net/Articles/767547/ +[libpathrs]: https://github.com/openSUSE/libpathrs [go#20126]: https://github.com/golang/go/issues/20126 +### New API ### + +While we recommend users switch to [libpathrs][libpathrs] as soon as it has a +stable release, some methods implemented by libpathrs have been ported to this +library to ease the transition. These APIs are only supported on Linux. + +These APIs are implemented such that `filepath-securejoin` will +opportunistically use certain newer kernel APIs that make these operations far +more secure. In particular: + +* All of the lookup operations will use [`openat2`][openat2.2] on new enough + kernels (Linux 5.6 or later) to restrict lookups through magic-links and + bind-mounts (for certain operations) and to make use of `RESOLVE_IN_ROOT` to + efficiently resolve symlinks within a rootfs. + +* The APIs provide hardening against a malicious `/proc` mount to either detect + or avoid being tricked by a `/proc` that is not legitimate. This is done + using [`openat2`][openat2.2] for all users, and privileged users will also be + further protected by using [`fsopen`][fsopen.2] and [`open_tree`][open_tree.2] + (Linux 4.18 or later). + +[openat2.2]: https://www.man7.org/linux/man-pages/man2/openat2.2.html +[fsopen.2]: https://github.com/brauner/man-pages-md/blob/main/fsopen.md +[open_tree.2]: https://github.com/brauner/man-pages-md/blob/main/open_tree.md + +#### `OpenInRoot` #### + +```go +func OpenInRoot(root, unsafePath string) (*os.File, error) +func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) +func Reopen(handle *os.File, flags int) (*os.File, error) +``` + +`OpenInRoot` is a much safer version of + +```go +path, err := securejoin.SecureJoin(root, unsafePath) +file, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC) +``` + +that protects against various race attacks that could lead to serious security +issues, depending on the application. Note that the returned `*os.File` is an +`O_PATH` file descriptor, which is quite restricted. 
Callers will probably need +to use `Reopen` to get a more usable handle (this split is done to provide +useful features like PTY spawning and to avoid users accidentally opening bad +inodes that could cause a DoS). + +Callers need to be careful in how they use the returned `*os.File`. Usually it +is only safe to operate on the handle directly, and it is very easy to create a +security issue. [libpathrs][libpathrs] provides far more helpers to make using +these handles safer -- there is currently no plan to port them to +`filepath-securejoin`. + +`OpenatInRoot` is like `OpenInRoot` except that the root is provided using an +`*os.File`. This allows you to ensure that multiple `OpenatInRoot` (or +`MkdirAllHandle`) calls are operating on the same rootfs. + +> **NOTE**: Unlike `SecureJoin`, `OpenInRoot` will error out as soon as it hits +> a dangling symlink or non-existent path. This is in contrast to `SecureJoin` +> which treated non-existent components as though they were real directories, +> and would allow for partial resolution of dangling symlinks. These behaviours +> are at odds with how Linux treats non-existent paths and dangling symlinks, +> and so these are no longer allowed. + +#### `MkdirAll` #### + +```go +func MkdirAll(root, unsafePath string, mode int) error +func MkdirAllHandle(root *os.File, unsafePath string, mode int) (*os.File, error) +``` + +`MkdirAll` is a much safer version of + +```go +path, err := securejoin.SecureJoin(root, unsafePath) +err = os.MkdirAll(path, mode) +``` + +that protects against the same kinds of races that `OpenInRoot` protects +against. + +`MkdirAllHandle` is like `MkdirAll` except that the root is provided using an +`*os.File` (the reason for this is the same as with `OpenatInRoot`) and an +`*os.File` of the final created directory is returned (this directory is +guaranteed to be effectively identical to the directory created by +`MkdirAllHandle`, which is not possible to ensure by just using `OpenatInRoot` +after `MkdirAll`). + +> **NOTE**: Unlike `SecureJoin`, `MkdirAll` will error out as soon as it hits +> a dangling symlink or non-existent path. This is in contrast to `SecureJoin` +> which treated non-existent components as though they were real directories, +> and would allow for partial resolution of dangling symlinks. These behaviours +> are at odds with how Linux treats non-existent paths and dangling symlinks, +> and so these are no longer allowed. This means that `MkdirAll` will not +> create non-existent directories referenced by a dangling symlink. + ### License ### The license of this project is the same as Go, which is a BSD 3-clause license diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index abd410582..9e11b32fc 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.2.4 +0.3.1 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index aa32b85fb..bd86a48b0 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -1,5 +1,5 @@ // Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
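Before the `SecureJoinVFS` rewrite below, a short sketch of how the new `*os.File`-based APIs described in the README above compose. This is illustrative only: the paths are invented, the signatures (`MkdirAllHandle` taking an `int` mode, `OpenatInRoot`, `Reopen`) are as documented in the README, and `OpenatInRoot` will fail if the target does not exist, per the note above.

```go
package main

import (
	"log"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// The rootfs we want every lookup confined to (invented path).
	root, err := os.Open("/var/lib/machines/demo")
	if err != nil {
		log.Fatal(err)
	}
	defer root.Close()

	// Safely create a directory tree inside the rootfs and keep a handle
	// to the created directory, so later operations use the same inode.
	dir, err := securejoin.MkdirAllHandle(root, "etc/myapp", 0o755)
	if err != nil {
		log.Fatal(err)
	}
	defer dir.Close()

	// Resolve an existing file inside the same rootfs; the result is a
	// restricted O_PATH handle...
	handle, err := securejoin.OpenatInRoot(root, "etc/hostname")
	if err != nil {
		log.Fatal(err) // errors out on dangling symlinks or missing paths
	}
	defer handle.Close()

	// ...which has to be re-opened before it can actually be read.
	f, err := securejoin.Reopen(handle, os.O_RDONLY)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
}
```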
@@ -11,7 +11,6 @@ package securejoin import ( - "bytes" "errors" "os" "path/filepath" @@ -19,6 +18,8 @@ import ( "syscall" ) +const maxSymlinkLimit = 255 + // IsNotExist tells you if err is an error that implies that either the path // accessed does not exist (or path components don't exist). This is // effectively a more broad version of os.IsNotExist. @@ -40,6 +41,12 @@ func IsNotExist(err error) bool { // replaced with symlinks on the filesystem) after this function has returned. // Such a symlink race is necessarily out-of-scope of SecureJoin. // +// NOTE: Due to the above limitation, Linux users are strongly encouraged to +// use OpenInRoot instead, which does safely protect against these kinds of +// attacks. There is no way to solve this problem with SecureJoinVFS because +// the API is fundamentally wrong (you cannot return a "safe" path string and +// guarantee it won't be modified afterwards). +// // Volume names in unsafePath are always discarded, regardless if they are // provided via direct input or when evaluating symlinks. Therefore: // @@ -51,71 +58,69 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { } unsafePath = filepath.FromSlash(unsafePath) - var path bytes.Buffer - n := 0 - for unsafePath != "" { - if n > 255 { - return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} + var ( + currentPath string + remainingPath = unsafePath + linksWalked int + ) + for remainingPath != "" { + if v := filepath.VolumeName(remainingPath); v != "" { + remainingPath = remainingPath[len(v):] } - if v := filepath.VolumeName(unsafePath); v != "" { - unsafePath = unsafePath[len(v):] - } - - // Next path component, p. - i := strings.IndexRune(unsafePath, filepath.Separator) - var p string - if i == -1 { - p, unsafePath = unsafePath, "" + // Get the next path component. + var part string + if i := strings.IndexRune(remainingPath, filepath.Separator); i == -1 { + part, remainingPath = remainingPath, "" } else { - p, unsafePath = unsafePath[:i], unsafePath[i+1:] + part, remainingPath = remainingPath[:i], remainingPath[i+1:] } - // Create a cleaned path, using the lexical semantics of /../a, to - // create a "scoped" path component which can safely be joined to fullP - // for evaluation. At this point, path.String() doesn't contain any - // symlink components. - cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p) - if cleanP == string(filepath.Separator) { - path.Reset() + // Apply the component lexically to the path we are building. + // currentPath does not contain any symlinks, and we are lexically + // dealing with a single component, so it's okay to do a filepath.Clean + // here. + nextPath := filepath.Join(string(filepath.Separator), currentPath, part) + if nextPath == string(filepath.Separator) { + currentPath = "" continue } - fullP := filepath.Clean(root + cleanP) + fullPath := root + string(filepath.Separator) + nextPath // Figure out whether the path is a symlink. - fi, err := vfs.Lstat(fullP) + fi, err := vfs.Lstat(fullPath) if err != nil && !IsNotExist(err) { return "", err } // Treat non-existent path components the same as non-symlinks (we // can't do any better here). if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { - path.WriteString(p) - path.WriteRune(filepath.Separator) + currentPath = nextPath continue } - // Only increment when we actually dereference a link. - n++ + // It's a symlink, so get its contents and expand it by prepending it + // to the yet-unparsed path. 
+ linksWalked++ + if linksWalked > maxSymlinkLimit { + return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} + } - // It's a symlink, expand it by prepending it to the yet-unparsed path. - dest, err := vfs.Readlink(fullP) + dest, err := vfs.Readlink(fullPath) if err != nil { return "", err } + remainingPath = dest + string(filepath.Separator) + remainingPath // Absolute symlinks reset any work we've already done. if filepath.IsAbs(dest) { - path.Reset() + currentPath = "" } - unsafePath = dest + string(filepath.Separator) + unsafePath } - // We have to clean path.String() here because it may contain '..' - // components that are entirely lexical, but would be misleading otherwise. - // And finally do a final clean to ensure that root is also lexically - // clean. - fullP := filepath.Clean(string(filepath.Separator) + path.String()) - return filepath.Clean(root + fullP), nil + // There should be no lexical components like ".." left in the path here, + // but for safety clean up the path before joining it to the root. + finalPath := filepath.Join(string(filepath.Separator), currentPath) + return filepath.Join(root, finalPath), nil } // SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library diff --git a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go new file mode 100644 index 000000000..290befa15 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go @@ -0,0 +1,389 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "path" + "path/filepath" + "slices" + "strings" + + "golang.org/x/sys/unix" +) + +type symlinkStackEntry struct { + // (dir, remainingPath) is what we would've returned if the link didn't + // exist. This matches what openat2(RESOLVE_IN_ROOT) would return in + // this case. + dir *os.File + remainingPath string + // linkUnwalked is the remaining path components from the original + // Readlink which we have yet to walk. When this slice is empty, we + // drop the link from the stack. + linkUnwalked []string +} + +func (se symlinkStackEntry) String() string { + return fmt.Sprintf("<%s>/%s [->%s]", se.dir.Name(), se.remainingPath, strings.Join(se.linkUnwalked, "/")) +} + +func (se symlinkStackEntry) Close() { + _ = se.dir.Close() +} + +type symlinkStack []*symlinkStackEntry + +func (s *symlinkStack) IsEmpty() bool { + return s == nil || len(*s) == 0 +} + +func (s *symlinkStack) Close() { + if s != nil { + for _, link := range *s { + link.Close() + } + // TODO: Switch to clear once we switch to Go 1.21. + *s = nil + } +} + +var ( + errEmptyStack = errors.New("[internal] stack is empty") + errBrokenSymlinkStack = errors.New("[internal error] broken symlink stack") +) + +func (s *symlinkStack) popPart(part string) error { + if s == nil || s.IsEmpty() { + // If there is nothing in the symlink stack, then the part was from the + // real path provided by the user, and this is a no-op. + return errEmptyStack + } + if part == "." { + // "." components are no-ops -- we drop them when doing SwapLink. + return nil + } + + tailEntry := (*s)[len(*s)-1] + + // Double-check that we are popping the component we expect. 
+	if len(tailEntry.linkUnwalked) == 0 {
+		return fmt.Errorf("%w: trying to pop component %q of empty stack entry %s", errBrokenSymlinkStack, part, tailEntry)
+	}
+	headPart := tailEntry.linkUnwalked[0]
+	if headPart != part {
+		return fmt.Errorf("%w: trying to pop component %q but the last stack entry is %s (%q)", errBrokenSymlinkStack, part, tailEntry, headPart)
+	}
+
+	// Drop the component, but keep the entry around in case we are dealing
+	// with a "tail-chained" symlink.
+	tailEntry.linkUnwalked = tailEntry.linkUnwalked[1:]
+	return nil
+}
+
+func (s *symlinkStack) PopPart(part string) error {
+	if err := s.popPart(part); err != nil {
+		if errors.Is(err, errEmptyStack) {
+			// Skip empty stacks.
+			err = nil
+		}
+		return err
+	}
+
+	// Clean up any of the trailing stack entries that are empty.
+	for lastGood := len(*s) - 1; lastGood >= 0; lastGood-- {
+		entry := (*s)[lastGood]
+		if len(entry.linkUnwalked) > 0 {
+			break
+		}
+		entry.Close()
+		(*s) = (*s)[:lastGood]
+	}
+	return nil
+}
+
+func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) error {
+	if s == nil {
+		return nil
+	}
+	// Split the link target and clean up any "" parts.
+	linkTargetParts := slices.DeleteFunc(
+		strings.Split(linkTarget, "/"),
+		func(part string) bool { return part == "" || part == "." })
+
+	// Copy the directory so the caller doesn't close our copy.
+	dirCopy, err := dupFile(dir)
+	if err != nil {
+		return err
+	}
+
+	// Add to the stack.
+	*s = append(*s, &symlinkStackEntry{
+		dir:           dirCopy,
+		remainingPath: remainingPath,
+		linkUnwalked:  linkTargetParts,
+	})
+	return nil
+}
+
+func (s *symlinkStack) SwapLink(linkPart string, dir *os.File, remainingPath, linkTarget string) error {
+	// If we are currently inside a symlink resolution, remove the symlink
+	// component from the last symlink entry, but don't remove the entry even
+	// if it's empty. If we are a "tail-chained" symlink (a trailing symlink we
+	// hit during a symlink resolution) we need to keep the old symlink until
+	// we finish the resolution.
+	if err := s.popPart(linkPart); err != nil {
+		if !errors.Is(err, errEmptyStack) {
+			return err
+		}
+		// Push the component regardless of whether the stack was empty.
+	}
+	return s.push(dir, remainingPath, linkTarget)
+}
+
+func (s *symlinkStack) PopTopSymlink() (*os.File, string, bool) {
+	if s == nil || s.IsEmpty() {
+		return nil, "", false
+	}
+	tailEntry := (*s)[0]
+	*s = (*s)[1:]
+	return tailEntry.dir, tailEntry.remainingPath, true
+}
+
+// partialLookupInRoot tries to look up as much of the requested path as
+// possible within the provided root (a-la RESOLVE_IN_ROOT) and opens the final
+// existing component of the requested path, returning a file handle to the
+// final existing component and a string containing the remaining path
+// components.
+func partialLookupInRoot(root *os.File, unsafePath string) (*os.File, string, error) {
+	return lookupInRoot(root, unsafePath, true)
+}
+
+func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) {
+	handle, remainingPath, err := lookupInRoot(root, unsafePath, false)
+	if remainingPath != "" && err == nil {
+		// should never happen
+		err = fmt.Errorf("[bug] non-empty remaining path when doing a non-partial lookup: %q", remainingPath)
+	}
+	// lookupInRoot(partial=false) will always close the handle if an error is
+	// returned, so no need to double-check here.
+	return handle, err
+}
+
+func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) {
+	unsafePath = filepath.ToSlash(unsafePath) // noop
+
+	// This is very similar to SecureJoin, except that we operate on the
+	// components using file descriptors. We then return the last component we
+	// managed to open, along with the remaining path components not opened.
+
+	// Try to use openat2 if possible.
+	if hasOpenat2() {
+		return lookupOpenat2(root, unsafePath, partial)
+	}
+
+	// Get the "actual" root path from /proc/self/fd. This is necessary if the
+	// root is some magic-link like /proc/$pid/root, in which case we want to
+	// make sure when we do checkProcSelfFdPath that we are using the correct
+	// root path.
+	logicalRootPath, err := procSelfFdReadlink(root)
+	if err != nil {
+		return nil, "", fmt.Errorf("get real root path: %w", err)
+	}
+
+	currentDir, err := dupFile(root)
+	if err != nil {
+		return nil, "", fmt.Errorf("clone root fd: %w", err)
+	}
+	defer func() {
+		// If a handle is not returned, close the internal handle.
+		if Handle == nil {
+			_ = currentDir.Close()
+		}
+	}()
+
+	// symlinkStack is used to emulate how openat2(RESOLVE_IN_ROOT) treats
+	// dangling symlinks. If we hit a non-existent path while resolving a
+	// symlink, we need to return the (dir, remainingPath) that we had when we
+	// hit the symlink (treating the symlink as though it were a regular file).
+	// The set of (dir, remainingPath) pairs is stored within the symlinkStack
+	// and we add and remove parts when we hit symlink and non-symlink
+	// components respectively. We need a stack because of recursive symlinks
+	// (symlinks that contain symlink components in their target).
+	//
+	// Note that the stack is ONLY used for book-keeping. All of the actual
+	// path walking logic is still based on currentPath/remainingPath and
+	// currentDir (as in SecureJoin).
+	var symStack *symlinkStack
+	if partial {
+		symStack = new(symlinkStack)
+		defer symStack.Close()
+	}
+
+	var (
+		linksWalked   int
+		currentPath   string
+		remainingPath = unsafePath
+	)
+	for remainingPath != "" {
+		// Save the current remaining path so if the part is not real we can
+		// return the path including the component.
+		oldRemainingPath := remainingPath
+
+		// Get the next path component.
+		var part string
+		if i := strings.IndexByte(remainingPath, '/'); i == -1 {
+			part, remainingPath = remainingPath, ""
+		} else {
+			part, remainingPath = remainingPath[:i], remainingPath[i+1:]
+		}
+		// If we hit an empty component, we need to treat it as though it is
+		// "." so that trailing "/" and "//" components on a non-directory
+		// correctly return the right error code.
+		if part == "" {
+			part = "."
+		}
+
+		// Apply the component lexically to the path we are building.
+		// currentPath does not contain any symlinks, and we are lexically
+		// dealing with a single component, so it's okay to do a filepath.Clean
+		// here.
+		nextPath := path.Join("/", currentPath, part)
+		// If we logically hit the root, just clone the root rather than
+		// opening the part and doing all of the other checks.
+		if nextPath == "/" {
+			if err := symStack.PopPart(part); err != nil {
+				return nil, "", fmt.Errorf("walking into root with part %q failed: %w", part, err)
+			}
+			// Jump to root.
+			rootClone, err := dupFile(root)
+			if err != nil {
+				return nil, "", fmt.Errorf("clone root fd: %w", err)
+			}
+			_ = currentDir.Close()
+			currentDir = rootClone
+			currentPath = nextPath
+			continue
+		}
+
+		// Try to open the next component.
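+		// Opening with O_PATH|O_NOFOLLOW gives us a handle to the component
+		// itself even when it is a symlink, so symlinks are detected (via the
+		// Stat call below) and handled explicitly rather than being followed.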
+		nextDir, err := openatFile(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+		switch {
+		case err == nil:
+			st, err := nextDir.Stat()
+			if err != nil {
+				_ = nextDir.Close()
+				return nil, "", fmt.Errorf("stat component %q: %w", part, err)
+			}
+
+			switch st.Mode() & os.ModeType {
+			case os.ModeSymlink:
+				// readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See
+				// Linux commit 65cfc6722361 ("readlinkat(), fchownat() and
+				// fstatat() with empty relative pathnames").
+				linkDest, err := readlinkatFile(nextDir, "")
+				// We don't need the handle anymore.
+				_ = nextDir.Close()
+				if err != nil {
+					return nil, "", err
+				}
+
+				linksWalked++
+				if linksWalked > maxSymlinkLimit {
+					return nil, "", &os.PathError{Op: "securejoin.lookupInRoot", Path: logicalRootPath + "/" + unsafePath, Err: unix.ELOOP}
+				}
+
+				// Swap out the symlink's component for the link entry itself.
+				if err := symStack.SwapLink(part, currentDir, oldRemainingPath, linkDest); err != nil {
+					return nil, "", fmt.Errorf("walking into symlink %q failed: push symlink: %w", part, err)
+				}
+
+				// Update our logical remaining path.
+				remainingPath = linkDest + "/" + remainingPath
+				// Absolute symlinks reset any work we've already done.
+				if path.IsAbs(linkDest) {
+					// Jump to root.
+					rootClone, err := dupFile(root)
+					if err != nil {
+						return nil, "", fmt.Errorf("clone root fd: %w", err)
+					}
+					_ = currentDir.Close()
+					currentDir = rootClone
+					currentPath = "/"
+				}
+
+			default:
+				// If we are dealing with a directory, simply walk into it.
+				_ = currentDir.Close()
+				currentDir = nextDir
+				currentPath = nextPath
+
+				// The part was real, so drop it from the symlink stack.
+				if err := symStack.PopPart(part); err != nil {
+					return nil, "", fmt.Errorf("walking into directory %q failed: %w", part, err)
+				}
+
+				// If we are operating on a .., make sure we haven't escaped.
+				// We only have to check for ".." here because walking down
+				// into a regular component cannot cause you to
+				// escape. This mirrors the logic in RESOLVE_IN_ROOT, except we
+				// have to check every ".." rather than only checking after a
+				// rename or mount on the system.
+				if part == ".." {
+					// Make sure the root hasn't moved.
+					if err := checkProcSelfFdPath(logicalRootPath, root); err != nil {
+						return nil, "", fmt.Errorf("root path moved during lookup: %w", err)
+					}
+					// Make sure the path is what we expect.
+					fullPath := logicalRootPath + nextPath
+					if err := checkProcSelfFdPath(fullPath, currentDir); err != nil {
+						return nil, "", fmt.Errorf("walking into %q had unexpected result: %w", part, err)
+					}
+				}
+			}
+
+		default:
+			if !partial {
+				return nil, "", err
+			}
+			// If there are any remaining components in the symlink stack, we
+			// are still within a symlink resolution and thus we hit a dangling
+			// symlink. So pretend that the first symlink in the stack we hit
+			// was an ENOENT (to match openat2).
+			if oldDir, remainingPath, ok := symStack.PopTopSymlink(); ok {
+				_ = currentDir.Close()
+				return oldDir, remainingPath, err
+			}
+			// We have hit a final component that doesn't exist, so we have our
+			// partial open result. Note that we have to use the OLD remaining
+			// path, since the lookup failed.
+			return currentDir, oldRemainingPath, err
+		}
+	}
+
+	// If the unsafePath had a trailing slash, we need to make sure we try to
+	// do a relative "." open so that we will correctly return an error when
+	// the final component is a non-directory (to match openat2). In the
+	// context of openat2, a trailing slash and a trailing "/."
are completely + // equivalent. + if strings.HasSuffix(unsafePath, "/") { + nextDir, err := openatFile(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) + if err != nil { + if !partial { + _ = currentDir.Close() + currentDir = nil + } + return currentDir, "", err + } + _ = currentDir.Close() + currentDir = nextDir + } + + // All of the components existed! + return currentDir, "", nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go new file mode 100644 index 000000000..ad2bd7973 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go @@ -0,0 +1,229 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "slices" + "strings" + + "golang.org/x/sys/unix" +) + +var ( + errInvalidMode = errors.New("invalid permission mode") + errPossibleAttack = errors.New("possible attack detected") +) + +// MkdirAllHandle is equivalent to MkdirAll, except that it is safer to use in +// two respects: +// +// - The caller provides the root directory as an *os.File (preferably O_PATH) +// handle. This means that the caller can be sure which root directory is +// being used. Note that this can be emulated by using /proc/self/fd/... as +// the root path with MkdirAll. +// +// - Once all of the directories have been created, an *os.File (O_PATH) handle +// to the directory at unsafePath is returned to the caller. This is done in +// an effectively-race-free way (an attacker would only be able to swap the +// final directory component), which is not possible to emulate with +// MkdirAll. +// +// In addition, the returned handle is obtained far more efficiently than doing +// a brand new lookup of unsafePath (such as with SecureJoin or openat2) after +// doing MkdirAll. If you intend to open the directory after creating it, you +// should use MkdirAllHandle. +func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err error) { + // Make sure there are no os.FileMode bits set. + if mode&^0o7777 != 0 { + return nil, fmt.Errorf("%w for mkdir 0o%.3o", errInvalidMode, mode) + } + + // Try to open as much of the path as possible. + currentDir, remainingPath, err := partialLookupInRoot(root, unsafePath) + defer func() { + if Err != nil { + _ = currentDir.Close() + } + }() + if err != nil && !errors.Is(err, unix.ENOENT) { + return nil, fmt.Errorf("find existing subpath of %q: %w", unsafePath, err) + } + + // If there is an attacker deleting directories as we walk into them, + // detect this proactively. Note this is guaranteed to detect if the + // attacker deleted any part of the tree up to currentDir. + // + // Once we walk into a dead directory, partialLookupInRoot would not be + // able to walk further down the tree (directories must be empty before + // they are deleted), and if the attacker has removed the entire tree we + // can be sure that anything that was originally inside a dead directory + // must also be deleted and thus is a dead directory in its own right. + // + // This is mostly a quality-of-life check, because mkdir will simply fail + // later if the attacker deletes the tree after this check. 
+	if err := isDeadInode(currentDir); err != nil {
+		return nil, fmt.Errorf("finding existing subpath of %q: %w", unsafePath, err)
+	}
+
+	// Re-open the path to match the O_DIRECTORY reopen loop later (so that we
+	// always return a non-O_PATH handle). We also check that we actually got a
+	// directory.
+	if reopenDir, err := Reopen(currentDir, unix.O_DIRECTORY|unix.O_CLOEXEC); errors.Is(err, unix.ENOTDIR) {
+		return nil, fmt.Errorf("cannot create subdirectories in %q: %w", currentDir.Name(), unix.ENOTDIR)
+	} else if err != nil {
+		return nil, fmt.Errorf("re-opening handle to %q: %w", currentDir.Name(), err)
+	} else {
+		_ = currentDir.Close()
+		currentDir = reopenDir
+	}
+
+	remainingParts := strings.Split(remainingPath, string(filepath.Separator))
+	if slices.Contains(remainingParts, "..") {
+		// The path contained ".." components after the end of the "real"
+		// components. We could try to safely resolve ".." here but that would
+		// add a bunch of extra logic for something that it's not clear even
+		// needs to be supported. So just return an error.
+		//
+		// If we do filepath.Clean(remainingPath) then we end up with the
+		// problem that ".." can erase a trailing dangling symlink and produce
+		// a path that doesn't quite match what the user asked for.
+		return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath)
+	}
+
+	// Make sure the mode doesn't have any type bits.
+	mode &^= unix.S_IFMT
+	// What properties do we expect any newly created directories to have?
+	var (
+		// While umask(2) is a per-thread property, and thus this value could
+		// vary between threads, a functioning Go program would LockOSThread
+		// threads with different umasks and so we don't need to LockOSThread
+		// for this entire mkdirat loop (if we are in the locked thread with a
+		// different umask, we are already locked and there's nothing for us to
+		// do -- and if not then it doesn't matter which thread we run on and
+		// there's nothing for us to do).
+		expectedMode = uint32(unix.S_IFDIR | (mode &^ getUmask()))
+
+		// We would want to get the fs[ug]id here, but we can't access those
+		// from userspace. In practice, nobody uses setfs[ug]id() anymore, so
+		// just use the effective [ug]id (which is equivalent to the fs[ug]id
+		// for programs that don't use setfs[ug]id).
+		expectedUid = uint32(unix.Geteuid())
+		expectedGid = uint32(unix.Getegid())
+	)
+
+	// Create the remaining components.
+	for _, part := range remainingParts {
+		switch part {
+		case "", ".":
+			// Skip over no-op paths.
+			continue
+		}
+
+		// NOTE: mkdir(2) will not follow trailing symlinks, so we can safely
+		// create the final component without worrying about symlink-exchange
+		// attacks.
+		if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil {
+			err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err}
+			// Make the error a bit nicer if the directory is dead.
+			if err2 := isDeadInode(currentDir); err2 != nil {
+				err = fmt.Errorf("%w (%w)", err, err2)
+			}
+			return nil, err
+		}
+
+		// Get a handle to the next component. O_DIRECTORY means we don't need
+		// to use O_PATH.
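+		// Where openat2(2) is available, also ask the kernel itself to refuse
+		// any symlink or cross-mount resolution for this single component
+		// (the RESOLVE_* flags below).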
+		var nextDir *os.File
+		if hasOpenat2() {
+			nextDir, err = openat2File(currentDir, part, &unix.OpenHow{
+				Flags:   unix.O_NOFOLLOW | unix.O_DIRECTORY | unix.O_CLOEXEC,
+				Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_NO_XDEV,
+			})
+		} else {
+			nextDir, err = openatFile(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+		}
+		if err != nil {
+			return nil, err
+		}
+		_ = currentDir.Close()
+		currentDir = nextDir
+
+		// Make sure that the directory matches what we expect. An attacker
+		// could have swapped the directory between us making it and opening
+		// it. There's no way for us to be sure that the directory is
+		// _precisely_ the same as the directory we created, but if we are in
+		// an empty directory with the same owner and mode as the one we
+		// created then there is nothing the attacker could do with this new
+		// directory that they couldn't do with the old one.
+		if stat, err := fstat(currentDir); err != nil {
+			return nil, fmt.Errorf("check newly created directory: %w", err)
+		} else {
+			if stat.Mode != expectedMode {
+				return nil, fmt.Errorf("%w: newly created directory %q has incorrect mode 0o%.3o (expected 0o%.3o)", errPossibleAttack, currentDir.Name(), stat.Mode, expectedMode)
+			}
+			if stat.Uid != expectedUid || stat.Gid != expectedGid {
+				return nil, fmt.Errorf("%w: newly created directory %q has incorrect owner %d:%d (expected %d:%d)", errPossibleAttack, currentDir.Name(), stat.Uid, stat.Gid, expectedUid, expectedGid)
+			}
+			// Check that the directory is empty. We only need to check for
+			// a single entry, and we should get EOF if the directory is
+			// empty.
+			_, err := currentDir.Readdirnames(1)
+			if !errors.Is(err, io.EOF) {
+				if err == nil {
+					err = fmt.Errorf("%w: newly created directory %q is non-empty", errPossibleAttack, currentDir.Name())
+				}
+				return nil, fmt.Errorf("check if newly created directory %q is empty: %w", currentDir.Name(), err)
+			}
+			// Reset the offset.
+			_, _ = currentDir.Seek(0, unix.SEEK_SET)
+		}
+	}
+	return currentDir, nil
+}
+
+// MkdirAll is a race-safe alternative to the Go stdlib's os.MkdirAll function,
+// where the new directory is guaranteed to be within the root directory (if an
+// attacker can move directories from inside the root to outside the root, the
+// created directory tree might be outside of the root but the key constraint
+// is that at no point will we walk outside of the directory tree we are
+// creating).
+//
+// Effectively, MkdirAll(root, unsafePath, mode) is equivalent to
+//
+//	path, _ := securejoin.SecureJoin(root, unsafePath)
+//	err := os.MkdirAll(path, mode)
+//
+// But is much safer. The above implementation is unsafe because if an attacker
+// can modify the filesystem tree between SecureJoin and MkdirAll, it is
+// possible for MkdirAll to resolve unsafe symlink components and create
+// directories outside of the root.
+//
+// If you plan to open the directory after you have created it or want to use
+// an open directory handle as the root, you should use MkdirAllHandle instead.
+// This function is a wrapper around MkdirAllHandle.
+//
+// NOTE: The mode argument must be set to the unix mode bits (unix.S_I...), not
+// the Go generic mode bits (os.Mode...).
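+//
+// A minimal usage sketch (the root and subpath below are hypothetical values,
+// shown purely for illustration):
+//
+//	// Create <root>/untrusted/a/b even if "untrusted" contains
+//	// attacker-controlled symlinks.
+//	if err := securejoin.MkdirAll("/srv/rootfs", "untrusted/a/b", 0o755); err != nil {
+//		log.Fatal(err)
+//	}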
+func MkdirAll(root, unsafePath string, mode int) error { + rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + if err != nil { + return err + } + defer rootDir.Close() + + f, err := MkdirAllHandle(rootDir, unsafePath, mode) + if err != nil { + return err + } + _ = f.Close() + return nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go new file mode 100644 index 000000000..52dce76f3 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go @@ -0,0 +1,101 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "fmt" + "os" + "strconv" + + "golang.org/x/sys/unix" +) + +// OpenatInRoot is equivalent to OpenInRoot, except that the root is provided +// using an *os.File handle, to ensure that the correct root directory is used. +func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) { + handle, err := completeLookupInRoot(root, unsafePath) + if err != nil { + return nil, &os.PathError{Op: "securejoin.OpenInRoot", Path: unsafePath, Err: err} + } + return handle, nil +} + +// OpenInRoot safely opens the provided unsafePath within the root. +// Effectively, OpenInRoot(root, unsafePath) is equivalent to +// +// path, _ := securejoin.SecureJoin(root, unsafePath) +// handle, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC) +// +// But is much safer. The above implementation is unsafe because if an attacker +// can modify the filesystem tree between SecureJoin and OpenFile, it is +// possible for the returned file to be outside of the root. +// +// Note that the returned handle is an O_PATH handle, meaning that only a very +// limited set of operations will work on the handle. This is done to avoid +// accidentally opening an untrusted file that could cause issues (such as a +// disconnected TTY that could cause a DoS, or some other issue). In order to +// use the returned handle, you can "upgrade" it to a proper handle using +// Reopen. +func OpenInRoot(root, unsafePath string) (*os.File, error) { + rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + defer rootDir.Close() + return OpenatInRoot(rootDir, unsafePath) +} + +// Reopen takes an *os.File handle and re-opens it through /proc/self/fd. +// Reopen(file, flags) is effectively equivalent to +// +// fdPath := fmt.Sprintf("/proc/self/fd/%d", file.Fd()) +// os.OpenFile(fdPath, flags|unix.O_CLOEXEC) +// +// But with some extra hardenings to ensure that we are not tricked by a +// maliciously-configured /proc mount. While this attack scenario is not +// common, in container runtimes it is possible for higher-level runtimes to be +// tricked into configuring an unsafe /proc that can be used to attack file +// operations. See CVE-2019-19921 for more details. +func Reopen(handle *os.File, flags int) (*os.File, error) { + procRoot, err := getProcRoot() + if err != nil { + return nil, err + } + + // We can't operate on /proc/thread-self/fd/$n directly when doing a + // re-open, so we need to open /proc/thread-self/fd and then open a single + // final component. 
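+	// NOTE: procThreadSelf locks the current OS thread; the closer it returns
+	// must be called once we are done with the handle so that the thread is
+	// unlocked again.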
+ procFdDir, closer, err := procThreadSelf(procRoot, "fd/") + if err != nil { + return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err) + } + defer procFdDir.Close() + defer closer() + + // Try to detect if there is a mount on top of the magic-link we are about + // to open. If we are using unsafeHostProcRoot(), this could change after + // we check it (and there's nothing we can do about that) but for + // privateProcRoot() this should be guaranteed to be safe (at least since + // Linux 5.12[1], when anonymous mount namespaces were completely isolated + // from external mounts including mount propagation events). + // + // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts + // onto targets that reside on shared mounts"). + fdStr := strconv.Itoa(int(handle.Fd())) + if err := checkSymlinkOvermount(procRoot, procFdDir, fdStr); err != nil { + return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err) + } + + flags |= unix.O_CLOEXEC + // Rather than just wrapping openatFile, open-code it so we can copy + // handle.Name(). + reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0) + if err != nil { + return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err) + } + return os.NewFile(uintptr(reopenFd), handle.Name()), nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go new file mode 100644 index 000000000..921b3e1d4 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go @@ -0,0 +1,141 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "testing" + + "golang.org/x/sys/unix" +) + +var ( + hasOpenat2Bool bool + hasOpenat2Once sync.Once + + testingForceHasOpenat2 *bool +) + +func hasOpenat2() bool { + if testing.Testing() && testingForceHasOpenat2 != nil { + return *testingForceHasOpenat2 + } + hasOpenat2Once.Do(func() { + fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, + }) + if err == nil { + hasOpenat2Bool = true + _ = unix.Close(fd) + } + }) + return hasOpenat2Bool +} + +func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool { + // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve + // ".." while a mount or rename occurs anywhere on the system. This could + // happen spuriously, or as the result of an attacker trying to mess with + // us during lookup. + // + // In addition, scoped lookups have a "safety check" at the end of + // complete_walk which will return -EXDEV if the final path is not in the + // root. + return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 && + (errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV)) +} + +const scopedLookupMaxRetries = 10 + +func openat2File(dir *os.File, path string, how *unix.OpenHow) (*os.File, error) { + fullPath := dir.Name() + "/" + path + // Make sure we always set O_CLOEXEC. 
+ how.Flags |= unix.O_CLOEXEC + var tries int + for tries < scopedLookupMaxRetries { + fd, err := unix.Openat2(int(dir.Fd()), path, how) + if err != nil { + if scopedLookupShouldRetry(how, err) { + // We retry a couple of times to avoid the spurious errors, and + // if we are being attacked then returning -EAGAIN is the best + // we can do. + tries++ + continue + } + return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err} + } + // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong. + // NOTE: The procRoot code MUST NOT use RESOLVE_IN_ROOT, otherwise + // you'll get infinite recursion here. + if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT { + if actualPath, err := rawProcSelfFdReadlink(fd); err == nil { + fullPath = actualPath + } + } + return os.NewFile(uintptr(fd), fullPath), nil + } + return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: errPossibleAttack} +} + +func lookupOpenat2(root *os.File, unsafePath string, partial bool) (*os.File, string, error) { + if !partial { + file, err := openat2File(root, unsafePath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, + }) + return file, "", err + } + return partialLookupOpenat2(root, unsafePath) +} + +// partialLookupOpenat2 is an alternative implementation of +// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a +// handle to the deepest existing child of the requested path within the root. +func partialLookupOpenat2(root *os.File, unsafePath string) (*os.File, string, error) { + // TODO: Implement this as a git-bisect-like binary search. + + unsafePath = filepath.ToSlash(unsafePath) // noop + endIdx := len(unsafePath) + var lastError error + for endIdx > 0 { + subpath := unsafePath[:endIdx] + + handle, err := openat2File(root, subpath, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, + }) + if err == nil { + // Jump over the slash if we have a non-"" remainingPath. + if endIdx < len(unsafePath) { + endIdx += 1 + } + // We found a subpath! + return handle, unsafePath[endIdx:], lastError + } + if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) { + // That path doesn't exist, let's try the next directory up. + endIdx = strings.LastIndexByte(subpath, '/') + lastError = err + continue + } + return nil, "", fmt.Errorf("open subpath: %w", err) + } + // If we couldn't open anything, the whole subpath is missing. Return a + // copy of the root fd so that the caller doesn't close this one by + // accident. + rootClone, err := dupFile(root) + if err != nil { + return nil, "", err + } + return rootClone, unsafePath, lastError +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go new file mode 100644 index 000000000..949fb5f2d --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go @@ -0,0 +1,59 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
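+//
+// This file contains thin *os.File wrappers around the raw openat(2),
+// fstatat(2), readlinkat(2) and fcntl(F_DUPFD_CLOEXEC) syscalls used
+// elsewhere in this package.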
+ +package securejoin + +import ( + "os" + "path/filepath" + + "golang.org/x/sys/unix" +) + +func dupFile(f *os.File) (*os.File, error) { + fd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0) + if err != nil { + return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err) + } + return os.NewFile(uintptr(fd), f.Name()), nil +} + +func openatFile(dir *os.File, path string, flags int, mode int) (*os.File, error) { + // Make sure we always set O_CLOEXEC. + flags |= unix.O_CLOEXEC + fd, err := unix.Openat(int(dir.Fd()), path, flags, uint32(mode)) + if err != nil { + return nil, &os.PathError{Op: "openat", Path: dir.Name() + "/" + path, Err: err} + } + // All of the paths we use with openatFile(2) are guaranteed to be + // lexically safe, so we can use path.Join here. + fullPath := filepath.Join(dir.Name(), path) + return os.NewFile(uintptr(fd), fullPath), nil +} + +func fstatatFile(dir *os.File, path string, flags int) (unix.Stat_t, error) { + var stat unix.Stat_t + if err := unix.Fstatat(int(dir.Fd()), path, &stat, flags); err != nil { + return stat, &os.PathError{Op: "fstatat", Path: dir.Name() + "/" + path, Err: err} + } + return stat, nil +} + +func readlinkatFile(dir *os.File, path string) (string, error) { + size := 4096 + for { + linkBuf := make([]byte, size) + n, err := unix.Readlinkat(int(dir.Fd()), path, linkBuf) + if err != nil { + return "", &os.PathError{Op: "readlinkat", Path: dir.Name() + "/" + path, Err: err} + } + if n != size { + return string(linkBuf[:n]), nil + } + // Possible truncation, resize the buffer. + size *= 2 + } +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go new file mode 100644 index 000000000..adf0bd08f --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go @@ -0,0 +1,474 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "errors" + "fmt" + "os" + "runtime" + "strconv" + "sync" + + "golang.org/x/sys/unix" +) + +func fstat(f *os.File) (unix.Stat_t, error) { + var stat unix.Stat_t + if err := unix.Fstat(int(f.Fd()), &stat); err != nil { + return stat, &os.PathError{Op: "fstat", Path: f.Name(), Err: err} + } + return stat, nil +} + +func fstatfs(f *os.File) (unix.Statfs_t, error) { + var statfs unix.Statfs_t + if err := unix.Fstatfs(int(f.Fd()), &statfs); err != nil { + return statfs, &os.PathError{Op: "fstatfs", Path: f.Name(), Err: err} + } + return statfs, nil +} + +// The kernel guarantees that the root inode of a procfs mount has an +// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO. 
+const (
+	procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC
+	procRootIno    = 1      // PROC_ROOT_INO
+)
+
+func verifyProcRoot(procRoot *os.File) error {
+	if statfs, err := fstatfs(procRoot); err != nil {
+		return err
+	} else if statfs.Type != procSuperMagic {
+		return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type)
+	}
+	if stat, err := fstat(procRoot); err != nil {
+		return err
+	} else if stat.Ino != procRootIno {
+		return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino)
+	}
+	return nil
+}
+
+var (
+	hasNewMountApiBool bool
+	hasNewMountApiOnce sync.Once
+)
+
+func hasNewMountApi() bool {
+	hasNewMountApiOnce.Do(func() {
+		// All of the pieces of the new mount API we use (fsopen, fsconfig,
+		// fsmount, open_tree) were added together in Linux 5.1[1,2], so we can
+		// just check for one of the syscalls and the others should also be
+		// available.
+		//
+		// Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE.
+		// This is equivalent to openat(2), but tells us if open_tree is
+		// available (and thus all of the other basic new mount API syscalls).
+		// open_tree(2) is the most light-weight syscall to test here.
+		//
+		// [1]: merge commit 400913252d09
+		// [2]:
+		fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC)
+		if err == nil {
+			hasNewMountApiBool = true
+			_ = unix.Close(fd)
+		}
+	})
+	return hasNewMountApiBool
+}
+
+func fsopen(fsName string, flags int) (*os.File, error) {
+	// Make sure we always set O_CLOEXEC.
+	flags |= unix.FSOPEN_CLOEXEC
+	fd, err := unix.Fsopen(fsName, flags)
+	if err != nil {
+		return nil, os.NewSyscallError("fsopen "+fsName, err)
+	}
+	return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil
+}
+
+func fsmount(ctx *os.File, flags, mountAttrs int) (*os.File, error) {
+	// Make sure we always set O_CLOEXEC.
+	flags |= unix.FSMOUNT_CLOEXEC
+	fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs)
+	if err != nil {
+		return nil, os.NewSyscallError("fsmount "+ctx.Name(), err)
+	}
+	return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil
+}
+
+func newPrivateProcMount() (*os.File, error) {
+	procfsCtx, err := fsopen("proc", unix.FSOPEN_CLOEXEC)
+	if err != nil {
+		return nil, err
+	}
+	defer procfsCtx.Close()
+
+	// Try to configure hidepid=ptraceable,subset=pid if possible, but ignore errors.
+	_ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable")
+	_ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid")
+
+	// Get an actual handle.
+	if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil {
+		return nil, os.NewSyscallError("fsconfig create procfs", err)
+	}
+	return fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_RDONLY|unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID)
+}
+
+func openTree(dir *os.File, path string, flags uint) (*os.File, error) {
+	dirFd := -int(unix.EBADF)
+	dirName := "."
+	if dir != nil {
+		dirFd = int(dir.Fd())
+		dirName = dir.Name()
+	}
+	// Make sure we always set O_CLOEXEC.
+	flags |= unix.OPEN_TREE_CLOEXEC
+	fd, err := unix.OpenTree(dirFd, path, flags)
+	if err != nil {
+		return nil, &os.PathError{Op: "open_tree", Path: path, Err: err}
+	}
+	return os.NewFile(uintptr(fd), dirName+"/"+path), nil
+}
+
+func clonePrivateProcMount() (_ *os.File, Err error) {
+	// Try to make a clone without using AT_RECURSIVE if we can. If this works,
+	// we can be sure there are no over-mounts and so if the root is valid then
+	// we're golden. Otherwise, we have to deal with over-mounts.
+	procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE)
+	if err != nil || testingForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) {
+		procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("creating a detached procfs clone: %w", err)
+	}
+	defer func() {
+		if Err != nil {
+			_ = procfsHandle.Close()
+		}
+	}()
+	if err := verifyProcRoot(procfsHandle); err != nil {
+		return nil, err
+	}
+	return procfsHandle, nil
+}
+
+func privateProcRoot() (*os.File, error) {
+	if !hasNewMountApi() || testingForceGetProcRootUnsafe() {
+		return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP)
+	}
+	// Try to create a new procfs mount from scratch if we can. This ensures we
+	// can get a procfs mount even if /proc is fake (for whatever reason).
+	procRoot, err := newPrivateProcMount()
+	if err != nil || testingForcePrivateProcRootOpenTree(procRoot) {
+		// Try to clone /proc then...
+		procRoot, err = clonePrivateProcMount()
+	}
+	return procRoot, err
+}
+
+var (
+	procRootHandle *os.File
+	procRootError  error
+	procRootOnce   sync.Once
+
+	errUnsafeProcfs = errors.New("unsafe procfs detected")
+)
+
+func unsafeHostProcRoot() (_ *os.File, Err error) {
+	procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if Err != nil {
+			_ = procRoot.Close()
+		}
+	}()
+	if err := verifyProcRoot(procRoot); err != nil {
+		return nil, err
+	}
+	return procRoot, nil
+}
+
+func doGetProcRoot() (*os.File, error) {
+	procRoot, err := privateProcRoot()
+	if err != nil {
+		// Fall back to using a /proc handle if making a private mount failed.
+		// If we have openat2, at least we can avoid some kinds of over-mount
+		// attacks, but without openat2 there's not much we can do.
+		procRoot, err = unsafeHostProcRoot()
+	}
+	return procRoot, err
+}
+
+func getProcRoot() (*os.File, error) {
+	procRootOnce.Do(func() {
+		procRootHandle, procRootError = doGetProcRoot()
+	})
+	return procRootHandle, procRootError
+}
+
+var (
+	haveProcThreadSelf     bool
+	haveProcThreadSelfOnce sync.Once
+)
+
+type procThreadSelfCloser func()
+
+// procThreadSelf returns a handle to /proc/thread-self/<subpath> (or an
+// equivalent handle on older kernels where /proc/thread-self doesn't exist).
+// Once finished with the handle, you must call the returned closer function
+// (runtime.UnlockOSThread). You must not pass the returned *os.File to other
+// Go threads or use the handle after calling the closer.
+//
+// This is similar to ProcThreadSelf from runc, but with extra hardening
+// applied and using *os.File.
+func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) {
+	haveProcThreadSelfOnce.Do(func() {
+		// If the kernel doesn't support thread-self, it doesn't matter which
+		// /proc handle we use.
+		_, err := fstatatFile(procRoot, "thread-self", unix.AT_SYMLINK_NOFOLLOW)
+		haveProcThreadSelf = (err == nil)
+	})
+
+	// We need to lock our thread until the caller is done with the handle
+	// because between getting the handle and using it we could get interrupted
+	// by the Go runtime and hit the case where the underlying thread is
+	// swapped out and the original thread is killed, resulting in
+	// pull-your-hair-out-hard-to-debug issues in the caller.
+	runtime.LockOSThread()
+	defer func() {
+		if Err != nil {
+			runtime.UnlockOSThread()
+		}
+	}()
+
+	// Figure out what prefix we want to use.
+	threadSelf := "thread-self/"
+	if !haveProcThreadSelf || testingForceProcSelfTask() {
+		// Pre-3.17 kernels don't have /proc/thread-self, so do it manually.
+		threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/"
+		if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || testingForceProcSelf() {
+			// In this case, we are running in a pid namespace that doesn't
+			// match the /proc mount we have. This can happen inside runc.
+			//
+			// Unfortunately, there is no nice way to get the correct TID to
+			// use here because of the age of the kernel, so we have to just
+			// use /proc/self and hope that it works.
+			threadSelf = "self/"
+		}
+	}
+
+	// Grab the handle.
+	var (
+		handle *os.File
+		err    error
+	)
+	if hasOpenat2() {
+		// We prefer being able to use RESOLVE_NO_XDEV if we can, to be
+		// absolutely sure we are operating on a clean /proc handle that
+		// doesn't have any cheeky overmounts that could trick us (including
+		// symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't
+		// strictly needed, but just use it since we have it.
+		//
+		// NOTE: /proc/self is technically a magic-link (the contents of the
+		// symlink are generated dynamically), but it doesn't use
+		// nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it.
+		//
+		// NOTE: We MUST NOT use RESOLVE_IN_ROOT here, as openat2File uses
+		// procSelfFdReadlink to clean up the returned f.Name() if we use
+		// RESOLVE_IN_ROOT (which would lead to an infinite recursion).
+		handle, err = openat2File(procRoot, threadSelf+subpath, &unix.OpenHow{
+			Flags:   unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC,
+			Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS,
+		})
+		if err != nil {
+			return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err)
+		}
+	} else {
+		handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+		if err != nil {
+			return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err)
+		}
+		defer func() {
+			if Err != nil {
+				_ = handle.Close()
+			}
+		}()
+		// We can't detect bind-mounts of different parts of procfs on top of
+		// /proc (a-la RESOLVE_NO_XDEV), but we can at least be sure that we
+		// aren't on the wrong filesystem here.
+		if statfs, err := fstatfs(handle); err != nil {
+			return nil, nil, err
+		} else if statfs.Type != procSuperMagic {
+			return nil, nil, fmt.Errorf("%w: incorrect /proc/self/fd filesystem type 0x%x", errUnsafeProcfs, statfs.Type)
+		}
+	}
+	return handle, runtime.UnlockOSThread, nil
+}
+
+var (
+	hasStatxMountIdBool bool
+	hasStatxMountIdOnce sync.Once
+)
+
+func hasStatxMountId() bool {
+	hasStatxMountIdOnce.Do(func() {
+		var (
+			stx unix.Statx_t
+			// We don't care which mount ID we get. The kernel will give us the
+			// unique one if it is supported.
+			wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
+		)
+		err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx)
+		hasStatxMountIdBool = (err == nil && (stx.Mask&wantStxMask != 0))
+	})
+	return hasStatxMountIdBool
+}
+
+func getMountId(dir *os.File, path string) (uint64, error) {
+	// If we don't have statx(STATX_MNT_ID*) support, we can't do anything.
+	if !hasStatxMountId() {
+		return 0, nil
+	}
+
+	var (
+		stx unix.Statx_t
+		// We don't care which mount ID we get. The kernel will give us the
+		// unique one if it is supported.
+		wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
+	)
+
+	err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx)
+	if stx.Mask&wantStxMask == 0 {
+		// It's not a kernel limitation; for some reason we couldn't get a
+		// mount ID. Assume it's some kind of attack.
+		err = fmt.Errorf("%w: could not get mount id", errUnsafeProcfs)
+	}
+	if err != nil {
+		return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: dir.Name() + "/" + path, Err: err}
+	}
+	return stx.Mnt_id, nil
+}
+
+func checkSymlinkOvermount(procRoot *os.File, dir *os.File, path string) error {
+	// Get the mntId of our procfs handle.
+	expectedMountId, err := getMountId(procRoot, "")
+	if err != nil {
+		return err
+	}
+	// Get the mntId of the target magic-link.
+	gotMountId, err := getMountId(dir, path)
+	if err != nil {
+		return err
+	}
+	// As long as the directory mount is alive, even with wrapping mount IDs,
+	// we would expect to see a different mount ID here. (Of course, if we're
+	// using unsafeHostProcRoot() then an attacker could change this after we
+	// did this check.)
+	if expectedMountId != gotMountId {
+		return fmt.Errorf("%w: symlink %s/%s has an overmount obscuring the real link (mount ids do not match %d != %d)", errUnsafeProcfs, dir.Name(), path, expectedMountId, gotMountId)
+	}
+	return nil
+}
+
+func doRawProcSelfFdReadlink(procRoot *os.File, fd int) (string, error) {
+	fdPath := fmt.Sprintf("fd/%d", fd)
+	procFdLink, closer, err := procThreadSelf(procRoot, fdPath)
+	if err != nil {
+		return "", fmt.Errorf("get safe /proc/thread-self/%s handle: %w", fdPath, err)
+	}
+	defer procFdLink.Close()
+	defer closer()
+
+	// Try to detect if there is a mount on top of the magic-link. Since we
+	// use the handle directly, this should be safe in general (a mount on top
+	// of the path afterwards would not affect the handle itself) and will
+	// definitely be safe if we are using privateProcRoot() (at least since
+	// Linux 5.12[1], when anonymous mount namespaces were completely isolated
+	// from external mounts including mount propagation events).
+	//
+	// [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts
+	// onto targets that reside on shared mounts").
+	if err := checkSymlinkOvermount(procRoot, procFdLink, ""); err != nil {
+		return "", fmt.Errorf("check safety of /proc/thread-self/fd/%d magiclink: %w", fd, err)
+	}
+
+	// readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit
+	// 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty
+	// relative pathnames").
+	return readlinkatFile(procFdLink, "")
+}
+
+func rawProcSelfFdReadlink(fd int) (string, error) {
+	procRoot, err := getProcRoot()
+	if err != nil {
+		return "", err
+	}
+	return doRawProcSelfFdReadlink(procRoot, fd)
+}
+
+func procSelfFdReadlink(f *os.File) (string, error) {
+	return rawProcSelfFdReadlink(int(f.Fd()))
+}
+
+var (
+	errPossibleBreakout = errors.New("possible breakout detected")
+	errInvalidDirectory = errors.New("wandered into deleted directory")
+	errDeletedInode     = errors.New("cannot verify path of deleted inode")
+)
+
+func isDeadInode(file *os.File) error {
+	// If the nlink of a file drops to 0, there is an attacker deleting
+	// directories during our walk, which could result in weird /proc values.
+	// It's better to error out in this case.
+ stat, err := fstat(file) + if err != nil { + return fmt.Errorf("check for dead inode: %w", err) + } + if stat.Nlink == 0 { + err := errDeletedInode + if stat.Mode&unix.S_IFMT == unix.S_IFDIR { + err = errInvalidDirectory + } + return fmt.Errorf("%w %q", err, file.Name()) + } + return nil +} + +func getUmask() int { + // umask is a per-thread property, but it is inherited by children, so we + // need to lock our OS thread to make sure that no other goroutine runs in + // this thread and no goroutines are spawned from this thread until we + // revert to the old umask. + // + // We could parse /proc/self/status to avoid this get-set problem, but + // /proc/thread-self requires LockOSThread anyway, so there's no real + // benefit over just using umask(2). + runtime.LockOSThread() + umask := unix.Umask(0) + unix.Umask(umask) + runtime.UnlockOSThread() + return umask +} + +func checkProcSelfFdPath(path string, file *os.File) error { + if err := isDeadInode(file); err != nil { + return err + } + actualPath, err := procSelfFdReadlink(file) + if err != nil { + return fmt.Errorf("get path of handle: %w", err) + } + if actualPath != path { + return fmt.Errorf("%w: handle path %q doesn't match expected path %q", errPossibleBreakout, actualPath, path) + } + return nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go b/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go new file mode 100644 index 000000000..a3aedf03d --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go @@ -0,0 +1,68 @@ +//go:build linux + +// Copyright (C) 2024 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import ( + "os" + "testing" +) + +type forceGetProcRootLevel int + +const ( + forceGetProcRootDefault forceGetProcRootLevel = iota + forceGetProcRootOpenTree // force open_tree() + forceGetProcRootOpenTreeAtRecursive // force open_tree(AT_RECURSIVE) + forceGetProcRootUnsafe // force open() +) + +var testingForceGetProcRoot *forceGetProcRootLevel + +func testingCheckClose(check bool, f *os.File) bool { + if check { + if f != nil { + _ = f.Close() + } + return true + } + return false +} + +func testingForcePrivateProcRootOpenTree(f *os.File) bool { + return testing.Testing() && testingForceGetProcRoot != nil && + testingCheckClose(*testingForceGetProcRoot >= forceGetProcRootOpenTree, f) +} + +func testingForcePrivateProcRootOpenTreeAtRecursive(f *os.File) bool { + return testing.Testing() && testingForceGetProcRoot != nil && + testingCheckClose(*testingForceGetProcRoot >= forceGetProcRootOpenTreeAtRecursive, f) +} + +func testingForceGetProcRootUnsafe() bool { + return testing.Testing() && testingForceGetProcRoot != nil && + *testingForceGetProcRoot >= forceGetProcRootUnsafe +} + +type forceProcThreadSelfLevel int + +const ( + forceProcThreadSelfDefault forceProcThreadSelfLevel = iota + forceProcSelfTask + forceProcSelf +) + +var testingForceProcThreadSelf *forceProcThreadSelfLevel + +func testingForceProcSelfTask() bool { + return testing.Testing() && testingForceProcThreadSelf != nil && + *testingForceProcThreadSelf >= forceProcSelfTask +} + +func testingForceProcSelf() bool { + return testing.Testing() && testingForceProcThreadSelf != nil && + *testingForceProcThreadSelf >= forceProcSelf +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go index a82a5eae1..6e27c7dd8 
100644 --- a/vendor/github.com/cyphar/filepath-securejoin/vfs.go +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -1,4 +1,4 @@ -// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS index 483743c99..d6d23b3de 100644 --- a/vendor/github.com/docker/cli/AUTHORS +++ b/vendor/github.com/docker/cli/AUTHORS @@ -2,6 +2,7 @@ # This file lists all contributors to the repository. # See scripts/docs/generate-authors.sh to make modifications. +A. Lester Buck III Aanand Prasad Aaron L. Xu Aaron Lehmann @@ -16,6 +17,7 @@ Adolfo Ochagavía Adrian Plata Adrien Duermael Adrien Folie +Adyanth Hosavalike Ahmet Alp Balkan Aidan Feldman Aidan Hobson Sayers @@ -26,7 +28,7 @@ Akim Demaille Alan Thompson Albert Callarisa Alberto Roura -Albin Kerouanton +Albin Kerouanton Aleksa Sarai Aleksander Piotrowski Alessandro Boch @@ -34,6 +36,7 @@ Alex Couture-Beil Alex Mavrogiannis Alex Mayer Alexander Boyd +Alexander Chneerov Alexander Larsson Alexander Morozov Alexander Ryabov @@ -41,6 +44,7 @@ Alexandre González Alexey Igrychev Alexis Couvreur Alfred Landrum +Ali Rostami Alicia Lauerman Allen Sun Alvin Deng @@ -79,7 +83,9 @@ Arko Dasgupta Arnaud Porterie Arnaud Rebillout Arthur Peka +Ashly Mathew Ashwini Oruganti +Aslam Ahemad Azat Khuyiyakhmetov Bardia Keyoumarsi Barnaby Gray @@ -98,7 +104,9 @@ Bill Wang Bin Liu Bingshen Wang Bishal Das +Bjorn Neergaard Boaz Shuster +Boban Acimovic Bogdan Anton Boris Pruessmann Brad Baker @@ -109,6 +117,7 @@ Brent Salisbury Bret Fisher Brian (bex) Exelbierd Brian Goff +Brian Tracy Brian Wieder Bruno Sousa Bryan Bess @@ -136,6 +145,7 @@ Chen Chuanliang Chen Hanxiao Chen Mingjie Chen Qiu +Chris Chinchilla Chris Couzens Chris Gavin Chris Gibson @@ -163,6 +173,8 @@ Conner Crosby Corey Farrell Corey Quon Cory Bennet +Cory Snider +Craig Osterhout Craig Wilhite Cristian Staretu Daehyeok Mun @@ -171,6 +183,7 @@ Daisuke Ito dalanlan Damien Nadé Dan Cotora +Danial Gharib Daniel Artine Daniel Cassidy Daniel Dao @@ -210,6 +223,7 @@ Denis Defreyne Denis Gladkikh Denis Ollier Dennis Docter +dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Derek McGowan Des Preston Deshi Xiao @@ -232,11 +246,13 @@ DongGeon Lee Doug Davis Drew Erny Ed Costello +Ed Morley <501702+edmorley@users.noreply.github.com> Elango Sivanandam Eli Uriegas Eli Uriegas Elias Faxö Elliot Luo <956941328@qq.com> +Eric Bode Eric Curtin Eric Engestrom Eric G. 
Noriega @@ -254,6 +270,7 @@ Eugene Yakubovich Evan Allrich Evan Hazlett Evan Krall +Evan Lezar Evelyn Xu Everett Toews Fabio Falci @@ -275,6 +292,7 @@ Frederik Nordahl Jul Sabroe Frieder Bluemle Gabriel Gore Gabriel Nicolas Avellaneda +Gabriela Georgieva Gaetan de Villele Gang Qiao Gary Schaetz @@ -288,6 +306,7 @@ Gleb Stsenov Goksu Toprak Gou Rao Govind Rai +Graeme Wiebe Grant Reaber Greg Pflaum Gsealy @@ -311,6 +330,7 @@ Hernan Garcia Hongbin Lu Hu Keping Huayi Zhang +Hugo Chastel Hugo Gabriel Eyherabide huqun Huu Nguyen @@ -329,9 +349,12 @@ Ivan Grund Ivan Markin Jacob Atzen Jacob Tomlinson +Jacopo Rigoli Jaivish Kothari Jake Lambert Jake Sanders +Jake Stokes +Jakub Panek James Nesbitt James Turnbull Jamie Hannaford @@ -408,10 +431,12 @@ Josh Chorlton Josh Hawn Josh Horwitz Josh Soref +Julian Julien Barbier Julien Kassar Julien Maitrehenry Justas Brazauskas +Justin Chadwell Justin Cormack Justin Simonelis Justyn Temme @@ -434,7 +459,7 @@ Kelton Bassingthwaite Ken Cochrane Ken ICHIKAWA Kenfe-Mickaël Laventure -Kevin Alvarez +Kevin Alvarez Kevin Burke Kevin Feyrer Kevin Kern @@ -454,6 +479,7 @@ Kyle Mitofsky Lachlan Cooper Lai Jiangshan Lars Kellogg-Stedman +Laura Brehm Laura Frank Laurent Erignoux Lee Gaines @@ -462,10 +488,10 @@ Lennie Leo Gallucci Leonid Skorospelov Lewis Daly +Li Fu Bang Li Yi Li Yi Liang-Chi Hsieh -Lifubang Lihua Tang Lily Guo Lin Lu @@ -480,6 +506,7 @@ Louis Opter Luca Favatella Luca Marturana Lucas Chan +Luis Henrique Mulinari Luka Hartwig Lukas Heeren Lukasz Zajaczkowski @@ -498,6 +525,7 @@ mapk0y Marc Bihlmaier Marc Cornellà Marco Mariani +Marco Spiess Marco Vedovati Marcus Martins Marianna Tessel @@ -522,6 +550,7 @@ Max Shytikov Maxime Petazzoni Maximillian Fan Xavier Mei ChunTao +Melroy van den Berg Metal <2466052+tedhexaflow@users.noreply.github.com> Micah Zoltu Michael A. Smith @@ -593,6 +622,7 @@ Nishant Totla NIWA Hideyuki Noah Treuhaft O.S. 
Tezer +Oded Arbel Odin Ugedal ohmystack OKA Naoya @@ -604,19 +634,21 @@ Otto Kekäläinen Ovidio Mallo Pascal Borreli Patrick Böänziger +Patrick Daigle <114765035+pdaig@users.noreply.github.com> Patrick Hemmer Patrick Lang Paul Paul Kehrer Paul Lietar Paul Mulders +Paul Seyfert Paul Weaver Pavel Pospisil Paweł Gronowski Paweł Pokrywka Paweł Szczekutowicz Peeyush Gupta -Per Lundberg +Per Lundberg Peter Dave Hello Peter Edge Peter Hsu @@ -639,6 +671,7 @@ Preston Cowley Pure White Qiang Huang Qinglan Peng +QQ喵 qudongfang Raghavendra K T Rahul Kadyan @@ -657,6 +690,7 @@ Rick Wieman Ritesh H Shukla Riyaz Faizullabhoy Rob Gulewich +Rob Murray Robert Wallis Robin Naundorf Robin Speekenbrink @@ -689,6 +723,7 @@ Sandro Jäckel Santhosh Manohar Sargun Dhillon Saswat Bhattacharya +Saurabh Kumar Scott Brenner Scott Collier Sean Christopherson @@ -788,6 +823,7 @@ uhayate Ulrich Bareth Ulysses Souza Umesh Yadav +Vaclav Struhar Valentin Lorentz Vardan Pogosian Venkateswara Reddy Bukkasamudram @@ -795,6 +831,7 @@ Veres Lajos Victor Vieux Victoria Bialas Viktor Stanchev +Ville Skyttä Vimal Raghubir Vincent Batts Vincent Bernat @@ -831,6 +868,7 @@ Yong Tang Yosef Fertel Yu Peng Yuan Sun +Yucheng Wu Yue Zhang Yunxiang Huang Zachary Romero diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go index b7c05c3f8..952f6e71f 100644 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -16,32 +16,25 @@ import ( ) const ( - // ConfigFileName is the name of config file + // EnvOverrideConfigDir is the name of the environment variable that can be + // used to override the location of the client configuration files (~/.docker). + // + // It takes priority over the default, but can be overridden by the "--config" + // command line option. + EnvOverrideConfigDir = "DOCKER_CONFIG" + + // ConfigFileName is the name of the client configuration file inside the + // config-directory. ConfigFileName = "config.json" configFileDir = ".docker" - oldConfigfile = ".dockercfg" // Deprecated: remove once we stop printing deprecation warning contextsDir = "contexts" ) var ( initConfigDir = new(sync.Once) configDir string - homeDir string ) -// resetHomeDir is used in testing to reset the "homeDir" package variable to -// force re-lookup of the home directory between tests. -func resetHomeDir() { - homeDir = "" -} - -func getHomeDir() string { - if homeDir == "" { - homeDir = homedir.Get() - } - return homeDir -} - // resetConfigDir is used in testing to reset the "configDir" package variable // and its sync.Once to force re-lookup between tests. 
func resetConfigDir() { @@ -49,19 +42,14 @@ func resetConfigDir() { initConfigDir = new(sync.Once) } -func setConfigDir() { - if configDir != "" { - return - } - configDir = os.Getenv("DOCKER_CONFIG") - if configDir == "" { - configDir = filepath.Join(getHomeDir(), configFileDir) - } -} - // Dir returns the directory the configuration file is stored in func Dir() string { - initConfigDir.Do(setConfigDir) + initConfigDir.Do(func() { + configDir = os.Getenv(EnvOverrideConfigDir) + if configDir == "" { + configDir = filepath.Join(homedir.Get(), configFileDir) + } + }) return configDir } @@ -72,6 +60,8 @@ func ContextStoreDir() string { // SetDir sets the directory the configuration file is stored in func SetDir(dir string) { + // trigger the sync.Once to synchronise with Dir() + initConfigDir.Do(func() {}) configDir = filepath.Clean(dir) } @@ -96,55 +86,43 @@ func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { // Load reads the configuration files in the given directory, and sets up // the auth config information and returns values. -// FIXME: use the internal golang config parser func Load(configDir string) (*configfile.ConfigFile, error) { - cfg, _, err := load(configDir) - return cfg, err -} - -// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file -// so we can remove the bool return value and collapse this back into `Load` -func load(configDir string) (*configfile.ConfigFile, bool, error) { - printLegacyFileWarning := false - if configDir == "" { configDir = Dir() } + return load(configDir) +} +func load(configDir string) (*configfile.ConfigFile, error) { filename := filepath.Join(configDir, ConfigFileName) configFile := configfile.New(filename) - // Try happy path first - latest config file - if file, err := os.Open(filename); err == nil { - defer file.Close() - err = configFile.LoadFromReader(file) - if err != nil { - err = errors.Wrap(err, filename) + file, err := os.Open(filename) + if err != nil { + if os.IsNotExist(err) { + // + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return configFile, nil } - return configFile, printLegacyFileWarning, err - } else if !os.IsNotExist(err) { // if file is there but we can't stat it for any reason other // than it doesn't exist then stop - return configFile, printLegacyFileWarning, errors.Wrap(err, filename) + return configFile, nil } - - // Can't find latest config file so check for the old one - filename = filepath.Join(getHomeDir(), oldConfigfile) - if _, err := os.Stat(filename); err == nil { - printLegacyFileWarning = true + defer file.Close() + err = configFile.LoadFromReader(file) + if err != nil { + err = errors.Wrap(err, filename) } - return configFile, printLegacyFileWarning, nil + return configFile, err } // LoadDefaultConfigFile attempts to load the default config file and returns // an initialized ConfigFile struct if none is found. 
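For context on the rewritten lookup above: a minimal sketch (not part of this diff) of how a caller exercises the updated `config.Dir()`/`config.Load()` behavior, assuming the package is imported from the vendored path `github.com/docker/cli/cli/config`:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/config"
)

func main() {
	// Dir() now resolves the directory once via sync.Once:
	// EnvOverrideConfigDir (DOCKER_CONFIG) wins, otherwise ~/.docker.
	fmt.Println("config dir:", config.Dir())

	// Load returns an empty-but-usable ConfigFile when config.json is
	// missing; only unreadable or invalid files surface an error.
	cfg, err := config.Load(config.Dir())
	if err != nil {
		fmt.Fprintln(os.Stderr, "warning:", err)
	}
	fmt.Println("credential store:", cfg.CredentialsStore)
}
```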
func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { - configFile, printLegacyFileWarning, err := load(Dir()) + configFile, err := load(Dir()) if err != nil { - fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) - } - if printLegacyFileWarning { - _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format has been removed and the configuration file will be ignored") + _, _ = fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) } if !configFile.ContainsAuth() { configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go index 353887547..06b811e7d 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package configfile diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go index 402235bff..a36afc41f 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/default_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go @@ -1,21 +1,22 @@ package credentials -import ( - exec "golang.org/x/sys/execabs" -) +import "os/exec" // DetectDefaultStore return the default credentials store for the platform if -// the store executable is available. +// no user-defined store is passed, and the store executable is available. func DetectDefaultStore(store string) string { - platformDefault := defaultCredentialsStore() - - // user defined or no default for platform - if store != "" || platformDefault == "" { + if store != "" { + // use user-defined return store } - if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { - return platformDefault + platformDefault := defaultCredentialsStore() + if platformDefault == "" { + return "" + } + + if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err != nil { + return "" } - return "" + return platformDefault } diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go index c9630ea51..40c16eb83 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go @@ -1,5 +1,4 @@ //go:build !windows && !darwin && !linux -// +build !windows,!darwin,!linux package credentials diff --git a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go index f9619b038..b9af145b9 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go @@ -51,6 +51,7 @@ func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { auth.Username = creds.Username auth.IdentityToken = creds.IdentityToken auth.Password = creds.Password + auth.ServerAddress = creds.ServerAddress return auth, nil } @@ -76,6 +77,9 @@ func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { ac.Username = creds.Username ac.Password = creds.Password ac.IdentityToken = creds.IdentityToken + if 
ac.ServerAddress == "" { + ac.ServerAddress = creds.ServerAddress + } authConfigs[registry] = ac } diff --git a/vendor/github.com/docker/distribution/reference/helpers_deprecated.go b/vendor/github.com/docker/distribution/reference/helpers_deprecated.go deleted file mode 100644 index cbd119250..000000000 --- a/vendor/github.com/docker/distribution/reference/helpers_deprecated.go +++ /dev/null @@ -1,34 +0,0 @@ -package reference - -import "github.com/distribution/reference" - -// IsNameOnly returns true if reference only contains a repo name. -// -// Deprecated: use [reference.IsNameOnly]. -func IsNameOnly(ref reference.Named) bool { - return reference.IsNameOnly(ref) -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -// -// Deprecated: use [reference.FamiliarName]. -func FamiliarName(ref reference.Named) string { - return reference.FamiliarName(ref) -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -// -// Deprecated: use [reference.FamiliarString]. -func FamiliarString(ref reference.Reference) string { - return reference.FamiliarString(ref) -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See [path.Match] for supported patterns. -// -// Deprecated: use [reference.FamiliarMatch]. -func FamiliarMatch(pattern string, ref reference.Reference) (bool, error) { - return reference.FamiliarMatch(pattern, ref) -} diff --git a/vendor/github.com/docker/distribution/reference/normalize_deprecated.go b/vendor/github.com/docker/distribution/reference/normalize_deprecated.go deleted file mode 100644 index 1b4a459d7..000000000 --- a/vendor/github.com/docker/distribution/reference/normalize_deprecated.go +++ /dev/null @@ -1,92 +0,0 @@ -package reference - -import ( - "regexp" - - "github.com/distribution/reference" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/go-digest/digestset" -) - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. -// -// Deprecated: use [reference.ParseNormalizedNamed]. -func ParseNormalizedNamed(s string) (reference.Named, error) { - return reference.ParseNormalizedNamed(s) -} - -// ParseDockerRef normalizes the image reference following the docker convention, -// which allows for references to contain both a tag and a digest. -// -// Deprecated: use [reference.ParseDockerRef]. -func ParseDockerRef(ref string) (reference.Named, error) { - return reference.ParseDockerRef(ref) -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -// -// Deprecated: use [reference.TagNameOnly]. -func TagNameOnly(ref reference.Named) reference.Named { - return reference.TagNameOnly(ref) -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -// -// Deprecated: use [reference.ParseAnyReference]. -func ParseAnyReference(ref string) (reference.Reference, error) { - return reference.ParseAnyReference(ref) -} - -// Functions and types below have been removed in distribution v3 and -// have not been ported to github.com/distribution/reference. See -// https://github.com/distribution/distribution/pull/3774 - -var ( - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. 
A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - // - // Deprecated: support for short-identifiers is deprecated, and will be removed in v3. - ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier) - - shortIdentifier = `([a-f0-9]{6,64})` - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = regexp.MustCompile(`^` + shortIdentifier + `$`) -) - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -// ParseAnyReferenceWithSet parses a reference string as a possible short -// identifier to be matched in a digest set, a full digest, or familiar name. -// -// Deprecated: support for short-identifiers is deprecated, and will be removed in v3. -func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { - if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { - dgst, err := ds.Lookup(ref) - if err == nil { - return digestReference(dgst), nil - } - } else { - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - } - - return reference.ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/reference_deprecated.go b/vendor/github.com/docker/distribution/reference/reference_deprecated.go deleted file mode 100644 index 5b732498e..000000000 --- a/vendor/github.com/docker/distribution/reference/reference_deprecated.go +++ /dev/null @@ -1,172 +0,0 @@ -// Package reference is deprecated, and has moved to github.com/distribution/reference. -// -// Deprecated: use github.com/distribution/reference instead. -package reference - -import ( - "github.com/distribution/reference" - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - // - // Deprecated: use [reference.NameTotalLengthMax]. - NameTotalLengthMax = reference.NameTotalLengthMax -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - // - // Deprecated: use [reference.ErrReferenceInvalidFormat]. - ErrReferenceInvalidFormat = reference.ErrReferenceInvalidFormat - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - // - // Deprecated: use [reference.ErrTagInvalidFormat]. - ErrTagInvalidFormat = reference.ErrTagInvalidFormat - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - // - // Deprecated: use [reference.ErrDigestInvalidFormat]. - ErrDigestInvalidFormat = reference.ErrDigestInvalidFormat - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - // - // Deprecated: use [reference.ErrNameContainsUppercase]. - ErrNameContainsUppercase = reference.ErrNameContainsUppercase - - // ErrNameEmpty is returned for empty, invalid repository names. - // - // Deprecated: use [reference.ErrNameEmpty]. - ErrNameEmpty = reference.ErrNameEmpty - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - // - // Deprecated: use [reference.ErrNameTooLong]. - ErrNameTooLong = reference.ErrNameTooLong - - // ErrNameNotCanonical is returned when a name is not canonical. - // - // Deprecated: use [reference.ErrNameNotCanonical]. 
- ErrNameNotCanonical = reference.ErrNameNotCanonical -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -// -// Deprecated: use [reference.Reference]. -type Reference = reference.Reference - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -// -// Deprecated: use [reference.Field]. -type Field = reference.Field - -// AsField wraps a reference in a Field for encoding. -// -// Deprecated: use [reference.AsField]. -func AsField(ref reference.Reference) reference.Field { - return reference.AsField(ref) -} - -// Named is an object with a full name -// -// Deprecated: use [reference.Named]. -type Named = reference.Named - -// Tagged is an object which has a tag -// -// Deprecated: use [reference.Tagged]. -type Tagged = reference.Tagged - -// NamedTagged is an object including a name and tag. -// -// Deprecated: use [reference.NamedTagged]. -type NamedTagged reference.NamedTagged - -// Digested is an object which has a digest -// in which it can be referenced by -// -// Deprecated: use [reference.Digested]. -type Digested reference.Digested - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -// -// Deprecated: use [reference.Canonical]. -type Canonical reference.Canonical - -// Domain returns the domain part of the [Named] reference. -// -// Deprecated: use [reference.Domain]. -func Domain(named reference.Named) string { - return reference.Domain(named) -} - -// Path returns the name without the domain part of the [Named] reference. -// -// Deprecated: use [reference.Path]. -func Path(named reference.Named) (name string) { - return reference.Path(named) -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// -// Deprecated: Use [reference.Domain] or [reference.Path]. -func SplitHostname(named reference.Named) (string, string) { - return reference.SplitHostname(named) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// -// Deprecated: use [reference.Parse]. -func Parse(s string) (reference.Reference, error) { - return reference.Parse(s) -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// -// Deprecated: use [reference.ParseNamed]. -func ParseNamed(s string) (reference.Named, error) { - return reference.ParseNamed(s) -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -// -// Deprecated: use [reference.WithName]. -func WithName(name string) (reference.Named, error) { - return reference.WithName(name) -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -// -// Deprecated: use [reference.WithTag]. -func WithTag(name reference.Named, tag string) (reference.NamedTagged, error) { - return reference.WithTag(name, tag) -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. 
-// -// Deprecated: use [reference.WithDigest]. -func WithDigest(name reference.Named, digest digest.Digest) (reference.Canonical, error) { - return reference.WithDigest(name, digest) -} - -// TrimNamed removes any tag or digest from the named reference. -// -// Deprecated: use [reference.TrimNamed]. -func TrimNamed(ref reference.Named) reference.Named { - return reference.TrimNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/regexp_deprecated.go b/vendor/github.com/docker/distribution/reference/regexp_deprecated.go deleted file mode 100644 index 4b9c1b58e..000000000 --- a/vendor/github.com/docker/distribution/reference/regexp_deprecated.go +++ /dev/null @@ -1,50 +0,0 @@ -package reference - -import ( - "github.com/distribution/reference" -) - -// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:"). -// -// Deprecated: use [reference.DigestRegexp]. -var DigestRegexp = reference.DigestRegexp - -// DomainRegexp matches hostname or IP-addresses, optionally including a port -// number. It defines the structure of potential domain components that may be -// part of image names. This is purposely a subset of what is allowed by DNS to -// ensure backwards compatibility with Docker image names. It may be a subset of -// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between -// square brackets (excluding zone identifiers as defined by [RFC 6874] or special -// addresses such as IPv4-Mapped). -// -// Deprecated: use [reference.DomainRegexp]. -// -// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874. -var DomainRegexp = reference.DigestRegexp - -// IdentifierRegexp is the format for string identifier used as a -// content addressable identifier using sha256. These identifiers -// are like digests without the algorithm, since sha256 is used. -// -// Deprecated: use [reference.IdentifierRegexp]. -var IdentifierRegexp = reference.IdentifierRegexp - -// NameRegexp is the format for the name component of references, including -// an optional domain and port, but without tag or digest suffix. -// -// Deprecated: use [reference.NameRegexp]. -var NameRegexp = reference.NameRegexp - -// ReferenceRegexp is the full supported format of a reference. The regexp -// is anchored and has capturing groups for name, tag, and digest -// components. -// -// Deprecated: use [reference.ReferenceRegexp]. -var ReferenceRegexp = reference.ReferenceRegexp - -// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go]. -// -// Deprecated: use [reference.TagRegexp]. -// -// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28 -var TagRegexp = reference.TagRegexp diff --git a/vendor/github.com/docker/distribution/reference/sort_deprecated.go b/vendor/github.com/docker/distribution/reference/sort_deprecated.go deleted file mode 100644 index a73251b6f..000000000 --- a/vendor/github.com/docker/distribution/reference/sort_deprecated.go +++ /dev/null @@ -1,10 +0,0 @@ -package reference - -import "github.com/distribution/reference" - -// Sort sorts string references preferring higher information references. -// -// Deprecated: use [reference.Sort]. 
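The deletions above drop the `github.com/docker/distribution/reference` compatibility shims in favor of `github.com/distribution/reference`. A minimal migration sketch, assuming callers only need the non-deprecated parsing helpers named in the removed doc comments:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// ParseNormalizedNamed expands a familiar name ("ubuntu") into a
	// fully qualified reference ("docker.io/library/ubuntu").
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	// TagNameOnly adds the default "latest" tag if none is present.
	tagged := reference.TagNameOnly(named)
	fmt.Println(reference.FamiliarString(tagged)) // "ubuntu:latest"
}
```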
-func Sort(references []string) []string { - return reference.Sort(references) -} diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index b31418192..48d04f9a9 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -27,6 +27,7 @@ Adam Miller Adam Mills Adam Pointer Adam Singer +Adam Thornton Adam Walz Adam Williams AdamKorcz @@ -173,6 +174,7 @@ Andy Rothfusz Andy Smith Andy Wilson Andy Zhang +Aneesh Kulkarni Anes Hasicic Angel Velazquez Anil Belur @@ -236,6 +238,7 @@ Ben Golub Ben Gould Ben Hall Ben Langfeld +Ben Lovy Ben Sargent Ben Severson Ben Toews @@ -262,7 +265,7 @@ Billy Ridgway Bily Zhang Bin Liu Bingshen Wang -Bjorn Neergaard +Bjorn Neergaard Blake Geno Boaz Shuster bobby abbott @@ -279,6 +282,7 @@ Brandon Liu Brandon Philips Brandon Rhodes Brendan Dixon +Brennan Kinney <5098581+polarathene@users.noreply.github.com> Brent Salisbury Brett Higgins Brett Kochendorfer @@ -363,6 +367,7 @@ chenyuzhu Chetan Birajdar Chewey Chia-liang Kao +Chiranjeevi Tirunagari chli Cholerae Hu Chris Alfonso @@ -433,8 +438,8 @@ Cristian Staretu cristiano balducci Cristina Yenyxe Gonzalez Garcia Cruceru Calin-Cristian +cui fliter CUI Wei -cuishuang Cuong Manh Le Cyprian Gracz Cyril F @@ -513,6 +518,7 @@ David Dooling David Gageot David Gebler David Glasser +David Karlsson <35727626+dvdksn@users.noreply.github.com> David Lawrence David Lechner David M. Karr @@ -602,6 +608,7 @@ Donald Huang Dong Chen Donghwa Kim Donovan Jones +Dorin Geman Doron Podoleanu Doug Davis Doug MacEachern @@ -636,6 +643,7 @@ Emily Rose Emir Ozer Eng Zer Jun Enguerran +Enrico Weigelt, metux IT consult Eohyung Lee epeterso er0k @@ -676,6 +684,7 @@ Evan Allrich Evan Carmi Evan Hazlett Evan Krall +Evan Lezar Evan Phoenix Evan Wies Evelyn Xu @@ -744,6 +753,7 @@ Frank Groeneveld Frank Herrmann Frank Macreery Frank Rosquin +Frank Villaro-Dixon Frank Yang Fred Lifton Frederick F. 
Kautz IV @@ -983,6 +993,7 @@ Jean Rouge Jean-Baptiste Barth Jean-Baptiste Dalido Jean-Christophe Berthon +Jean-Michel Rouet Jean-Paul Calderone Jean-Pierre Huynh Jean-Tiare Le Bigot @@ -1013,6 +1024,7 @@ Jeroen Jacobs Jesse Dearing Jesse Dubay Jessica Frazelle +Jeyanthinath Muthuram Jezeniel Zapanta Jhon Honce Ji.Zhilong @@ -1141,6 +1153,7 @@ junxu Jussi Nummelin Justas Brazauskas Justen Martin +Justin Chadwell Justin Cormack Justin Force Justin Keller <85903732+jk-vb@users.noreply.github.com> @@ -1183,6 +1196,7 @@ Ke Xu Kei Ohmura Keith Hudgins Keli Hu +Ken Bannister Ken Cochrane Ken Herner Ken ICHIKAWA @@ -1192,7 +1206,7 @@ Kenjiro Nakayama Kent Johnson Kenta Tada Kevin "qwazerty" Houdebert -Kevin Alvarez +Kevin Alvarez Kevin Burke Kevin Clark Kevin Feyrer @@ -1225,6 +1239,7 @@ Konstantin Gribov Konstantin L Konstantin Pelykh Kostadin Plachkov +kpcyrd Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister @@ -1306,6 +1321,7 @@ Lorenzo Fontana Lotus Fenn Louis Delossantos Louis Opter +Luboslav Pivarc Luca Favatella Luca Marturana Luca Orlandi @@ -1344,6 +1360,7 @@ Manuel Meurer Manuel Rüger Manuel Woelker mapk0y +Marat Radchenko Marc Abramowitz Marc Kuo Marc Tamsky @@ -1383,6 +1400,7 @@ Martijn van Oosterhout Martin Braun Martin Dojcak Martin Honermeyer +Martin Jirku Martin Kelly Martin Mosegaard Amdisen Martin Muzatko @@ -1461,6 +1479,7 @@ Michael Holzheu Michael Hudson-Doyle Michael Huettermann Michael Irwin +Michael Kebe Michael Kuehn Michael Käufl Michael Neale @@ -1509,10 +1528,11 @@ Mike Lundy Mike MacCana Mike Naberezny Mike Snitzer +Mike Sul mikelinjie <294893458@qq.com> Mikhail Sobolev Miklos Szegedi -Milas Bowman +Milas Bowman Milind Chawre Miloslav Trmač mingqing @@ -1524,6 +1544,7 @@ mlarcher Mohammad Banikazemi Mohammad Nasirifar Mohammed Aaqib Ansari +Mohd Sadiq Mohit Soni Moorthy RS Morgan Bauer @@ -1606,6 +1627,7 @@ Noah Treuhaft NobodyOnSE noducks Nolan Darilek +Nolan Miles Noriki Nakamura nponeccop Nurahmadie @@ -1661,6 +1683,7 @@ Paul Lietar Paul Liljenberg Paul Morie Paul Nasrat +Paul Seiffert Paul Weaver Paulo Gomes Paulo Ribeiro @@ -1674,6 +1697,7 @@ Pavlos Ratis Pavol Vargovcik Pawel Konczalski Paweł Gronowski +payall4u Peeyush Gupta Peggy Li Pei Su @@ -1703,7 +1727,9 @@ Phil Estes Phil Sphicas Phil Spitler Philip Alexander Etling +Philip K. Warren Philip Monroe +Philipp Fruck Philipp Gillé Philipp Wahala Philipp Weissensteiner @@ -1741,6 +1767,7 @@ Quentin Brossard Quentin Perez Quentin Tayssier r0n22 +Rachit Sharma Radostin Stoyanov Rafal Jeczalik Rafe Colton @@ -1773,6 +1800,7 @@ Rich Horwood Rich Moyse Rich Seymour Richard Burnison +Richard Hansen Richard Harvey Richard Mathie Richard Metzler @@ -1788,6 +1816,7 @@ Ritesh H Shukla Riyaz Faizullabhoy Rob Cowsill <42620235+rcowsill@users.noreply.github.com> Rob Gulewich +Rob Murray Rob Vesse Robert Bachmann Robert Bittle @@ -1869,6 +1898,7 @@ ryancooper7 RyanDeng Ryo Nakao Ryoga Saito +Régis Behmo Rémy Greinhofer s. rannou Sabin Basyal @@ -1885,6 +1915,7 @@ Sam J Sharpe Sam Neirinck Sam Reis Sam Rijs +Sam Thibault Sam Whited Sambuddha Basu Sami Wagiaalla @@ -1908,6 +1939,7 @@ Satoshi Tagomori Scott Bessler Scott Collier Scott Johnston +Scott Moser Scott Percival Scott Stamp Scott Walls @@ -1923,6 +1955,7 @@ Sebastiaan van Steenis Sebastiaan van Stijn Sebastian Höffner Sebastian Radloff +Sebastian Thomschke Sebastien Goasguen Senthil Kumar Selvaraj Senthil Kumaran @@ -1996,6 +2029,7 @@ Stanislav Bondarenko Stanislav Levin Steeve Morin Stefan Berger +Stefan Gehrig Stefan J. Wernli Stefan Praszalowicz Stefan S. 
@@ -2003,6 +2037,7 @@ Stefan Scherer Stefan Staudenmeyer Stefan Weil Steffen Butzer +Stephan Henningsen Stephan Spindler Stephen Benjamin Stephen Crosby @@ -2204,6 +2239,7 @@ Vinod Kulkarni Vishal Doshi Vishnu Kannan Vitaly Ostrosablin +Vitor Anjos Vitor Monteiro Vivek Agarwal Vivek Dasgupta @@ -2250,6 +2286,7 @@ Wenxuan Zhao Wenyu You <21551128@zju.edu.cn> Wenzhi Liang Wes Morgan +Wesley Pettit Wewang Xiaorenfine Wiktor Kwapisiewicz Will Dietz @@ -2289,7 +2326,7 @@ xiekeyang Ximo Guanter Gonzálbez xin.li Xinbo Weng -Xinfeng Liu +Xinfeng Liu Xinzi Zhou Xiuming Chen Xuecong Liao @@ -2355,6 +2392,7 @@ Zen Lin(Zhinan Lin) Zhang Kun Zhang Wei Zhang Wentao +zhangguanzhang ZhangHang zhangxianwei Zhenan Ye <21551168@zju.edu.cn> @@ -2381,6 +2419,7 @@ Zuhayr Elahi Zunayed Ali Álvaro Lázaro Átila Camurça Alves +吴小白 <296015668@qq.com> 尹吉峰 屈骏 徐俊杰 diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go deleted file mode 100644 index 9ee329a2f..000000000 --- a/vendor/github.com/docker/docker/api/types/auth.go +++ /dev/null @@ -1,7 +0,0 @@ -package types // import "github.com/docker/docker/api/types" -import "github.com/docker/docker/api/types/registry" - -// AuthConfig contains authorization information for connecting to a Registry. -// -// Deprecated: use github.com/docker/docker/api/types/registry.AuthConfig -type AuthConfig = registry.AuthConfig diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go deleted file mode 100644 index bf3463b90..000000000 --- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev // import "github.com/docker/docker/api/types/blkiodev" - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go deleted file mode 100644 index d8cd30613..000000000 --- a/vendor/github.com/docker/docker/api/types/client.go +++ /dev/null @@ -1,444 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "bufio" - "io" - "net" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" - units "github.com/docker/go-units" -) - -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// CheckpointListOptions holds parameters to list checkpoints for a container -type CheckpointListOptions struct { - CheckpointDir string -} - -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container -type CheckpointDeleteOptions struct { - CheckpointID string - CheckpointDir string -} - -// ContainerAttachOptions holds parameters to attach to a container. 
-type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string `json:"ID"` - ContainerID string - Running bool - ExitCode int - Pid int -} - -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Until string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. -type ContainerStartOptions struct { - CheckpointID string - CheckpointDir string -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool - CopyUIDGID bool -} - -// EventsOptions holds parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - -// NewHijackedResponse intializes a HijackedResponse type -func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { - return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} -} - -// HijackedResponse holds connection information for a hijacked request. -type HijackedResponse struct { - mediaType string - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. -func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// MediaType let client know if HijackedResponse hold a raw or multiplexed stream. -// returns false if HTTP Content-Type is not relevant, and container must be inspected -func (h *HijackedResponse) MediaType() (string, bool) { - if h.mediaType == "" { - return "", false - } - return h.mediaType, true -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. 
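The `HijackedResponse` removed above carries a raw connection whose stream is multiplexed unless the container was started with a TTY. A sketch of consuming it, under the assumption that the caller attached without a TTY and uses the demultiplexer from `github.com/docker/docker/pkg/stdcopy`:

```go
package attachdemo

import (
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/pkg/stdcopy"
)

// Demux copies the multiplexed attach/exec stream back into separate
// stdout and stderr writers. TTY sessions are raw and should be copied
// directly from resp.Reader instead.
func Demux(resp types.HijackedResponse) error {
	defer resp.Close()
	_, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)
	return err
}
```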
-type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - NetworkMode string - ShmSize int64 - Dockerfile string - Ulimits []*units.Ulimit - // BuildArgs needs to be a *string instead of just a string so that - // we can tell the difference between "" (empty string) and no value - // at all (nil). See the parsing of buildArgs in - // api/server/router/build/build_routes.go for even more info. - BuildArgs map[string]*string - AuthConfigs map[string]registry.AuthConfig - Context io.Reader - Labels map[string]string - // squash the resulting image's layers to the parent - // preserves the original image and creates a new one from the parent with all - // the changes applied to a single layer - Squash bool - // CacheFrom specifies images that are used for matching cache. Images - // specified here do not need to have a valid parent chain to match cache. - CacheFrom []string - SecurityOpt []string - ExtraHosts []string // List of extra hosts - Target string - SessionID string - Platform string - // Version specifies the version of the unerlying builder to use - Version BuilderVersion - // BuildID is an optional identifier that can be passed together with the - // build request. The same identifier can be used to gracefully cancel the - // build with the cancel request. - BuildID string - // Outputs defines configurations for exporting build results. Only supported - // in BuildKit mode - Outputs []ImageBuildOutput -} - -// ImageBuildOutput defines configuration for exporting a build result -type ImageBuildOutput struct { - Type string - Attrs map[string]string -} - -// BuilderVersion sets the version of underlying builder to use -type BuilderVersion string - -const ( - // BuilderV1 is the first generation builder in docker daemon - BuilderV1 BuilderVersion = "1" - // BuilderBuildKit is builder based on moby/buildkit project - BuilderBuildKit BuilderVersion = "2" -) - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. - Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. - SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image - Platform string // Platform is the target platform of the image -} - -// ImageListOptions holds parameters to list images with. 
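The `map[string]*string` shape of `BuildArgs` above is deliberate, as the removed comment notes: a nil pointer means "declared but unset", which is distinct from an explicit empty string. A small illustration of the distinction:

```go
package main

import "fmt"

func main() {
	debug := "1"
	buildArgs := map[string]*string{
		"DEBUG":    &debug, // explicitly set to "1"
		"NO_PROXY": nil,    // declared with no value; builder keeps its default
	}
	for name, val := range buildArgs {
		if val == nil {
			fmt.Printf("%s: unset\n", name)
		} else {
			fmt.Printf("%s=%q\n", name, *val)
		}
	}
}
```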
-type ImageListOptions struct { - // All controls whether all images in the graph are filtered, or just - // the heads. - All bool - - // Filters is a JSON-encoded set of filter arguments. - Filters filters.Args - - // SharedSize indicates whether the shared size of images should be computed. - SharedSize bool - - // ContainerCount indicates whether container count should be computed. - ContainerCount bool -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - Platform string -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -// ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args - Limit int -} - -// ResizeOptions holds parameters to resize a tty. -// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height uint - Width uint -} - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filters filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string - // Warnings is a set of non-fatal warning messages to pass on to the user. - Warnings []string `json:",omitempty"` -} - -// Values for RegistryAuthFrom in ServiceUpdateOptions -const ( - RegistryAuthFromSpec = "spec" - RegistryAuthFromPreviousSpec = "previous-spec" -) - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. 
While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. - - // RegistryAuthFrom specifies where to find the registry authorization - // credentials if they are not given in EncodedRegistryAuth. Valid - // values are "spec" and "previous-spec". - RegistryAuthFrom string - - // Rollback indicates whether a server-side rollback should be - // performed. When this is set, the provided spec will be ignored. - // The valid values are "previous" and "none". An empty value is the - // same as "none". - Rollback string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filters filters.Args - - // Status indicates whether the server should include the service task - // count of running and desired tasks. - Status bool -} - -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -type ServiceInspectOptions struct { - InsertDefaults bool -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filters filters.Args -} - -// PluginRemoveOptions holds parameters to remove plugins. -type PluginRemoveOptions struct { - Force bool -} - -// PluginEnableOptions holds parameters to enable plugins. -type PluginEnableOptions struct { - Timeout int -} - -// PluginDisableOptions holds parameters to disable plugins. -type PluginDisableOptions struct { - Force bool -} - -// PluginInstallOptions holds parameters to install a plugin. -type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - RemoteRef string // RemoteRef is the plugin name on the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) - Args []string -} - -// SwarmUnlockKeyResponse contains the response for Engine API: -// GET /swarm/unlockkey -type SwarmUnlockKeyResponse struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// PluginCreateOptions hold all options to plugin create. -type PluginCreateOptions struct { - RepoName string -} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go deleted file mode 100644 index 7d5930bbe..000000000 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ /dev/null @@ -1,67 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - Platform *ocispec.Platform - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. 
This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Env []string // Environment variables - WorkingDir string // Working directory - Cmd []string // Execution commands and args -} - -// PluginRmConfig holds arguments for plugin remove. -type PluginRmConfig struct { - ForceRemove bool -} - -// PluginEnableConfig holds arguments for plugin enable -type PluginEnableConfig struct { - Timeout int -} - -// PluginDisableConfig holds arguments for plugin disable. -type PluginDisableConfig struct { - ForceDisable bool -} - -// NetworkListConfig stores the options available for listing networks -type NetworkListConfig struct { - // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here - Detailed bool - Verbose bool -} diff --git a/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go b/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go deleted file mode 100644 index 6b4b47390..000000000 --- a/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go +++ /dev/null @@ -1,6 +0,0 @@ -package container - -// ContainerChangeResponseItem change item in response to ContainerChanges operation -// -// Deprecated: use [FilesystemChange]. -type ContainerChangeResponseItem = FilesystemChange diff --git a/vendor/github.com/docker/docker/api/types/container/change_type.go b/vendor/github.com/docker/docker/api/types/container/change_type.go deleted file mode 100644 index fe8d6d369..000000000 --- a/vendor/github.com/docker/docker/api/types/container/change_type.go +++ /dev/null @@ -1,15 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ChangeType Kind of change -// -// Can be one of: -// -// - `0`: Modified ("C") -// - `1`: Added ("A") -// - `2`: Deleted ("D") -// -// swagger:model ChangeType -type ChangeType uint8 diff --git a/vendor/github.com/docker/docker/api/types/container/change_types.go b/vendor/github.com/docker/docker/api/types/container/change_types.go deleted file mode 100644 index 3a3a83866..000000000 --- a/vendor/github.com/docker/docker/api/types/container/change_types.go +++ /dev/null @@ -1,23 +0,0 @@ -package container - -const ( - // ChangeModify represents the modify operation. - ChangeModify ChangeType = 0 - // ChangeAdd represents the add operation. - ChangeAdd ChangeType = 1 - // ChangeDelete represents the delete operation. 
- ChangeDelete ChangeType = 2 -) - -func (ct ChangeType) String() string { - switch ct { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - default: - return "" - } -} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go deleted file mode 100644 index 077583e66..000000000 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ /dev/null @@ -1,96 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "io" - "time" - - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// MinimumDuration puts a minimum on user configured duration. -// This is to prevent API error on time unit. For example, API may -// set 3 as healthcheck interval with intention of 3 seconds, but -// Docker interprets it as 3 nanoseconds. -const MinimumDuration = 1 * time.Millisecond - -// StopOptions holds the options to stop or restart a container. -type StopOptions struct { - // Signal (optional) is the signal to send to the container to (gracefully) - // stop it before forcibly terminating the container with SIGKILL after the - // timeout expires. If not value is set, the default (SIGTERM) is used. - Signal string `json:",omitempty"` - - // Timeout (optional) is the timeout (in seconds) to wait for the container - // to stop gracefully before forcibly terminating it with SIGKILL. - // - // - Use nil to use the default timeout (10 seconds). - // - Use '-1' to wait indefinitely. - // - Use '0' to not wait for the container to exit gracefully, and - // immediately proceeds to forcibly terminating the container. - // - Other positive values are used as timeout (in seconds). - Timeout *int `json:",omitempty"` -} - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// ExecStartOptions holds the options to start container's exec. -type ExecStartOptions struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - ConsoleSize *[2]uint `json:",omitempty"` -} - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. 
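For the `HealthConfig` semantics documented above, a sketch of a typical check (types as vendored from `github.com/docker/docker/api/types/container`; the curl endpoint is purely illustrative):

```go
package main

import (
	"time"

	"github.com/docker/docker/api/types/container"
)

func main() {
	hc := container.HealthConfig{
		// "CMD-SHELL" runs the command with the system's default shell.
		Test:        []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval:    30 * time.Second, // wait between checks
		Timeout:     5 * time.Second,  // per-check deadline before it counts as hung
		StartPeriod: 10 * time.Second, // grace period before failures count
		Retries:     3,                // consecutive failures => unhealthy
	}
	_ = hc
}
```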
-type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go deleted file mode 100644 index 63381da36..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerTopOKBody OK response to ContainerTop operation -// swagger:model ContainerTopOKBody -type ContainerTopOKBody struct { - - // Each process running in the container, where each is process - // is an array of values corresponding to the titles. - // - // Required: true - Processes [][]string `json:"Processes"` - - // The ps column titles - // Required: true - Titles []string `json:"Titles"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go deleted file mode 100644 index c10f175ea..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ /dev/null @@ -1,16 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. 
-// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerUpdateOKBody OK response to ContainerUpdate operation -// swagger:model ContainerUpdateOKBody -type ContainerUpdateOKBody struct { - - // warnings - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go deleted file mode 100644 index aa0e7f7d0..000000000 --- a/vendor/github.com/docker/docker/api/types/container/create_response.go +++ /dev/null @@ -1,19 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// CreateResponse ContainerCreateResponse -// -// OK response to ContainerCreate operation -// swagger:model CreateResponse -type CreateResponse struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/filesystem_change.go b/vendor/github.com/docker/docker/api/types/container/filesystem_change.go deleted file mode 100644 index 9e9c2ad1d..000000000 --- a/vendor/github.com/docker/docker/api/types/container/filesystem_change.go +++ /dev/null @@ -1,19 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// FilesystemChange Change in the container's filesystem. -// -// swagger:model FilesystemChange -type FilesystemChange struct { - - // kind - // Required: true - Kind ChangeType `json:"Kind"` - - // Path to file or directory that has changed. 
- // - // Required: true - Path string `json:"Path"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go deleted file mode 100644 index d4e6f5537..000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ /dev/null @@ -1,456 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" -) - -// CgroupnsMode represents the cgroup namespace mode of the container -type CgroupnsMode string - -// cgroup namespace modes for containers -const ( - CgroupnsModeEmpty CgroupnsMode = "" - CgroupnsModePrivate CgroupnsMode = "private" - CgroupnsModeHost CgroupnsMode = "host" -) - -// IsPrivate indicates whether the container uses its own private cgroup namespace -func (c CgroupnsMode) IsPrivate() bool { - return c == CgroupnsModePrivate -} - -// IsHost indicates whether the container shares the host's cgroup namespace -func (c CgroupnsMode) IsHost() bool { - return c == CgroupnsModeHost -} - -// IsEmpty indicates whether the container cgroup namespace mode is unset -func (c CgroupnsMode) IsEmpty() bool { - return c == CgroupnsModeEmpty -} - -// Valid indicates whether the cgroup namespace mode is valid -func (c CgroupnsMode) Valid() bool { - return c.IsEmpty() || c.IsPrivate() || c.IsHost() -} - -// Isolation represents the isolation technology of a container. The supported -// values are platform specific -type Isolation string - -// Isolation modes for containers -const ( - IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) - IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon - IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode - IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode -) - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i Isolation) IsDefault() bool { - // TODO consider making isolation-mode strict (case-sensitive) - v := Isolation(strings.ToLower(string(i))) - return v == IsolationDefault || v == IsolationEmpty -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - // TODO consider making isolation-mode strict (case-sensitive) - return Isolation(strings.ToLower(string(i))) == IsolationHyperV -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - // TODO consider making isolation-mode strict (case-sensitive) - return Isolation(strings.ToLower(string(i))) == IsolationProcess -} - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IpcMode constants -const ( - IPCModeNone IpcMode = "none" - IPCModeHost IpcMode = "host" - IPCModeContainer IpcMode = "container" - IPCModePrivate IpcMode = "private" - IPCModeShareable IpcMode = "shareable" -) - -// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. -func (n IpcMode) IsPrivate() bool { - return n == IPCModePrivate -} - -// IsHost indicates whether the container shares the host's ipc namespace. 
-func (n IpcMode) IsHost() bool { - return n == IPCModeHost -} - -// IsShareable indicates whether the container's ipc namespace can be shared with another container. -func (n IpcMode) IsShareable() bool { - return n == IPCModeShareable -} - -// IsContainer indicates whether the container uses another container's ipc namespace. -func (n IpcMode) IsContainer() bool { - _, ok := containerID(string(n)) - return ok -} - -// IsNone indicates whether container IpcMode is set to "none". -func (n IpcMode) IsNone() bool { - return n == IPCModeNone -} - -// IsEmpty indicates whether container IpcMode is empty -func (n IpcMode) IsEmpty() bool { - return n == "" -} - -// Valid indicates whether the ipc mode is valid. -func (n IpcMode) Valid() bool { - // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. - return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() -} - -// Container returns the name of the container ipc stack is going to be used. -func (n IpcMode) Container() (idOrName string) { - idOrName, _ = containerID(string(n)) - return idOrName -} - -// NetworkMode represents the container network stack. -type NetworkMode string - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - _, ok := containerID(string(n)) - return ok -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() (idOrName string) { - idOrName, _ = containerID(string(n)) - return idOrName -} - -// UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} - -// UsernsMode represents userns mode in the container. -type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. -func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses the a private userns. -func (n UsernsMode) IsPrivate() bool { - return !n.IsHost() -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - return n == "" || n.IsHost() -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container cgroup -func (c CgroupSpec) IsContainer() bool { - _, ok := containerID(string(c)) - return ok -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. - return c == "" || c.IsContainer() -} - -// Container returns the ID or name of the container whose cgroup will be used. -func (c CgroupSpec) Container() (idOrName string) { - idOrName, _ = containerID(string(c)) - return idOrName -} - -// UTSMode represents the UTS namespace of the container. 
-type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !n.IsHost() -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - return n == "" || n.IsHost() -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - _, ok := containerID(string(n)) - return ok -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - return n == "" || n.IsHost() || validContainer(string(n)) -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() (idOrName string) { - idOrName, _ = containerID(string(n)) - return idOrName -} - -// DeviceRequest represents a request for devices from a device driver. -// Used by GPU device drivers. -type DeviceRequest struct { - Driver string // Name of device driver - Count int // Number of devices to request (-1 = All) - DeviceIDs []string // List of device IDs as recognizable by the device driver - Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") - Options map[string]string // Options to pass onto the device driver -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name string - MaximumRetryCount int -} - -// IsNone indicates whether the container has the "no" restart policy. -// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" || rp.Name == "" -} - -// IsAlways indicates whether the container has the "always" restart policy. -// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart of exiting with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. This means the container will -// automatically restart unless user has put it to stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" -} - -// IsSame compares two RestartPolicy to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// LogMode is a type to define the available modes for logging -// These modes affect how logs are handled when log messages start piling up. 
-type LogMode string - -// Available logging modes -const ( - LogModeUnset LogMode = "" - LogModeBlocking LogMode = "blocking" - LogModeNonBlock LogMode = "non-blocking" -) - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - Memory int64 // Memory limit (in bytes) - NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period - CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DeviceCgroupRules []string // List of rule to be added to the device cgroup - DeviceRequests []DeviceRequest // List of device requests for device drivers - - // KernelMemory specifies the kernel memory limit (in bytes) for the container. - // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. - KernelMemory int64 `json:",omitempty"` - KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. -// Here, "non-portable" means "dependent of the host we are running on". -// Portable information *should* appear in Config. 
-type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other container - ConsoleSize [2]uint // Initial console size (height,width) - Annotations map[string]string `json:",omitempty"` // Arbitrary non-identifying metadata attached to container and provided to the runtime - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed port for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. - Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - Runtime string `json:",omitempty"` // Runtime to use with this container - - // Applicable to Windows - Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources - - // Mounts specs used by the container - Mounts []mount.Mount `json:",omitempty"` - - // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) - MaskedPaths []string - - // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) - ReadonlyPaths []string - - // Run a custom init inside the container, if null, use the daemon's configured settings - Init *bool `json:",omitempty"` -} - -// containerID splits "container:" values. It returns the container -// ID or name, and whether an ID/name was found. 
It returns an empty string and -// a "false" if the value does not have a "container:" prefix. Further validation -// of the returned, including checking if the value is empty, should be handled -// by the caller. -func containerID(val string) (idOrName string, ok bool) { - k, v, hasSep := strings.Cut(val, ":") - if !hasSep || k != "container" { - return "", false - } - return v, true -} - -// validContainer checks if the given value is a "container:" mode with -// a non-empty name/ID. -func validContainer(val string) bool { - id, ok := containerID(val) - return ok && id != "" -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go deleted file mode 100644 index 24c4fa8d9..000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build !windows -// +build !windows - -package container // import "github.com/docker/docker/api/types/container" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go deleted file mode 100644 index 99f803a5b..000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == "nat" -} - -// IsHost indicates whether container uses the host network stack. -// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. 
-func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } else if n.IsBridge() { - return "nat" - } else if n.IsNone() { - return "none" - } else if n.IsContainer() { - return "container" - } else if n.IsUserDefined() { - return n.UserDefined() - } - - return "" -} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go deleted file mode 100644 index ab56d4eed..000000000 --- a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go +++ /dev/null @@ -1,12 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// WaitExitError container waiting error, if any -// swagger:model WaitExitError -type WaitExitError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go deleted file mode 100644 index 84fc6afdd..000000000 --- a/vendor/github.com/docker/docker/api/types/container/wait_response.go +++ /dev/null @@ -1,18 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// WaitResponse ContainerWaitResponse -// -// OK response to ContainerWait operation -// swagger:model WaitResponse -type WaitResponse struct { - - // error - Error *WaitExitError `json:"Error,omitempty"` - - // Exit code of the container - // Required: true - StatusCode int64 `json:"StatusCode"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go deleted file mode 100644 index cd8311f99..000000000 --- a/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// WaitCondition is a type used to specify a container state for which -// to wait. -type WaitCondition string - -// Possible WaitCondition Values. -// -// WaitConditionNotRunning (default) is used to wait for any of the non-running -// states: "created", "exited", "dead", "removing", or "removed". -// -// WaitConditionNextExit is used to wait for the next time the state changes -// to a non-running state. If the state is currently "created" or "exited", -// this would cause Wait() to block until either the container runs and exits -// or is removed. -// -// WaitConditionRemoved is used to wait for the container to be removed. -const ( - WaitConditionNotRunning WaitCondition = "not-running" - WaitConditionNextExit WaitCondition = "next-exit" - WaitConditionRemoved WaitCondition = "removed" -) diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go deleted file mode 100644 index dc942d9d9..000000000 --- a/vendor/github.com/docker/docker/api/types/error_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ErrorResponse Represents an error. -// swagger:model ErrorResponse -type ErrorResponse struct { - - // The error message. 
- // Required: true - Message string `json:"message"` -} diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go deleted file mode 100644 index f84f034cd..000000000 --- a/vendor/github.com/docker/docker/api/types/error_response_ext.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// Error returns the error message -func (e ErrorResponse) Error() string { - return e.Message -} diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go deleted file mode 100644 index ce3deb331..000000000 --- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// GraphDriverData Information about the storage driver used to store the container's and -// image's filesystem. -// -// swagger:model GraphDriverData -type GraphDriverData struct { - - // Low-level storage metadata, provided as key/value pairs. - // - // This information is driver-specific, and depends on the storage-driver - // in use, and should be used for informational purposes only. - // - // Required: true - Data map[string]string `json:"Data"` - - // Name of the storage driver. - // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go deleted file mode 100644 index 7592d2f8b..000000000 --- a/vendor/github.com/docker/docker/api/types/id_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// IDResponse Response to an API call that returns just an Id -// swagger:model IdResponse -type IDResponse struct { - - // The id of the newly created object. - // Required: true - ID string `json:"Id"` -} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go deleted file mode 100644 index b9a65a0d8..000000000 --- a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ImageDeleteResponseItem image delete response item -// swagger:model ImageDeleteResponseItem -type ImageDeleteResponseItem struct { - - // The image ID of an image that was deleted - Deleted string `json:"Deleted,omitempty"` - - // The image ID of an image that was untagged - Untagged string `json:"Untagged,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go deleted file mode 100644 index 0f6f14484..000000000 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ /dev/null @@ -1,94 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ImageSummary image summary -// swagger:model ImageSummary -type ImageSummary struct { - - // Number of containers using this image. Includes both stopped and running - // containers. 
- // - // This size is not calculated by default, and depends on which API endpoint - // is used. `-1` indicates that the value has not been set / calculated. - // - // Required: true - Containers int64 `json:"Containers"` - - // Date and time at which the image was created as a Unix timestamp - // (number of seconds sinds EPOCH). - // - // Required: true - Created int64 `json:"Created"` - - // ID is the content-addressable ID of an image. - // - // This identifier is a content-addressable digest calculated from the - // image's configuration (which includes the digests of layers used by - // the image). - // - // Note that this digest differs from the `RepoDigests` below, which - // holds digests of image manifests that reference the image. - // - // Required: true - ID string `json:"Id"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // ID of the parent image. - // - // Depending on how the image was created, this field may be empty and - // is only set for images that were built/created locally. This field - // is empty if the image was pulled from an image registry. - // - // Required: true - ParentID string `json:"ParentId"` - - // List of content-addressable digests of locally available image manifests - // that the image is referenced from. Multiple manifests can refer to the - // same image. - // - // These digests are usually only available if the image was either pulled - // from a registry, or if the image was pushed to a registry, which is when - // the manifest is generated and its digest calculated. - // - // Required: true - RepoDigests []string `json:"RepoDigests"` - - // List of image names/tags in the local image cache that reference this - // image. - // - // Multiple image tags can refer to the same image, and this list may be - // empty if no tags reference the image, in which case the image is - // "untagged", in which case it can still be referenced by its ID. - // - // Required: true - RepoTags []string `json:"RepoTags"` - - // Total size of image layers that are shared between this image and other - // images. - // - // This size is not calculated by default. `-1` indicates that the value - // has not been set / calculated. - // - // Required: true - SharedSize int64 `json:"SharedSize"` - - // Total size of the image including all layers it is composed of. - // - // Required: true - Size int64 `json:"Size"` - - // Total size of the image including all layers it is composed of. - // - // In versions of Docker before v1.10, this field was calculated from - // the image itself and all of its parent images. Images are now stored - // self-contained, and no longer use a parent-chain, making this field - // an equivalent of the Size field. - // - // Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44. - VirtualSize int64 `json:"VirtualSize,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go deleted file mode 100644 index ac4ce6223..000000000 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ /dev/null @@ -1,140 +0,0 @@ -package mount // import "github.com/docker/docker/api/types/mount" - -import ( - "os" -) - -// Type represents the type of a mount. 
-type Type string - -// Type constants -const ( - // TypeBind is the type for mounting host dir - TypeBind Type = "bind" - // TypeVolume is the type for remote storage volumes - TypeVolume Type = "volume" - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs Type = "tmpfs" - // TypeNamedPipe is the type for mounting Windows named pipes - TypeNamedPipe Type = "npipe" - // TypeCluster is the type for Swarm Cluster Volumes. - TypeCluster Type = "cluster" -) - -// Mount represents a mount (volume). -type Mount struct { - Type Type `json:",omitempty"` - // Source specifies the name of the mount. Depending on mount type, this - // may be a volume name or a host path, or even ignored. - // Source is not supported for tmpfs (must be an empty value) - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Consistency Consistency `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` - ClusterOptions *ClusterOptions `json:",omitempty"` -} - -// Propagation represents the propagation of a mount. -type Propagation string - -const ( - // PropagationRPrivate RPRIVATE - PropagationRPrivate Propagation = "rprivate" - // PropagationPrivate PRIVATE - PropagationPrivate Propagation = "private" - // PropagationRShared RSHARED - PropagationRShared Propagation = "rshared" - // PropagationShared SHARED - PropagationShared Propagation = "shared" - // PropagationRSlave RSLAVE - PropagationRSlave Propagation = "rslave" - // PropagationSlave SLAVE - PropagationSlave Propagation = "slave" -) - -// Propagations is the list of all valid mount propagations -var Propagations = []Propagation{ - PropagationRPrivate, - PropagationPrivate, - PropagationRShared, - PropagationShared, - PropagationRSlave, - PropagationSlave, -} - -// Consistency represents the consistency requirements of a mount. -type Consistency string - -const ( - // ConsistencyFull guarantees bind mount-like consistency - ConsistencyFull Consistency = "consistent" - // ConsistencyCached mounts can cache read data and FS structure - ConsistencyCached Consistency = "cached" - // ConsistencyDelegated mounts can cache read and written data and structure - ConsistencyDelegated Consistency = "delegated" - // ConsistencyDefault provides "consistent" behavior unless overridden - ConsistencyDefault Consistency = "default" -) - -// BindOptions defines options specific to mounts of type "bind". -type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` - CreateMountpoint bool `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} - -// Driver represents a volume driver. -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TmpfsOptions defines options specific to mounts of type "tmpfs". -type TmpfsOptions struct { - // Size sets the size of the tmpfs, in bytes. - // - // This will be converted to an operating system specific value - // depending on the host. For example, on linux, it will be converted to - // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with - // docker, uses a straight byte value. - // - // Percentages are not supported. 
- SizeBytes int64 `json:",omitempty"` - // Mode of the tmpfs upon creation - Mode os.FileMode `json:",omitempty"` - - // TODO(stevvooe): There are several more tmpfs flags, specified in the - // daemon, that are accepted. Only the most basic are added for now. - // - // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 - // - // var validFlags = map[string]bool{ - // "": true, - // "size": true, X - // "mode": true, X - // "uid": true, - // "gid": true, - // "nr_inodes": true, - // "nr_blocks": true, - // "mpol": true, - // } - // - // Some of these may be straightforward to add, but others, such as - // uid/gid have implications in a clustered system. -} - -// ClusterOptions specifies options for a Cluster volume. -type ClusterOptions struct { - // intentionally empty -} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go deleted file mode 100644 index 437b184c6..000000000 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ /dev/null @@ -1,126 +0,0 @@ -package network // import "github.com/docker/docker/api/types/network" -import ( - "github.com/docker/docker/api/types/filters" -) - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string // Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) - return &cfgCopy -} - -// PeerInfo represents one peer of an overlay network -type PeerInfo struct { - Name string - IP string -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string - DriverOpts map[string]string -} - -// Task carries the information about one backend task -type Task struct { - Name string - EndpointID string - EndpointIP string - Info map[string]string -} - -// ServiceInfo represents service parameters with the list of service's tasks -type ServiceInfo struct { - VIP string - Ports []string - LocalLBIndex int - Tasks []Task -} - -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) 
- } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) - } - return &epCopy -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} - -// ConfigReference specifies the source which provides a network's configuration -type ConfigReference struct { - Network string -} - -var acceptedFilters = map[string]bool{ - "dangling": true, - "driver": true, - "id": true, - "label": true, - "name": true, - "scope": true, - "type": true, -} - -// ValidateFilters validates the list of filter args with the available filters. -func ValidateFilters(filter filters.Args) error { - return filter.Validate(acceptedFilters) -} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go deleted file mode 100644 index abae48b9a..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ /dev/null @@ -1,203 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Plugin A plugin for the Engine API -// swagger:model Plugin -type Plugin struct { - - // config - // Required: true - Config PluginConfig `json:"Config"` - - // True if the plugin is running. False if the plugin is not running, only installed. - // Required: true - Enabled bool `json:"Enabled"` - - // Id - ID string `json:"Id,omitempty"` - - // name - // Required: true - Name string `json:"Name"` - - // plugin remote reference used to push/pull the plugin - PluginReference string `json:"PluginReference,omitempty"` - - // settings - // Required: true - Settings PluginSettings `json:"Settings"` -} - -// PluginConfig The config of a plugin. 
-// swagger:model PluginConfig -type PluginConfig struct { - - // args - // Required: true - Args PluginConfigArgs `json:"Args"` - - // description - // Required: true - Description string `json:"Description"` - - // Docker Version used to create the plugin - DockerVersion string `json:"DockerVersion,omitempty"` - - // documentation - // Required: true - Documentation string `json:"Documentation"` - - // entrypoint - // Required: true - Entrypoint []string `json:"Entrypoint"` - - // env - // Required: true - Env []PluginEnv `json:"Env"` - - // interface - // Required: true - Interface PluginConfigInterface `json:"Interface"` - - // ipc host - // Required: true - IpcHost bool `json:"IpcHost"` - - // linux - // Required: true - Linux PluginConfigLinux `json:"Linux"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` - - // network - // Required: true - Network PluginConfigNetwork `json:"Network"` - - // pid host - // Required: true - PidHost bool `json:"PidHost"` - - // propagated mount - // Required: true - PropagatedMount string `json:"PropagatedMount"` - - // user - User PluginConfigUser `json:"User,omitempty"` - - // work dir - // Required: true - WorkDir string `json:"WorkDir"` - - // rootfs - Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` -} - -// PluginConfigArgs plugin config args -// swagger:model PluginConfigArgs -type PluginConfigArgs struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value []string `json:"Value"` -} - -// PluginConfigInterface The interface between Docker and the plugin -// swagger:model PluginConfigInterface -type PluginConfigInterface struct { - - // Protocol to use for clients connecting to the plugin. - ProtocolScheme string `json:"ProtocolScheme,omitempty"` - - // socket - // Required: true - Socket string `json:"Socket"` - - // types - // Required: true - Types []PluginInterfaceType `json:"Types"` -} - -// PluginConfigLinux plugin config linux -// swagger:model PluginConfigLinux -type PluginConfigLinux struct { - - // allow all devices - // Required: true - AllowAllDevices bool `json:"AllowAllDevices"` - - // capabilities - // Required: true - Capabilities []string `json:"Capabilities"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` -} - -// PluginConfigNetwork plugin config network -// swagger:model PluginConfigNetwork -type PluginConfigNetwork struct { - - // type - // Required: true - Type string `json:"Type"` -} - -// PluginConfigRootfs plugin config rootfs -// swagger:model PluginConfigRootfs -type PluginConfigRootfs struct { - - // diff ids - DiffIds []string `json:"diff_ids"` - - // type - Type string `json:"type,omitempty"` -} - -// PluginConfigUser plugin config user -// swagger:model PluginConfigUser -type PluginConfigUser struct { - - // g ID - GID uint32 `json:"GID,omitempty"` - - // UID - UID uint32 `json:"UID,omitempty"` -} - -// PluginSettings Settings that can be modified by users. 
-// swagger:model PluginSettings -type PluginSettings struct { - - // args - // Required: true - Args []string `json:"Args"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` - - // env - // Required: true - Env []string `json:"Env"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go deleted file mode 100644 index 569901067..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_device.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginDevice plugin device -// swagger:model PluginDevice -type PluginDevice struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // path - // Required: true - Path *string `json:"Path"` - - // settable - // Required: true - Settable []string `json:"Settable"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go deleted file mode 100644 index 32962dc2e..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_env.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginEnv plugin env -// swagger:model PluginEnv -type PluginEnv struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value *string `json:"Value"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go deleted file mode 100644 index c82f204e8..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginInterfaceType plugin interface type -// swagger:model PluginInterfaceType -type PluginInterfaceType struct { - - // capability - // Required: true - Capability string `json:"Capability"` - - // prefix - // Required: true - Prefix string `json:"Prefix"` - - // version - // Required: true - Version string `json:"Version"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go deleted file mode 100644 index 5c031cf8b..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_mount.go +++ /dev/null @@ -1,37 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// PluginMount plugin mount -// swagger:model PluginMount -type PluginMount struct { - - // description - // Required: true - Description string `json:"Description"` - - // destination - // Required: true - Destination string `json:"Destination"` - - // name - // Required: true - Name string `json:"Name"` - - // options - // Required: true - Options []string `json:"Options"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // source - // Required: true - Source *string `json:"Source"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go deleted file mode 100644 index 60d1fb5ad..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ /dev/null @@ -1,71 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "encoding/json" - "fmt" - "sort" -) - -// PluginsListResponse contains the response for the Engine API -type PluginsListResponse []*Plugin - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege - -func (s PluginPrivileges) Len() int { - return len(s) -} - -func (s PluginPrivileges) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -func (s PluginPrivileges) Swap(i, j int) { - sort.Strings(s[i].Value) - sort.Strings(s[j].Value) - s[i], s[j] = s[j], s[i] -} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go deleted file mode 100644 index d91234744..000000000 --- a/vendor/github.com/docker/docker/api/types/port.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// Port An open port on a container -// swagger:model Port -type Port struct { - - // Host IP address that the container's port is mapped to - IP string `json:"IP,omitempty"` - - // Port on the container - // Required: true - PrivatePort uint16 `json:"PrivatePort"` - - // Port exposed on the host - PublicPort uint16 `json:"PublicPort,omitempty"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index b83f5d7b2..05cb31075 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -92,7 +92,9 @@ type SearchResult struct { IsOfficial bool `json:"is_official"` // Name is the name of the repository Name string `json:"name"` - // IsAutomated indicates whether the result is automated + // IsAutomated indicates whether the result is automated. + // + // Deprecated: the "is_automated" field is deprecated and will always be "false" in the future. IsAutomated bool `json:"is_automated"` // Description is a textual description of the repository Description string `json:"description"` diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go deleted file mode 100644 index 74ea64b1b..000000000 --- a/vendor/github.com/docker/docker/api/types/service_update_response.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ServiceUpdateResponse service update response -// swagger:model ServiceUpdateResponse -type ServiceUpdateResponse struct { - - // Optional warning messages - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go deleted file mode 100644 index 20daebed1..000000000 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ /dev/null @@ -1,181 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types // import "github.com/docker/docker/api/types" - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds (Linux) - // Units: 100's of nanoseconds (Windows) - TotalUsage uint64 `json:"total_usage"` - - // Total CPU time consumed per core (Linux). Not used on Windows. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` - - // Time spent by tasks of the cgroup in kernel mode (Linux). - // Time spent by all container processes in kernel mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. 
- UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - - // Time spent by tasks of the cgroup in user mode (Linux). - // Time spent by all container processes in user mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - // CPU Usage. Linux and Windows. - CPUUsage CPUUsage `json:"cpu_usage"` - - // System Usage. Linux only. - SystemUsage uint64 `json:"system_cpu_usage,omitempty"` - - // Online CPUs. Linux only. - OnlineCPUs uint32 `json:"online_cpus,omitempty"` - - // Throttling Data. Linux only. - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. -type MemoryStats struct { - // Linux Memory Stats - - // current res_counter usage for memory - Usage uint64 `json:"usage,omitempty"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats,omitempty"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty"` - - // Windows Memory Stats - // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx - - // committed bytes - Commit uint64 `json:"commitbytes,omitempty"` - // peak committed bytes - CommitPeak uint64 `json:"commitpeakbytes,omitempty"` - // private working set - PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// StorageStats is the disk I/O stats for read/write on Windows. -type StorageStats struct { - ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` - ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` - WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` - WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` -} - -// NetworkStats aggregates the network stats of one container -type NetworkStats struct { - // Bytes received. Windows and Linux. - RxBytes uint64 `json:"rx_bytes"` - // Packets received. Windows and Linux. 
- RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - RxErrors uint64 `json:"rx_errors"` - // Incoming packets dropped. Windows and Linux. - RxDropped uint64 `json:"rx_dropped"` - // Bytes sent. Windows and Linux. - TxBytes uint64 `json:"tx_bytes"` - // Packets sent. Windows and Linux. - TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - TxErrors uint64 `json:"tx_errors"` - // Outgoing packets dropped. Windows and Linux. - TxDropped uint64 `json:"tx_dropped"` - // Endpoint ID. Not used on Linux. - EndpointID string `json:"endpoint_id,omitempty"` - // Instance ID. Not used on Linux. - InstanceID string `json:"instance_id,omitempty"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. - Limit uint64 `json:"limit,omitempty"` -} - -// Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { - // Common stats - Read time.Time `json:"read"` - PreRead time.Time `json:"preread"` - - // Linux specific stats, not populated on Windows. - PidsStats PidsStats `json:"pids_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - - // Windows specific stats, not populated on Linux. - NumProcs uint32 `json:"num_procs"` - StorageStats StorageStats `json:"storage_stats,omitempty"` - - // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` -} - -// StatsJSON is newly used Networks -type StatsJSON struct { - Stats - - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go deleted file mode 100644 index 82921cebc..000000000 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. 
- return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go deleted file mode 100644 index 5ded7dba8..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ /dev/null @@ -1,48 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "strconv" - "time" -) - -// Version represents the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// String implements fmt.Stringer interface. -func (v Version) String() string { - return strconv.FormatUint(v.Index, 10) -} - -// Meta is a base object inherited by most of the other once. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:"Labels"` -} - -// Driver represents a driver (network, logging, secrets backend). -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TLSInfo represents the TLS information about what CA certificate is trusted, -// and who the issuer for a TLS certificate is -type TLSInfo struct { - // TrustRoot is the trusted CA root certificate in PEM format - TrustRoot string `json:",omitempty"` - - // CertIssuer is the raw subject bytes of the issuer - CertIssuerSubject []byte `json:",omitempty"` - - // CertIssuerPublicKey is the raw public key bytes of the issuer - CertIssuerPublicKey []byte `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go deleted file mode 100644 index 16202ccce..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Config represents a config. -type Config struct { - ID string - Meta - Spec ConfigSpec -} - -// ConfigSpec represents a config specification from a config in swarm -type ConfigSpec struct { - Annotations - Data []byte `json:",omitempty"` - - // Templating controls whether and how to evaluate the config payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// ConfigReferenceFileTarget is a file target in a config reference -type ConfigReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// ConfigReferenceRuntimeTarget is a target for a config specifying that it -// isn't mounted into the container but instead has some other purpose. 
-type ConfigReferenceRuntimeTarget struct{} - -// ConfigReference is a reference to a config in swarm -type ConfigReference struct { - File *ConfigReferenceFileTarget `json:",omitempty"` - Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` - ConfigID string - ConfigName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go deleted file mode 100644 index af5e1c0bc..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ /dev/null @@ -1,80 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" -) - -// DNSConfig specifies DNS-related configuration in the resolver configuration file (resolv.conf) -// Detailed documentation is available in: -// http://man7.org/linux/man-pages/man5/resolv.conf.5.html -// The `nameserver`, `search`, and `options` directives are supported. -// TODO: `domain` is not supported yet. -type DNSConfig struct { - // Nameservers specifies the IP addresses of the name servers - Nameservers []string `json:",omitempty"` - // Search specifies the search list for host-name lookup - Search []string `json:",omitempty"` - // Options allows certain internal resolver variables to be modified - Options []string `json:",omitempty"` -} - -// SELinuxContext contains the SELinux labels of the container. -type SELinuxContext struct { - Disable bool - - User string - Role string - Type string - Level string -} - -// CredentialSpec for managed service account (Windows only) -type CredentialSpec struct { - Config string - File string - Registry string -} - -// Privileges defines the security options for the container. -type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext -} - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Hostname string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Groups []string `json:",omitempty"` - Privileges *Privileges `json:",omitempty"` - Init *bool `json:",omitempty"` - StopSignal string `json:",omitempty"` - TTY bool `json:",omitempty"` - OpenStdin bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Mounts []mount.Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` - Healthcheck *container.HealthConfig `json:",omitempty"` - // The format of extra hosts on swarmkit is specified in: - // http://man7.org/linux/man-pages/man5/hosts.5.html - // IP_address canonical_hostname [aliases...]
- Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` - Sysctls map[string]string `json:",omitempty"` - CapabilityAdd []string `json:",omitempty"` - CapabilityDrop []string `json:",omitempty"` - Ulimits []*units.Ulimit `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go deleted file mode 100644 index 98ef3284d..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ /dev/null @@ -1,121 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "github.com/docker/docker/api/types/network" -) - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. -type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - // TargetPort is the port inside the container - TargetPort uint32 `json:",omitempty"` - // PublishedPort is the port on the swarm hosts - PublishedPort uint32 `json:",omitempty"` - // PublishMode is the mode in which port is published - PublishMode PortConfigPublishMode `json:",omitempty"` -} - -// PortConfigPublishMode represents the mode in which the port is to -// be published. -type PortConfigPublishMode string - -const ( - // PortConfigPublishModeIngress is used for ports published - // for ingress load balancing using routing mesh. - PortConfigPublishModeIngress PortConfigPublishMode = "ingress" - // PortConfigPublishModeHost is used for ports published - // for direct host level access on the host where the task is running. - PortConfigPublishModeHost PortConfigPublishMode = "host" -) - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" - // PortConfigProtocolSCTP SCTP - PortConfigProtocolSCTP PortConfigProtocol = "sctp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. 
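For illustration, a ContainerSpec can be populated directly. This sketch assumes the upstream module github.com/docker/docker/api/types/swarm is importable (what is deleted here is only the vendored snapshot), and all values are hypothetical; the Hosts entry follows the hosts(5) layout documented above:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	spec := swarm.ContainerSpec{
		Image: "nginx:alpine",
		DNSConfig: &swarm.DNSConfig{
			Nameservers: []string{"10.0.0.2"},
			Search:      []string{"svc.cluster.local"},
			Options:     []string{"ndots:2"},
		},
		// Extra hosts use the documented layout:
		// IP_address canonical_hostname [aliases...]
		Hosts: []string{"192.0.2.10 registry.internal registry"},
	}
	fmt.Printf("%+v\n", spec)
}
```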
-type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` - ConfigFrom *network.ConfigReference `json:",omitempty"` - Scope string `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` - DriverOpts map[string]string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go deleted file mode 100644 index bb98d5eed..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ /dev/null @@ -1,139 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// Node represents a node. -type Node struct { - ID string - Meta - // Spec defines the desired state of the node as specified by the user. - // The system will honor this and will *never* modify it. - Spec NodeSpec `json:",omitempty"` - // Description encapsulates the properties of the Node as reported by the - // agent. - Description NodeDescription `json:",omitempty"` - // Status provides the current status of the node, as seen by the manager. - Status NodeStatus `json:",omitempty"` - // ManagerStatus provides the current status of the node's manager - // component, if the node is a manager. - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. -type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` - TLSInfo TLSInfo `json:",omitempty"` - CSIInfo []NodeCSIInfo `json:",omitempty"` -} - -// Platform represents the platform (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. 
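A hypothetical NetworkSpec wiring the IPAM types above together, roughly what `docker network create --attachable --subnet ... --gateway ...` submits. Again this assumes the upstream swarm package rather than the deleted vendored copy, and the names and addresses are made up:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	spec := swarm.NetworkSpec{
		Annotations: swarm.Annotations{Name: "app-net"},
		Attachable:  true, // standalone containers may attach
		IPAMOptions: &swarm.IPAMOptions{
			Driver: swarm.Driver{Name: "default"},
			Configs: []swarm.IPAMConfig{{
				Subnet:  "10.10.0.0/24",
				Range:   "10.10.0.0/25", // allocate addresses from this sub-range
				Gateway: "10.10.0.1",
			}},
		},
	}
	fmt.Printf("%+v\n", spec)
}
```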
-type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// NodeCSIInfo represents information about a CSI plugin available on the node -type NodeCSIInfo struct { - // PluginName is the name of the CSI plugin. - PluginName string `json:",omitempty"` - // NodeID is the ID of the node as reported by the CSI plugin. This is - // different from the swarm node ID. - NodeID string `json:",omitempty"` - // MaxVolumesPerNode is the maximum number of volumes that may be published - // to this node - MaxVolumesPerNode int64 `json:",omitempty"` - // AccessibleTopology indicates the location of this node in the CSI - // plugin's topology - AccessibleTopology *Topology `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. -type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) - -// Topology defines the CSI topology of this node. This type is a duplicate of -// github.com/docker/docker/api/types.Topology. Because the type definition -// is so simple and to avoid complicated structure or circular imports, we just -// duplicate it here. 
See that type for full documentation -type Topology struct { - Segments map[string]string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go deleted file mode 100644 index 0c77403cc..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ /dev/null @@ -1,27 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// RuntimeType is the type of runtime used for the TaskSpec -type RuntimeType string - -// RuntimeURL is the proto type url -type RuntimeURL string - -const ( - // RuntimeContainer is the container based runtime - RuntimeContainer RuntimeType = "container" - // RuntimePlugin is the plugin based runtime - RuntimePlugin RuntimeType = "plugin" - // RuntimeNetworkAttachment is the network attachment runtime - RuntimeNetworkAttachment RuntimeType = "attachment" - - // RuntimeURLContainer is the proto url for the container type - RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" - // RuntimeURLPlugin is the proto url for the plugin type - RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" -) - -// NetworkAttachmentSpec represents the runtime spec type for network -// attachment tasks -type NetworkAttachmentSpec struct { - ContainerID string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 98c2806c3..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto - -package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index e45045866..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,754 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plugin.proto - -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - plugin.proto - - It has these top-level messages: - PluginSpec - PluginPrivilege -*/ -package runtime - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. 
-type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func (m *PluginSpec) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Remote) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i += copy(dAtA[i:], m.Remote) - } - if len(m.Privileges) > 0 { - for _, msg := range m.Privileges { - dAtA[i] = 0x1a - i++ - i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Disabled { - dAtA[i] = 0x20 - i++ - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Env) > 0 { - for _, s := range m.Env { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *PluginPrivilege) Marshal() 
(dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Description) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PluginSpec) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozPlugin(x uint64) (n int) { - return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
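encodeVarintPlugin and sovPlugin above implement standard protobuf base-128 varints: seven payload bits per byte, with the 0x80 bit marking continuation. A standalone sketch of the same encoding and sizing logic, renamed encodeVarint/sizeVarint to make clear it is an illustration rather than the generated code:

```go
package main

import "fmt"

// encodeVarint mirrors encodeVarintPlugin: emit 7 bits per byte, setting
// the continuation bit (0x80) on every byte except the last.
func encodeVarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

// sizeVarint mirrors sovPlugin: the number of bytes the varint occupies.
func sizeVarint(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	buf := make([]byte, 10)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("% x size=%d\n", buf[:n], sizeVarint(300))
	// Output: ac 02 size=2 — 300 splits into 0x2c|0x80 then 0x02.
}
```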
ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen 
:= int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipPlugin(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) 
- } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } - -var fileDescriptorPlugin = []byte{ - // 256 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, - 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, - 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, - 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, - 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, - 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, - 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, - 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, - 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, - 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, - 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, - 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, - 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, - 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, - 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, - 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index 9ef169046..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; - repeated string env = 5; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go deleted file mode 100644 index d5213ec98..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ /dev/null @@ -1,36 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Secret represents a secret. 
-type Secret struct { - ID string - Meta - Spec SecretSpec -} - -// SecretSpec represents a secret specification from a secret in swarm -type SecretSpec struct { - Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store - - // Templating controls whether and how to evaluate the secret payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// SecretReferenceFileTarget is a file target in a secret reference -type SecretReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// SecretReference is a reference to a secret in swarm -type SecretReference struct { - File *SecretReferenceFileTarget - SecretID string - SecretName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go deleted file mode 100644 index 6eb452d24..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ /dev/null @@ -1,202 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus *UpdateStatus `json:",omitempty"` - - // ServiceStatus is an optional, extra field indicating the number of - // desired and running tasks. It is provided primarily as a shortcut to - // calculating these values client-side, which otherwise would require - // listing all tasks for a service, an operation that could be - // computationally and network expensive. - ServiceStatus *ServiceStatus `json:",omitempty"` - - // JobStatus is the status of a Service which is in one of ReplicatedJob or - // GlobalJob modes. It is absent on Replicated and Global services. - JobStatus *JobStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - RollbackConfig *UpdateConfig `json:",omitempty"` - - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` - ReplicatedJob *ReplicatedJob `json:",omitempty"` - GlobalJob *GlobalJob `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" - // UpdateStateRollbackStarted is the state with a rollback in progress.
- UpdateStateRollbackStarted UpdateState = "rollback_started" - // UpdateStateRollbackPaused is the state with a rollback paused. - UpdateStateRollbackPaused UpdateState = "rollback_paused" - // UpdateStateRollbackCompleted is the state with a rollback completed. - UpdateStateRollbackCompleted UpdateState = "rollback_completed" -) - -// UpdateStatus reports the status of a service update. -type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt *time.Time `json:",omitempty"` - CompletedAt *time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -// ReplicatedJob is a type of Service which executes a defined number of Tasks -// in parallel until the specified number of Tasks have succeeded. -type ReplicatedJob struct { - // MaxConcurrent indicates the maximum number of Tasks that should be - // executing simultaneously for this job at any given time. There may be - // fewer Tasks than MaxConcurrent executing simultaneously; for example, if - // there are fewer than MaxConcurrent tasks needed to reach - // TotalCompletions. - // - // If this field is empty, it will default to a max concurrency of 1. - MaxConcurrent *uint64 `json:",omitempty"` - - // TotalCompletions is the total number of Tasks desired to run to - // completion. - // - // If this field is empty, the value of MaxConcurrent will be used. - TotalCompletions *uint64 `json:",omitempty"` -} - -// GlobalJob is the type of a Service which executes a Task on every Node -// matching the Service's placement constraints. These tasks run to completion -// and then exit. -// -// This type is deliberately empty. -type GlobalJob struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" - // UpdateFailureActionRollback ROLLBACK - UpdateFailureActionRollback = "rollback" - - // UpdateOrderStopFirst STOP_FIRST - UpdateOrderStopFirst = "stop-first" - // UpdateOrderStartFirst START_FIRST - UpdateOrderStartFirst = "start-first" -) - -// UpdateConfig represents the update configuration. -type UpdateConfig struct { - // Maximum number of tasks to be updated in one iteration. - // 0 means unlimited parallelism. - Parallelism uint64 - - // Amount of time between updates. - Delay time.Duration `json:",omitempty"` - - // FailureAction is the action to take when an update fails. - FailureAction string `json:",omitempty"` - - // Monitor indicates how long to monitor a task for failure after it is - // created. If the task fails by ending up in one of the states - // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, - // this counts as a failure. If it fails after Monitor, it does not - // count as a failure. If Monitor is unspecified, a default value will - // be used. - Monitor time.Duration `json:",omitempty"` - - // MaxFailureRatio is the fraction of tasks that may fail during - // an update before the failure action is invoked. Any task created by - // the current update which ends up in one of the states REJECTED, - // COMPLETED or FAILED within Monitor from its creation counts as a - // failure. The number of failures is divided by the number of tasks - // being updated, and if this fraction is greater than - // MaxFailureRatio, the failure action is invoked.
- // - // If the failure action is CONTINUE, there is no effect. - // If the failure action is PAUSE, no more tasks will be updated until - // another update is started. - MaxFailureRatio float32 - - // Order indicates the order of operations when rolling out an updated - // task. Either the old task is shut down before the new task is - // started, or the new task is started before the old task is shut down. - Order string -} - -// ServiceStatus represents the number of running tasks in a service and the -// number of tasks desired to be running. -type ServiceStatus struct { - // RunningTasks is the number of tasks for the service actually in the - // Running state - RunningTasks uint64 - - // DesiredTasks is the number of tasks desired to be running by the - // service. For replicated services, this is the replica count. For global - // services, this is computed by taking the number of tasks with desired - // state of not-Shutdown. - DesiredTasks uint64 - - // CompletedTasks is the number of tasks in the state Completed, if this - // service is in ReplicatedJob or GlobalJob mode. This field must be - // cross-referenced with the service type, because the default value of 0 - // may mean that a service is not in a job mode, or it may mean that the - // job has yet to complete any tasks. - CompletedTasks uint64 -} - -// JobStatus is the status of a job-type service. -type JobStatus struct { - // JobIteration is a value increased each time a Job is executed, - // successfully or otherwise. "Executed", in this case, means the job as a - // whole has been started, not that an individual Task has been launched. A - // job is "Executed" when its ServiceSpec is updated. JobIteration can be - // used to disambiguate Tasks belonging to different executions of a job. - // - // Though JobIteration will increase with each subsequent execution, it may - // not necessarily increase by 1, and so JobIteration should not be used to - // keep track of the number of times a job has been executed. - JobIteration Version - - // LastExecution is the time that the job was last executed, as observed by - // Swarm manager. - LastExecution time.Time `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go deleted file mode 100644 index 3eae4b9b2..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ /dev/null @@ -1,237 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" -) - -// ClusterInfo represents info about the cluster for outputting in "info" -// it contains the same information as "Swarm", but without the JoinTokens -type ClusterInfo struct { - ID string - Meta - Spec Spec - TLSInfo TLSInfo - RootRotationInProgress bool - DefaultAddrPool []string - SubnetSize uint32 - DataPathPort uint32 -} - -// Swarm represents a swarm. -type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - // Worker is the join token workers may use to join the swarm. - Worker string - // Manager is the join token managers may use to join the swarm. - Manager string -} - -// Spec represents the spec of a swarm. 
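Putting the service types together: a hedged sketch of a ServiceSpec requesting six replicas, a conservative rolling update that rolls back once more than 25% of updated tasks fail, and a port published through the routing mesh. It assumes the upstream swarm package; all concrete values are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	replicas := uint64(6)
	spec := swarm.ServiceSpec{
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:     2,                // two tasks per iteration
			Delay:           10 * time.Second, // pause between iterations
			FailureAction:   swarm.UpdateFailureActionRollback,
			Monitor:         30 * time.Second, // failure window per new task
			MaxFailureRatio: 0.25,             // roll back past 25% failures
			Order:           swarm.UpdateOrderStartFirst,
		},
		EndpointSpec: &swarm.EndpointSpec{
			Mode: swarm.ResolutionModeVIP,
			Ports: []swarm.PortConfig{{
				Protocol:      swarm.PortConfigProtocolTCP,
				TargetPort:    80,   // port inside the container
				PublishedPort: 8080, // port on the swarm hosts
				PublishMode:   swarm.PortConfigPublishModeIngress,
			}},
		},
	}
	fmt.Printf("%+v\n", spec)
}
```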
-type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` - EncryptionConfig EncryptionConfig `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or - // node. If negative, never remove completed or failed tasks. - TaskHistoryRetentionLimit *int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an effect on new tasks. Old tasks - // will continue to use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// EncryptionConfig controls at-rest encryption of data and keys. -type EncryptionConfig struct { - // AutoLockManagers specifies whether or not managers' TLS keys and raft data - // should be encrypted at rest in such a way that they must be unlocked - // before the manager node starts up again. - AutoLockManagers bool -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - // SnapshotInterval is the number of log entries between snapshots. - SnapshotInterval uint64 `json:",omitempty"` - - // KeepOldSnapshots is the number of snapshots to keep beyond the - // current snapshot. - KeepOldSnapshots *uint64 `json:",omitempty"` - - // LogEntriesForSlowFollowers is the number of log entries to keep - // around to sync up slow followers after a snapshot is created. - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - - // ElectionTick is the number of ticks that a follower will wait for a message - // from the leader before becoming a candidate and starting an election. - // ElectionTick must be greater than HeartbeatTick. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - ElectionTick int - - // HeartbeatTick is the number of ticks between heartbeats. Every - // HeartbeatTick ticks, the leader will send a heartbeat to the - // followers. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - HeartbeatTick int -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - // HeartbeatPeriod defines how often agent should send heartbeats to - // dispatcher. - HeartbeatPeriod time.Duration `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - // NodeCertExpiry is the duration certificates should be issued for - NodeCertExpiry time.Duration `json:",omitempty"` - - // ExternalCAs is a list of CAs to which a manager node will make - // certificate signing requests for node certificates. - ExternalCAs []*ExternalCA `json:",omitempty"` - - // SigningCACert and SigningCAKey specify the desired signing root CA and - // root CA key for the swarm. When inspecting the cluster, the key will - // be redacted.
- SigningCACert string `json:",omitempty"` - SigningCAKey string `json:",omitempty"` - - // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate and key. - ForceRotate uint64 `json:",omitempty"` -} - -// ExternalCAProtocol represents type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines external CA to be used by the cluster. -type ExternalCA struct { - // Protocol is the protocol used by this external CA. - Protocol ExternalCAProtocol - - // URL is the URL where the external CA can be reached. - URL string - - // Options is a set of additional key/value pairs whose interpretation - // depends on the specified CA type. - Options map[string]string `json:",omitempty"` - - // CACert specifies which root CA is used by this external CA. This certificate must - // be in PEM format. - CACert string -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - DataPathPort uint32 - ForceNewCluster bool - Spec Spec - AutoLockManagers bool - Availability NodeAvailability - DefaultAddrPool []string - SubnetSize uint32 -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - RemoteAddrs []string - JoinToken string // accept by secret - Availability NodeAvailability -} - -// UnlockRequest is the request used to unlock a swarm. -type UnlockRequest struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// LocalNodeState represents the state of the local node. -type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" - // LocalNodeStateLocked LOCKED - LocalNodeStateLocked LocalNodeState = "locked" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int `json:",omitempty"` - Managers int `json:",omitempty"` - - Cluster *ClusterInfo `json:",omitempty"` - - Warnings []string `json:",omitempty"` -} - -// Status provides information about the current swarm status and role, -// obtained from the "Swarm" header in the API response. -type Status struct { - // NodeState represents the state of the node. - NodeState LocalNodeState - - // ControlAvailable indicates if the node is a swarm manager. - ControlAvailable bool -} - -// Peer represents a peer. -type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate.
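InitRequest maps closely onto `docker swarm init`. A hypothetical request (upstream swarm package assumed; addresses made up) that listens on all interfaces, advertises one concrete address, and carves /24 task subnets out of a custom address pool:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	req := swarm.InitRequest{
		ListenAddr:      "0.0.0.0:2377",   // bind for cluster management traffic
		AdvertiseAddr:   "192.0.2.1:2377", // address other nodes dial
		ForceNewCluster: false,
		DefaultAddrPool: []string{"10.20.0.0/16"}, // pool for overlay networks
		SubnetSize:      24,                        // carve /24 subnets from the pool
		Availability:    swarm.NodeAvailabilityActive,
	}
	fmt.Printf("%+v\n", req)
}
```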
-type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool - RotateManagerUnlockKey bool -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go deleted file mode 100644 index ad3eeca0b..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ /dev/null @@ -1,225 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/swarm/runtime" -) - -// TaskState represents the state of a task. -type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" - // TaskStateRemove REMOVE - TaskStateRemove TaskState = "remove" - // TaskStateOrphaned ORPHANED - TaskStateOrphaned TaskState = "orphaned" -) - -// Task represents a task. -type Task struct { - ID string - Meta - Annotations - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` - - // JobIteration is the JobIteration of the Service that this Task was - // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is - // used to determine which Tasks belong to which run of the job. This field - // is absent if the Service mode is Replicated or Global. - JobIteration *Version `json:",omitempty"` - - // Volumes is the list of VolumeAttachments for this task. It specifies - // which particular volumes are to be used by this particular task, and - // fulfilling what mounts in the spec. - Volumes []VolumeAttachment -} - -// TaskSpec represents the spec of a task. -type TaskSpec struct { - // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. - // PluginSpec is only used when the `Runtime` field is set to `plugin` - // NetworkAttachmentSpec is used if the `Runtime` field is set to - // `attachment`. - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` - NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` - - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. 
If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` - - // ForceUpdate is a counter that triggers an update even if no relevant - // parameters have been changed. - ForceUpdate uint64 - - Runtime RuntimeType `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory) which can be advertised by a -// node and requested to be reserved for a task. -type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// Limit describes limits on resources which can be requested by a task. -type Limit struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - Pids int64 `json:",omitempty"` -} - -// GenericResource represents a "user defined" resource which can -// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) -type GenericResource struct { - NamedResourceSpec *NamedGenericResource `json:",omitempty"` - DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` -} - -// NamedGenericResource represents a "user defined" resource which is defined -// as a string. -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) -type NamedGenericResource struct { - Kind string `json:",omitempty"` - Value string `json:",omitempty"` -} - -// DiscreteGenericResource represents a "user defined" resource which is defined -// as an integer -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to count the resource (SSD=5, HDD=3, ...) -type DiscreteGenericResource struct { - Kind string `json:",omitempty"` - Value int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Limit `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` - Preferences []PlacementPreference `json:",omitempty"` - MaxReplicas uint64 `json:",omitempty"` - - // Platforms stores all the platforms that the image can run on. - // This field is used in the platform filter for scheduling. If empty, - // then the platform filter is off, meaning there are no scheduling restrictions. - Platforms []Platform `json:",omitempty"` -} - -// PlacementPreference provides a way to make the scheduler aware of factors -// such as topology. -type PlacementPreference struct { - Spread *SpreadOver -} - -// SpreadOver is a scheduling preference that instructs the scheduler to spread -// tasks evenly over groups of nodes identified by labels. -type SpreadOver struct { - // label descriptor, such as engine.labels.az - SpreadDescriptor string -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. 
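GenericResource is how swarm models user-defined resources such as GPUs: a discrete spec counts fungible units, while a named spec pins a specific device (e.g. GPU=UUID-1). A sketch of a reservation for two GPUs alongside CPU and memory; the upstream swarm package is assumed and the "GPU" kind string is an example, not a fixed constant:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	res := &swarm.ResourceRequirements{
		Reservations: &swarm.Resources{
			NanoCPUs:    2_000_000_000, // 2 CPUs, expressed in nano-CPUs
			MemoryBytes: 1 << 30,       // 1 GiB
			GenericResources: []swarm.GenericResource{{
				// Two fungible GPUs; NamedResourceSpec would pin a device.
				DiscreteResourceSpec: &swarm.DiscreteGenericResource{
					Kind:  "GPU",
					Value: 2,
				},
			}},
		},
	}
	fmt.Printf("%+v\n", res)
}
```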
-type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. -type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus *ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. -type ContainerStatus struct { - ContainerID string - PID int - ExitCode int -} - -// PortStatus represents the port status of a task's host ports whose -// service has published host ports -type PortStatus struct { - Ports []PortConfig `json:",omitempty"` -} - -// VolumeAttachment contains the data associating a Volume with a Task. -type VolumeAttachment struct { - // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId. - ID string `json:",omitempty"` - - // Source, together with Target, indicates the Mount, as specified in the - // ContainerSpec, that this volume fulfills. - Source string `json:",omitempty"` - - // Target, together with Source, indicates the Mount, as specified - // in the ContainerSpec, that this volume fulfills. - Target string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go deleted file mode 100644 index b413e0200..000000000 --- a/vendor/github.com/docker/docker/api/types/types.go +++ /dev/null @@ -1,811 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "errors" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/volume" - "github.com/docker/go-connections/nat" -) - -const ( - // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams - MediaTypeRawStream = "application/vnd.docker.raw-stream" - - // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams - MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" -) - -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string `json:",omitempty"` - Layers []string `json:",omitempty"` -} - -// ImageInspect contains response of Engine API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - // ID is the content-addressable ID of an image. - // - // This identifier is a content-addressable digest calculated from the - // image's configuration (which includes the digests of layers used by - // the image). - // - // Note that this digest differs from the `RepoDigests` below, which - // holds digests of image manifests that reference the image. - ID string `json:"Id"` - - // RepoTags is a list of image names/tags in the local image cache that - // reference this image.
- // - // Multiple image tags can refer to the same image, and this list may be - // empty if no tags reference the image, in which case the image is - // "untagged", in which case it can still be referenced by its ID. - RepoTags []string - - // RepoDigests is a list of content-addressable digests of locally available - // image manifests that the image is referenced from. Multiple manifests can - // refer to the same image. - // - // These digests are usually only available if the image was either pulled - // from a registry, or if the image was pushed to a registry, which is when - // the manifest is generated and its digest calculated. - RepoDigests []string - - // Parent is the ID of the parent image. - // - // Depending on how the image was created, this field may be empty and - // is only set for images that were built/created locally. This field - // is empty if the image was pulled from an image registry. - Parent string - - // Comment is an optional message that can be set when committing or - // importing the image. - Comment string - - // Created is the date and time at which the image was created, formatted in - // RFC 3339 nano-seconds (time.RFC3339Nano). - Created string - - // Container is the ID of the container that was used to create the image. - // - // Depending on how the image was created, this field may be empty. - Container string - - // ContainerConfig is an optional field containing the configuration of the - // container that was last committed when creating the image. - // - // Previous versions of Docker builder used this field to store build cache, - // and it is not in active use anymore. - ContainerConfig *container.Config - - // DockerVersion is the version of Docker that was used to build the image. - // - // Depending on how the image was created, this field may be empty. - DockerVersion string - - // Author is the name of the author that was specified when committing the - // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. - Author string - Config *container.Config - - // Architecture is the hardware CPU architecture that the image runs on. - Architecture string - - // Variant is the CPU architecture variant (presently ARM-only). - Variant string `json:",omitempty"` - - // OS is the Operating System the image is built to run on. - Os string - - // OsVersion is the version of the Operating System the image is built to - // run on (especially for Windows). - OsVersion string `json:",omitempty"` - - // Size is the total size of the image including all layers it is composed of. - Size int64 - - // VirtualSize is the total size of the image including all layers it is - // composed of. - // - // In versions of Docker before v1.10, this field was calculated from - // the image itself and all of its parent images. Docker v1.10 and up - // store images self-contained, and no longer use a parent-chain, making - // this field an equivalent of the Size field. - // - // Deprecated: Unused in API 1.43 and up, but kept for backward compatibility with older API versions. - VirtualSize int64 `json:"VirtualSize,omitempty"` - - // GraphDriver holds information about the storage driver used to store the - // container's and image's filesystem. - GraphDriver GraphDriverData - - // RootFS contains information about the image's RootFS, including the - // layer IDs. - RootFS RootFS - - // Metadata of the image in the local cache. - // - // This information is local to the daemon, and not part of the image itself. 
- Metadata ImageMetadata -} - -// ImageMetadata contains engine-local data about the image -type ImageMetadata struct { - // LastTagTime is the date and time at which the image was last tagged. - LastTagTime time.Time `json:",omitempty"` -} - -// Container contains response of Engine API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Engine API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. -type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerStats contains response of Engine API: -// GET "/stats" -type ContainerStats struct { - Body io.ReadCloser `json:"body"` - OSType string `json:"ostype"` -} - -// Ping contains response of Engine API: -// GET "/_ping" -type Ping struct { - APIVersion string - OSType string - Experimental bool - BuilderVersion BuilderVersion - - // SwarmStatus provides information about the current swarm status of the - // engine, obtained from the "Swarm" header in the API response. - // - // It can be a nil struct if the API version does not provide this header - // in the ping response, or if an error occurred, in which case the client - // should use other ways to get the current swarm status, such as the /swarm - // endpoint. - SwarmStatus *swarm.Status -} - -// ComponentVersion describes the version information for a specific component. -type ComponentVersion struct { - Name string - Version string - Details map[string]string `json:",omitempty"` -} - -// Version contains response of Engine API: -// GET "/version" -type Version struct { - Platform struct{ Name string } `json:",omitempty"` - Components []ComponentVersion `json:",omitempty"` - - // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility - - Version string - APIVersion string `json:"ApiVersion"` - MinAPIVersion string `json:"MinAPIVersion,omitempty"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. -type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. 
-} - -// Info contains response of Engine API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - PidsLimit bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - LoggingDriver string - CgroupDriver string - CgroupVersion string `json:",omitempty"` - NEventsListener int - KernelVersion string - OperatingSystem string - OSVersion string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - GenericResources []swarm.GenericResource - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool - Isolation container.Isolation - InitBinary string - ContainerdCommit Commit - RuncCommit Commit - InitCommit Commit - SecurityOptions []string - ProductLicense string `json:",omitempty"` - DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - - // Warnings contains a slice of warnings that occurred while collecting - // system information. These warnings are intended to be informational - // messages for the user, and are not intended to be parsed / used for - // other purposes, as they do not have a fixed format. - Warnings []string -} - -// KeyValue holds a key/value pair -type KeyValue struct { - Key, Value string -} - -// NetworkAddressPool is a temp struct used by Info struct -type NetworkAddressPool struct { - Base string - Size int -} - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// SecurityOpt -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - for _, s := range strings.Split(opt, ",") { - k, v, ok := strings.Cut(s, "=") - if !ok { - return nil, fmt.Errorf("invalid security option %q", s) - } - if k == "" || v == "" { - return nil, errors.New("invalid empty security option") - } - if k == "name" { - secopt.Name = v - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v}) - } - so = append(so, secopt) - } - return so, nil -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. 
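DecodeSecurityOptions, removed above, is the only non-trivial parsing in this file, so a short sketch of the behavior it implemented may help; the inputs mirror what a daemon reports, including the bare-name form emitted by pre-1.13 daemons:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	raw := []string{
		"apparmor",                     // pre-1.13 format: bare name, no "="
		"name=seccomp,profile=default", // comma-separated key=value form
	}
	opts, err := types.DecodeSecurityOptions(raw)
	if err != nil {
		panic(err)
	}
	for _, o := range opts {
		fmt.Printf("%s %v\n", o.Name, o.Options)
	}
	// Output:
	//   apparmor []
	//   seccomp [{profile default}]
}
```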
It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string - // List of Log plugins registered - Log []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool - // Terminal size [height, width], unused if Tty == false - ConsoleSize *[2]uint `json:",omitempty"` -} - -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - NoHealthcheck = "none" // Indicates there is no healthcheck - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerNode stores information about the node that a container -// is running on. 
It's only used by the Docker Swarm standalone API -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} - -// ContainerJSONBase contains response of Engine API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API - Name string - RestartCount int - Driver string - Platform string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -// This is used for reporting the mountpoints in use by a container. -type MountPoint struct { - // Type is the type of mount, see `Type` definitions in - // github.com/docker/docker/api/types/mount.Type - Type mount.Type `json:",omitempty"` - - // Name is the name reference to the underlying data defined by `Source` - // e.g., the volume name. 
- Name string `json:",omitempty"` - - // Source is the source location of the mount. - // - // For volumes, this contains the storage location of the volume (within - // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains - // the source (host) part of the bind-mount. For `tmpfs` mount points, this - // field is empty. - Source string - - // Destination is the path relative to the container root (`/`) where the - // Source is mounted inside the container. - Destination string - - // Driver is the volume driver used to create the volume (if it is a volume). - Driver string `json:",omitempty"` - - // Mode is a comma separated list of options supplied by the user when - // creating the bind/volume mount. - // - // The default is platform-specific (`"z"` on Linux, empty on Windows). - Mode string - - // RW indicates whether the mount is mounted writable (read-write). - RW bool - - // Propagation describes how mounts are propagated from the host into the - // mount point, and vice-versa. Refer to the Linux kernel documentation - // for details: - // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt - // - // This field is not used on Windows. - Propagation mount.Propagation -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. - ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network - Services map[string]network.ServiceInfo `json:",omitempty"` -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - // Check for networks with duplicate names. - // Network is primarily keyed based on a random ID and not on the name. 
- // Network name is strictly a user-friendly alias to the network - // which is uniquely identified using ID. - // And there is no guaranteed way to check for duplicates. - // Option CheckDuplicate is there to provide a best effort checking of any networks - // which has the same name but it is not guaranteed to catch all name collisions. - CheckDuplicate bool - Driver string - Scope string - EnableIPv6 bool - IPAM *network.IPAM - Internal bool - Attachable bool - Ingress bool - ConfigOnly bool - ConfigFrom *network.ConfigReference - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. -type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// NetworkInspectOptions holds parameters to inspect network -type NetworkInspectOptions struct { - Scope string - Verbose bool -} - -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - // "Legacy" runtime configuration for runc-compatible runtimes. - - Path string `json:"path,omitempty"` - Args []string `json:"runtimeArgs,omitempty"` - - // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. - - Type string `json:"runtimeType,omitempty"` - Options map[string]interface{} `json:"options,omitempty"` - - // This is exposed here only for internal use - ShimConfig *ShimConfig `json:"-"` -} - -// ShimConfig is used by runtime to configure containerd shims -type ShimConfig struct { - Binary string - Opts interface{} -} - -// DiskUsageObject represents an object type used for disk usage query filtering. -type DiskUsageObject string - -const ( - // ContainerObject represents a container DiskUsageObject. - ContainerObject DiskUsageObject = "container" - // ImageObject represents an image DiskUsageObject. - ImageObject DiskUsageObject = "image" - // VolumeObject represents a volume DiskUsageObject. - VolumeObject DiskUsageObject = "volume" - // BuildCacheObject represents a build-cache DiskUsageObject. - BuildCacheObject DiskUsageObject = "build-cache" -) - -// DiskUsageOptions holds parameters for system disk usage query. -type DiskUsageOptions struct { - // Types specifies what object types to include in the response. If empty, - // all object types are returned. - Types []DiskUsageObject -} - -// DiskUsage contains response of Engine API: -// GET "/system/df" -type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*volume.Volume - BuildCache []*BuildCache - BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. 
-} - -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" -type ContainersPruneReport struct { - ContainersDeleted []string - SpaceReclaimed uint64 -} - -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune" -type VolumesPruneReport struct { - VolumesDeleted []string - SpaceReclaimed uint64 -} - -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" -type ImagesPruneReport struct { - ImagesDeleted []ImageDeleteResponseItem - SpaceReclaimed uint64 -} - -// BuildCachePruneReport contains the response for Engine API: -// POST "/build/prune" -type BuildCachePruneReport struct { - CachesDeleted []string - SpaceReclaimed uint64 -} - -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" -type NetworksPruneReport struct { - NetworksDeleted []string -} - -// SecretCreateResponse contains the information returned to a client -// on the creation of a new secret. -type SecretCreateResponse struct { - // ID is the id of the created secret. - ID string -} - -// SecretListOptions holds parameters to list secrets -type SecretListOptions struct { - Filters filters.Args -} - -// ConfigCreateResponse contains the information returned to a client -// on the creation of a new config. -type ConfigCreateResponse struct { - // ID is the id of the created config. - ID string -} - -// ConfigListOptions holds parameters to list configs -type ConfigListOptions struct { - Filters filters.Args -} - -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. -type PushResult struct { - Tag string - Digest string - Size int -} - -// BuildResult contains the image id of a successful build -type BuildResult struct { - ID string -} - -// BuildCache contains information about a build cache record. -type BuildCache struct { - // ID is the unique ID of the build cache record. - ID string - // Parent is the ID of the parent build cache record. - // - // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. - Parent string `json:"Parent,omitempty"` - // Parents is the list of parent build cache record IDs. - Parents []string `json:"Parents,omitempty"` - // Type is the cache record type. - Type string - // Description is a description of the build-step that produced the build cache. - Description string - // InUse indicates if the build cache is in use. - InUse bool - // Shared indicates if the build cache is shared. - Shared bool - // Size is the amount of disk space used by the build cache (in bytes). - Size int64 - // CreatedAt is the date and time at which the build cache was created. - CreatedAt time.Time - // LastUsedAt is the date and time at which the build cache was last used.
- LastUsedAt *time.Time - UsageCount int -} - -// BuildCachePruneOptions hold parameters to prune the build cache -type BuildCachePruneOptions struct { - All bool - KeepStorage int64 - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go deleted file mode 100644 index 55fc5d389..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go +++ /dev/null @@ -1,420 +0,0 @@ -package volume - -import ( - "github.com/docker/docker/api/types/swarm" -) - -// ClusterVolume contains options and information specific to, and only present -// on, Swarm CSI cluster volumes. -type ClusterVolume struct { - // ID is the Swarm ID of the volume. Because cluster volumes are Swarm - // objects, they have an ID, unlike non-cluster volumes, which only have a - // Name. This ID can be used to refer to the cluster volume. - ID string - - // Meta is the swarm metadata about this volume. - swarm.Meta - - // Spec is the cluster-specific options from which this volume is derived. - Spec ClusterVolumeSpec - - // PublishStatus contains the status of the volume as it pertains to its - // publishing on Nodes. - PublishStatus []*PublishStatus `json:",omitempty"` - - // Info is information about the global status of the volume. - Info *Info `json:",omitempty"` -} - -// ClusterVolumeSpec contains the spec used to create this volume. -type ClusterVolumeSpec struct { - // Group defines the volume group of this volume. Volumes belonging to the - // same group can be referred to by group name when creating Services. - // Referring to a volume by group instructs swarm to treat volumes in that - // group interchangeably for the purpose of scheduling. Volumes with an - // empty string for a group technically all belong to the same, emptystring - // group. - Group string `json:",omitempty"` - - // AccessMode defines how the volume is used by tasks. - AccessMode *AccessMode `json:",omitempty"` - - // AccessibilityRequirements specifies where in the cluster a volume must - // be accessible from. - // - // This field must be empty if the plugin does not support - // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the - // plugin does not support it, volume will not be created. - // - // If AccessibilityRequirements is empty, but the plugin does support - // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire - // cluster is a valid target for the volume. - AccessibilityRequirements *TopologyRequirement `json:",omitempty"` - - // CapacityRange defines the desired capacity that the volume should be - // created with. If nil, the plugin will decide the capacity. - CapacityRange *CapacityRange `json:",omitempty"` - - // Secrets defines Swarm Secrets that are passed to the CSI storage plugin - // when operating on this volume. - Secrets []Secret `json:",omitempty"` - - // Availability is the Volume's desired availability. Analogous to Node - // Availability, this allows the user to take volumes offline in order to - // update or delete them. - Availability Availability `json:",omitempty"` -} - -// Availability specifies the availability of the volume. -type Availability string - -const ( - // AvailabilityActive indicates that the volume is active and fully - // schedulable on the cluster. - AvailabilityActive Availability = "active" - - // AvailabilityPause indicates that no new workloads should use the - // volume, but existing workloads can continue to use it. 
- AvailabilityPause Availability = "pause" - - // AvailabilityDrain indicates that all workloads using this volume - // should be rescheduled, and the volume unpublished from all nodes. - AvailabilityDrain Availability = "drain" -) - -// AccessMode defines the access mode of a volume. -type AccessMode struct { - // Scope defines the set of nodes this volume can be used on at one time. - Scope Scope `json:",omitempty"` - - // Sharing defines the number and way that different tasks can use this - // volume at one time. - Sharing SharingMode `json:",omitempty"` - - // MountVolume defines options for using this volume as a Mount-type - // volume. - // - // Either BlockVolume or MountVolume, but not both, must be present. - MountVolume *TypeMount `json:",omitempty"` - - // BlockVolume defines options for using this volume as a Block-type - // volume. - // - // Either BlockVolume or MountVolume, but not both, must be present. - BlockVolume *TypeBlock `json:",omitempty"` -} - -// Scope defines the Scope of a Cluster Volume. This is how many nodes a -// Volume can be accessed simultaneously on. -type Scope string - -const ( - // ScopeSingleNode indicates the volume can be used on one node at a - // time. - ScopeSingleNode Scope = "single" - - // ScopeMultiNode indicates the volume can be used on many nodes at - // the same time. - ScopeMultiNode Scope = "multi" -) - -// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a -// Volume at the same time can use it. -type SharingMode string - -const ( - // SharingNone indicates that only one Task may use the Volume at a - // time. - SharingNone SharingMode = "none" - - // SharingReadOnly indicates that the Volume may be shared by any - // number of Tasks, but they must be read-only. - SharingReadOnly SharingMode = "readonly" - - // SharingOneWriter indicates that the Volume may be shared by any - // number of Tasks, but all after the first must be read-only. - SharingOneWriter SharingMode = "onewriter" - - // SharingAll means that the Volume may be shared by any number of - // Tasks, as readers or writers. - SharingAll SharingMode = "all" -) - -// TypeBlock defines options for using a volume as a block-type volume. -// -// Intentionally empty. -type TypeBlock struct{} - -// TypeMount contains options for using a volume as a Mount-type -// volume. -type TypeMount struct { - // FsType specifies the filesystem type for the mount volume. Optional. - FsType string `json:",omitempty"` - - // MountFlags defines flags to pass when mounting the volume. Optional. - MountFlags []string `json:",omitempty"` -} - -// TopologyRequirement expresses the user's requirements for a volume's -// accessible topology. -type TopologyRequirement struct { - // Requisite specifies a list of Topologies, at least one of which the - // volume must be accessible from. - // - // Taken verbatim from the CSI Spec: - // - // Specifies the list of topologies the provisioned volume MUST be - // accessible from. - // This field is OPTIONAL. If TopologyRequirement is specified either - // requisite or preferred or both MUST be specified. - // - // If requisite is specified, the provisioned volume MUST be - // accessible from at least one of the requisite topologies. - // - // Given - // x = number of topologies provisioned volume is accessible from - // n = number of requisite topologies - // The CO MUST ensure n >= 1. 
The SP MUST ensure x >= 1 - // If x==n, then the SP MUST make the provisioned volume available to - // all topologies from the list of requisite topologies. If it is - // unable to do so, the SP MUST fail the CreateVolume call. - // For example, if a volume should be accessible from a single zone, - // and requisite = - // {"region": "R1", "zone": "Z2"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and the "zone" "Z2". - // Similarly, if a volume should be accessible from two zones, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and both "zone" "Z2" and "zone" "Z3". - // - // If x<n, then the SP SHOULD choose x unique topologies from the list - // of requisite topologies. If it is unable to do so, the SP MUST fail - // the CreateVolume call. - // For example, if a volume should be accessible from a single zone, - // and requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // then the SP may choose to make the provisioned volume available in - // either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1". - // Similarly, if a volume should be accessible from two zones, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"} - // then the provisioned volume MUST be accessible from any combination - // of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and - // "R1/Z4", or "R1/Z3" and "R1/Z4". - // - // If x>n, then the SP MUST make the provisioned volume available from - // all topologies from the list of requisite topologies and MAY choose - // the remaining x-n unique topologies from the list of all possible - // topologies. If it is unable to do so, the SP MUST fail the - // CreateVolume call. - // For example, if a volume should be accessible from two zones, and - // requisite = - // {"region": "R1", "zone": "Z2"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and the "zone" "Z2" and the SP may select the second zone - // independently, e.g. "R1/Z4". - Requisite []Topology `json:",omitempty"` - - // Preferred is a list of Topologies that the volume should attempt to be - // provisioned in. - // - // Taken from the CSI spec: - // - // Specifies the list of topologies the CO would prefer the volume to - // be provisioned in. - // - // This field is OPTIONAL. If TopologyRequirement is specified either - // requisite or preferred or both MUST be specified. - // - // An SP MUST attempt to make the provisioned volume available using - // the preferred topologies in order from first to last. - // - // If requisite is specified, all topologies in preferred list MUST - // also be present in the list of requisite topologies. - // - // If the SP is unable to to make the provisioned volume available - // from any of the preferred topologies, the SP MAY choose a topology - // from the list of requisite topologies. - // If the list of requisite topologies is not specified, then the SP - // MAY choose from the list of all possible topologies. - // If the list of requisite topologies is specified and the SP is - // unable to to make the provisioned volume available from any of the - // requisite topologies it MUST fail the CreateVolume call. - // - // Example 1: - // Given a volume should be accessible from a single zone, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // preferred = - // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume - // available from "zone" "Z3" in the "region" "R1" and fall back to - // "zone" "Z2" in the "region" "R1" if that is not possible. - // - // Example 2: - // Given a volume should be accessible from a single zone, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} - // preferred = - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z2"} - // then the the SP SHOULD first attempt to make the provisioned volume - // accessible from "zone" "Z4" in the "region" "R1" and fall back to - // "zone" "Z2" in the "region" "R1" if that is not possible.
If that - // is not possible, the SP may choose between either the "zone" - // "Z3" or "Z5" in the "region" "R1". - // - // Example 3: - // Given a volume should be accessible from TWO zones (because an - // opaque parameter in CreateVolumeRequest, for example, specifies - // the volume is accessible from two zones, aka synchronously - // replicated), and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} - // preferred = - // {"region": "R1", "zone": "Z5"}, - // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume - // accessible from the combination of the two "zones" "Z5" and "Z3" in - // the "region" "R1". If that's not possible, it should fall back to - // a combination of "Z5" and other possibilities from the list of - // requisite. If that's not possible, it should fall back to a - // combination of "Z3" and other possibilities from the list of - // requisite. If that's not possible, it should fall back to a - // combination of other possibilities from the list of requisite. - Preferred []Topology `json:",omitempty"` -} - -// Topology is a map of topological domains to topological segments. -// -// This description is taken verbatim from the CSI Spec: -// -// A topological domain is a sub-division of a cluster, like "region", -// "zone", "rack", etc. -// A topological segment is a specific instance of a topological domain, -// like "zone3", "rack3", etc. -// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an OPTIONAL prefix and name, separated -// by a slash (/), for example: "com.company.example/zone". -// The key name segment is REQUIRED. The prefix is OPTIONAL. -// The key name MUST be 63 characters or less, begin and end with an -// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), -// underscores (_), dots (.), or alphanumerics in between, for example -// "zone". -// The key prefix MUST be 63 characters or less, begin and end with a -// lower-case alphanumeric character ([a-z0-9]), contain only -// dashes (-), dots (.), or lower-case alphanumerics in between, and -// follow domain name notation format -// (https://tools.ietf.org/html/rfc1035#section-2.3.1). -// The key prefix SHOULD include the plugin's host company name and/or -// the plugin name, to minimize the possibility of collisions with keys -// from other plugins. -// If a key prefix is specified, it MUST be identical across all -// topology keys returned by the SP (across all RPCs). -// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" -// MUST not both exist. -// Each value (topological segment) MUST contain 1 or more strings. -// Each string MUST be 63 characters or less and begin and end with an -// alphanumeric character with '-', '_', '.', or alphanumerics in -// between. -type Topology struct { - Segments map[string]string `json:",omitempty"` -} - -// CapacityRange describes the minimum and maximum capacity a volume should be -// created with -type CapacityRange struct { - // RequiredBytes specifies that a volume must be at least this big. The - // value of 0 indicates an unspecified minimum. - RequiredBytes int64 - - // LimitBytes specifies that a volume must not be bigger than this. 
The - // value of 0 indicates an unspecified maximum - LimitBytes int64 -} - -// Secret represents a Swarm Secret value that must be passed to the CSI -// storage plugin when operating on this Volume. It represents one key-value -// pair of possibly many. -type Secret struct { - // Key is the name of the key of the key-value pair passed to the plugin. - Key string - - // Secret is the swarm Secret object from which to read data. This can be a - // Secret name or ID. The Secret data is retrieved by Swarm and used as the - // value of the key-value pair passed to the plugin. - Secret string -} - -// PublishState represents the state of a Volume as it pertains to its -// use on a particular Node. -type PublishState string - -const ( - // StatePending indicates that the volume should be published on - // this node, but the call to ControllerPublishVolume has not been - // successfully completed yet and the result recorded by swarmkit. - StatePending PublishState = "pending-publish" - - // StatePublished means the volume is published successfully to the node. - StatePublished PublishState = "published" - - // StatePendingNodeUnpublish indicates that the Volume should be - // unpublished on the Node, and we're waiting for confirmation that it has - // done so. After the Node has confirmed that the Volume has been - // unpublished, the state will move to StatePendingUnpublish. - StatePendingNodeUnpublish PublishState = "pending-node-unpublish" - - // StatePendingUnpublish means the volume is still published to the node - // by the controller, awaiting the operation to unpublish it. - StatePendingUnpublish PublishState = "pending-controller-unpublish" -) - -// PublishStatus represents the status of the volume as published to an -// individual node -type PublishStatus struct { - // NodeID is the ID of the swarm node this Volume is published to. - NodeID string `json:",omitempty"` - - // State is the publish state of the volume. - State PublishState `json:",omitempty"` - - // PublishContext is the PublishContext returned by the CSI plugin when - // a volume is published. - PublishContext map[string]string `json:",omitempty"` -} - -// Info contains information about the Volume as a whole as provided by -// the CSI storage plugin. -type Info struct { - // CapacityBytes is the capacity of the volume in bytes. A value of 0 - // indicates that the capacity is unknown. - CapacityBytes int64 `json:",omitempty"` - - // VolumeContext is the context originating from the CSI storage plugin - // when the Volume is created. - VolumeContext map[string]string `json:",omitempty"` - - // VolumeID is the ID of the Volume as seen by the CSI storage plugin. This - // is distinct from the Volume's Swarm ID, which is the ID used by all of - // the Docker Engine to refer to the Volume. If this field is blank, then - // the Volume has not been successfully created yet. - VolumeID string `json:",omitempty"` - - // AccessibleTopolgoy is the topology this volume is actually accessible - // from. - AccessibleTopology []Topology `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/docker/docker/api/types/volume/create_options.go deleted file mode 100644 index 37c41a609..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/create_options.go +++ /dev/null @@ -1,29 +0,0 @@ -package volume - -// This file was generated by the swagger tool. 
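Pulling the cluster-volume types together, here is a hedged sketch of a ClusterVolumeSpec describing a single-node, exclusive, ext4-mounted volume; every value is illustrative and uses only the types deleted above:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/volume"
)

func main() {
	spec := volume.ClusterVolumeSpec{
		Group: "fast-storage",
		AccessMode: &volume.AccessMode{
			Scope:   volume.ScopeSingleNode,
			Sharing: volume.SharingNone,
			// Exactly one of MountVolume or BlockVolume may be set.
			MountVolume: &volume.TypeMount{FsType: "ext4"},
		},
		CapacityRange: &volume.CapacityRange{
			RequiredBytes: 10 << 30, // at least 10 GiB
			LimitBytes:    0,        // 0 means no upper bound
		},
		Availability: volume.AvailabilityActive,
	}
	fmt.Printf("%+v\n", spec)
}
```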
-// Editing this file might prove futile when you re-run the swagger generate command - -// CreateOptions VolumeConfig -// -// Volume configuration -// swagger:model CreateOptions -type CreateOptions struct { - - // cluster volume spec - ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"` - - // Name of the volume driver to use. - Driver string `json:"Driver,omitempty"` - - // A mapping of driver options and values. These options are - // passed directly to the driver and are driver specific. - // - DriverOpts map[string]string `json:"DriverOpts,omitempty"` - - // User-defined key/value metadata. - Labels map[string]string `json:"Labels,omitempty"` - - // The new volume's name. If not specified, Docker generates a name. - // - Name string `json:"Name,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/docker/docker/api/types/volume/list_response.go deleted file mode 100644 index ca5192a2a..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/list_response.go +++ /dev/null @@ -1,18 +0,0 @@ -package volume - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ListResponse VolumeListResponse -// -// Volume list response -// swagger:model ListResponse -type ListResponse struct { - - // List of volumes - Volumes []*Volume `json:"Volumes"` - - // Warnings that occurred when fetching the list of volumes. - // - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go deleted file mode 100644 index 8b0dd1389..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/options.go +++ /dev/null @@ -1,8 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -import "github.com/docker/docker/api/types/filters" - -// ListOptions holds parameters to list volumes. -type ListOptions struct { - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume.go b/vendor/github.com/docker/docker/api/types/volume/volume.go deleted file mode 100644 index ea7d555e5..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume.go +++ /dev/null @@ -1,75 +0,0 @@ -package volume - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Volume volume -// swagger:model Volume -type Volume struct { - - // cluster volume - ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` - - // Date/Time the volume was created. - CreatedAt string `json:"CreatedAt,omitempty"` - - // Name of the volume driver used by the volume. - // Required: true - Driver string `json:"Driver"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // Mount path of the volume on the host. - // Required: true - Mountpoint string `json:"Mountpoint"` - - // Name of the volume. - // Required: true - Name string `json:"Name"` - - // The driver specific options used when creating the volume. - // - // Required: true - Options map[string]string `json:"Options"` - - // The level at which the volume exists. Either `global` for cluster-wide, - // or `local` for machine level. - // - // Required: true - Scope string `json:"Scope"` - - // Low-level details about the volume, provided by the volume driver. 
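The swagger-generated option types are plain data carriers. For instance, a CreateOptions for a tmpfs-backed volume might look like the following; the driver options shown are the conventional `local` driver mount options and are assumed here for illustration:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/volume"
)

func main() {
	opts := volume.CreateOptions{
		Name:   "scratch", // if empty, Docker generates a name
		Driver: "local",
		DriverOpts: map[string]string{
			"type":   "tmpfs",
			"device": "tmpfs",
			"o":      "size=100m",
		},
		Labels: map[string]string{"app": "demo"},
	}
	fmt.Printf("%+v\n", opts)
}
```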
- // Details are returned as a map with key/value pairs: - // `{"key":"value","key2":"value2"}`. - // - // The `Status` field is optional, and is omitted if the volume driver - // does not support this feature. - // - Status map[string]interface{} `json:"Status,omitempty"` - - // usage data - UsageData *UsageData `json:"UsageData,omitempty"` -} - -// UsageData Usage details about the volume. This information is used by the -// `GET /system/df` endpoint, and omitted in other endpoints. -// -// swagger:model UsageData -type UsageData struct { - - // The number of containers referencing this volume. This field - // is set to `-1` if the reference-count is not available. - // - // Required: true - RefCount int64 `json:"RefCount"` - - // Amount of disk space used by the volume (in bytes). This information - // is only available for volumes created with the `"local"` volume - // driver. For volumes created with other volume drivers, this field - // is set to `-1` ("not available") - // - // Required: true - Size int64 `json:"Size"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go deleted file mode 100644 index f958f80a6..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_update.go +++ /dev/null @@ -1,7 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// UpdateOptions is configuration to update a Volume with. -type UpdateOptions struct { - // Spec is the ClusterVolumeSpec to update the volume to. - Spec *ClusterVolumeSpec `json:"Spec,omitempty"` -} diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go index 61e7456b4..a5523c3e9 100644 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs // ErrNotFound signals that the requested object doesn't exist type ErrNotFound interface { diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go index fe06fb6f7..042de4b7b 100644 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs import "context" diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go index 77bda389d..ebcd78930 100644 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs import ( "net/http" diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go index 3abf07d0c..f94034cbd 100644 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -1,9 +1,18 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs + +import ( + "context" + "errors" +) type causer interface { Cause() error } +type wrapErr interface { + Unwrap() error +} + func getImplementer(err error) error { switch e := err.(type) { case @@ -23,6 +32,8 @@ func getImplementer(err error) error { return err case causer: return getImplementer(e.Cause()) + case wrapErr: + return getImplementer(e.Unwrap()) default: return 
err } @@ -105,3 +116,8 @@ func IsDataLoss(err error) bool { _, ok := getImplementer(err).(ErrDataLoss) return ok } + +// IsContext returns if the passed in error is due to context cancellation or deadline exceeded. +func IsContext(err error) bool { + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go new file mode 100644 index 000000000..590683206 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir.go @@ -0,0 +1,44 @@ +package homedir + +import ( + "os" + "os/user" + "runtime" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on. +// +// Deprecated: this function is no longer used, and will be removed in the next release. +func Key() string { + return envKeyName +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// On non-Windows platforms, it falls back to nss lookups, if the home +// directory cannot be obtained from environment-variables. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +func Get() string { + home, _ := os.UserHomeDir() + if home == "" && runtime.GOOS != "windows" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +// +// Deprecated: this function is no longer used, and will be removed in the next release. +func GetShortcutString() string { + return homeShortCut +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index 11f1bec98..4eeb26b5d 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package homedir // import "github.com/docker/docker/pkg/homedir" diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go index d1732dee5..feae4d736 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -1,39 +1,8 @@ //go:build !windows -// +build !windows package homedir // import "github.com/docker/docker/pkg/homedir" -import ( - "os" - "os/user" +const ( + envKeyName = "HOME" + homeShortCut = "~" ) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// -// If linking statically with cgo enabled against glibc, ensure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. 
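The two errdefs additions above are complementary: the wrapErr case lets getImplementer walk standard Unwrap chains in addition to legacy Cause chains, and IsContext is built on errors.Is, which unwraps as well. A minimal sketch of the new helper:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/errdefs"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	// A context error wrapped with %w is still recognized, because
	// errors.Is follows the Unwrap chain down to context.Canceled.
	err := fmt.Errorf("pulling image: %w", ctx.Err())
	fmt.Println(errdefs.IsContext(err)) // true
}
```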
-func Get() string { - home := os.Getenv(Key()) - if home == "" { - if u, err := user.Current(); err == nil { - return u.HomeDir - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "~" -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go index 2f81813b2..37f4ee670 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -1,24 +1,6 @@ package homedir // import "github.com/docker/docker/pkg/homedir" -import ( - "os" +const ( + envKeyName = "USERPROFILE" + homeShortCut = "%USERPROFILE%" // be careful while using in format functions ) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - return os.Getenv(Key()) -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index de00b95e3..e03d3fee7 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -3,11 +3,15 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "context" "io" + "runtime/debug" + "sync/atomic" // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. _ "crypto/sha256" _ "crypto/sha512" + + "github.com/containerd/log" ) // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser @@ -16,10 +20,15 @@ import ( type ReadCloserWrapper struct { io.Reader closer func() error + closed atomic.Bool } // Close calls back the passed closer function func (r *ReadCloserWrapper) Close() error { + if !r.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("ReadCloserWrapper") + return nil + } return r.closer() } @@ -87,6 +96,7 @@ type cancelReadCloser struct { cancel func() pR *io.PipeReader // Stream to read from pW *io.PipeWriter + closed atomic.Bool } // NewCancelReadCloser creates a wrapper that closes the ReadCloser when the @@ -146,6 +156,17 @@ func (p *cancelReadCloser) closeWithError(err error) { // Close closes the wrapper its underlying reader. It will cause // future calls to Read to return io.EOF. 
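The guard added to ReadCloserWrapper above (a CompareAndSwap on an atomic.Bool so only the first Close runs the callback) recurs in cancelReadCloser.Close just below. The wrapper here is a hypothetical stand-in for the pattern, not the docker type:

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// onceCloser runs its closer exactly once; later Close calls are no-ops
// (docker additionally logs a warning via subsequentCloseWarn).
type onceCloser struct {
	io.Reader
	closer func() error
	closed atomic.Bool
}

func (c *onceCloser) Close() error {
	if !c.closed.CompareAndSwap(false, true) {
		fmt.Println("subsequent attempt to close onceCloser")
		return nil
	}
	return c.closer()
}

func main() {
	c := &onceCloser{
		Reader: strings.NewReader("data"),
		closer: func() error { fmt.Println("real close"); return nil },
	}
	_ = c.Close() // prints "real close"
	_ = c.Close() // prints the warning and returns nil
}
```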
func (p *cancelReadCloser) Close() error { + if !p.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("cancelReadCloser") + return nil + } p.closeWithError(io.EOF) return nil } + +func subsequentCloseWarn(name string) { + log.G(context.TODO()).Error("subsequent attempt to close " + name) + if log.GetLevel() >= log.DebugLevel { + log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack())) + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go b/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go deleted file mode 100644 index b3321602c..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go +++ /dev/null @@ -1,10 +0,0 @@ -package ioutils - -import "github.com/docker/docker/pkg/longpath" - -// TempDir is the equivalent of [os.MkdirTemp], except that on Windows -// the result is in Windows longpath format. On Unix systems it is -// equivalent to [os.MkdirTemp]. -// -// Deprecated: use [longpath.MkdirTemp]. -var TempDir = longpath.MkdirTemp diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go index 61c679497..1f50602f2 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -1,6 +1,9 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" -import "io" +import ( + "io" + "sync/atomic" +) // NopWriter represents a type which write operation is nop. type NopWriter struct{} @@ -29,9 +32,14 @@ func (f *NopFlusher) Flush() {} type writeCloserWrapper struct { io.Writer closer func() error + closed atomic.Bool } func (r *writeCloserWrapper) Close() error { + if !r.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("WriteCloserWrapper") + return nil + } return r.closer() } diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go deleted file mode 100644 index 035160c83..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,307 +0,0 @@ -package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - units "github.com/docker/go-units" - "github.com/moby/term" - "github.com/morikuni/aec" -) - -// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to -// ensure the formatted time isalways the same number of characters. -const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - -// JSONError wraps a concrete Code and Message, Code is -// an integer error code, Message is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a progress message in a JSON stream. -type JSONProgress struct { - // Current is the current status and value of the progress made towards Total. - Current int64 `json:"current,omitempty"` - // Total is the end value describing when we made 100% progress for an operation. - Total int64 `json:"total,omitempty"` - // Start is the initial value for the operation. - Start int64 `json:"start,omitempty"` - // HideCounts. if true, hides the progress count indicator (xB/yB). - HideCounts bool `json:"hidecounts,omitempty"` - // Units is the unit to print for progress. It defaults to "bytes" if empty. 
- Units string `json:"units,omitempty"` - - // terminalFd is the fd of the current terminal, if any. It is used - // to get the terminal width. - terminalFd uintptr - - // nowFunc is used to override the current time in tests. - nowFunc func() time.Time - - // winSize is used to override the terminal width in tests. - winSize int -} - -func (p *JSONProgress) String() string { - var ( - width = p.width() - pbBox string - numbersBox string - timeLeftBox string - ) - if p.Current <= 0 && p.Total <= 0 { - return "" - } - if p.Total <= 0 { - switch p.Units { - case "": - return fmt.Sprintf("%8v", units.HumanSize(float64(p.Current))) - default: - return fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - switch { - case p.HideCounts: - case p.Units == "": // no units, use bytes - current := units.HumanSize(float64(p.Current)) - total := units.HumanSize(float64(p.Total)) - - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - default: - numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := p.now().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// now returns the current time in UTC, but can be overridden in tests -// by setting JSONProgress.nowFunc to a custom function. -func (p *JSONProgress) now() time.Time { - if p.nowFunc != nil { - return p.nowFunc() - } - return time.Now().UTC() -} - -// width returns the current terminal's width, but can be overridden -// in tests by setting JSONProgress.winSize to a non-zero value. -func (p *JSONProgress) width() int { - if p.winSize != 0 { - return p.winSize - } - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - return int(ws.Width) - } - return 200 -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. -type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` // deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` // deprecated - // Aux contains out-of-band data, such as digests for push signing and image id after building. 
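Aside, for context on the deleted `JSONProgress.String` above: the percentage is halved so the bar is at most 50 ticks wide, and the bar is only rendered on terminals wider than 110 columns. A toy sketch of just that arithmetic, with a hypothetical `renderBar` helper:

```go
package main

import (
	"fmt"
	"strings"
)

// renderBar mirrors the bar math described above: current/total scaled to
// at most 50 "=" ticks, and no bar at all on narrow terminals.
func renderBar(current, total int64, termWidth int) string {
	if termWidth <= 110 {
		return ""
	}
	ticks := int(float64(current) / float64(total) * 100 / 2)
	if ticks > 50 {
		ticks = 50
	}
	return fmt.Sprintf("[%s>%s]", strings.Repeat("=", ticks), strings.Repeat(" ", 50-ticks))
}

func main() {
	fmt.Println(renderBar(30, 100, 120)) // a 52-character bar, 15 ticks filled
	fmt.Println(renderBar(30, 100, 80))  // empty string: terminal too narrow
}
```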
- Aux *json.RawMessage `json:"aux,omitempty"` -} - -func clearLine(out io.Writer) { - eraseMode := aec.EraseModes.All - cl := aec.EraseLine(eraseMode) - fmt.Fprint(out, cl) -} - -func cursorUp(out io.Writer, l uint) { - fmt.Fprint(out, aec.Up(l)) -} - -func cursorDown(out io.Writer, l uint) { - fmt.Fprint(out, aec.Down(l)) -} - -// Display prints the JSONMessage to out. If isTerminal is true, it erases -// the entire current line when displaying the progressbar. It returns an -// error if the [JSONMessage.Error] field is non-nil. -func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { - if jm.Error != nil { - return jm.Error - } - var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - clearLine(out) - endl = "\r" - fmt.Fprint(out, endl) - } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && isTerminal { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { // deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream reads a JSON message stream from in, and writes -// each [JSONMessage] to out. It returns an error if an invalid JSONMessage -// is received, or if a JSONMessage containers a non-zero [JSONMessage.Error]. -// -// Presentation of the JSONMessage depends on whether a terminal is attached, -// and on the terminal width. Progress bars ([JSONProgress]) are suppressed -// on narrower terminals (< 110 characters). -// -// - isTerminal describes if out is a terminal, in which case it prints -// a newline ("\n") at the end of each line and moves the cursor while -// displaying. -// - terminalFd is the fd of the current terminal (if any), and used -// to get the terminal width. -// - auxCallback allows handling the [JSONMessage.Aux] field. It is -// called if a JSONMessage contains an Aux field, in which case -// DisplayJSONMessagesStream does not present the JSONMessage. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]uint) - ) - - for { - var diff uint - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. 
- line = uint(len(ids)) - ids[jm.ID] = line - if isTerminal { - fmt.Fprintf(out, "\n") - } - } - diff = uint(len(ids)) - line - if isTerminal { - cursorUp(out, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). - ids = make(map[string]uint) - } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal { - cursorDown(out, diff) - } - if err != nil { - return err - } - } - return nil -} - -// Stream is an io.Writer for output with utilities to get the output's file -// descriptor and to detect wether it's a terminal. -// -// it is subset of the streams.Out type in -// https://pkg.go.dev/github.com/docker/cli@v20.10.17+incompatible/cli/streams#Out -type Stream interface { - io.Writer - FD() uintptr - IsTerminal() bool -} - -// DisplayJSONMessagesToStream prints json messages to the output Stream. It is -// used by the Docker CLI to print JSONMessage streams. -func DisplayJSONMessagesToStream(in io.Reader, stream Stream, auxCallback func(JSONMessage)) error { - return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) -} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go deleted file mode 100644 index 1c5dde521..000000000 --- a/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ /dev/null @@ -1,43 +0,0 @@ -// Package longpath introduces some constants and helper functions for handling -// long paths in Windows. -// -// Long paths are expected to be prepended with "\\?\" and followed by either a -// drive letter, a UNC server\share, or a volume identifier. -package longpath // import "github.com/docker/docker/pkg/longpath" - -import ( - "os" - "runtime" - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix adds the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} - -// MkdirTemp is the equivalent of [os.MkdirTemp], except that on Windows -// the result is in Windows longpath format. On Unix systems it is -// equivalent to [os.MkdirTemp]. 
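Aside: the deleted `longpath` helpers documented above implement the Windows extended-length path convention. A standalone sketch of the same prefix rules; the `addPrefix` name is a hypothetical mirror of the deleted `AddPrefix`:

```go
package main

import (
	"fmt"
	"strings"
)

// prefix is the Windows long-path prefix; UNC paths get an extra "UNC"
// component, so \\server\share becomes \\?\UNC\server\share.
const prefix = `\\?\`

func addPrefix(path string) string {
	if strings.HasPrefix(path, prefix) {
		return path // already prefixed
	}
	if strings.HasPrefix(path, `\\`) {
		return prefix + `UNC` + path[1:]
	}
	return prefix + path
}

func main() {
	fmt.Println(addPrefix(`C:\Temp`))         // \\?\C:\Temp
	fmt.Println(addPrefix(`\\srv\share\dir`)) // \\?\UNC\srv\share\dir
}
```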
-func MkdirTemp(dir, prefix string) (string, error) { - tempDir, err := os.MkdirTemp(dir, prefix) - if err != nil { - return "", err - } - if runtime.GOOS != "windows" { - return tempDir, nil - } - return AddPrefix(tempDir), nil -} diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go index dd75a49f3..f685892c1 100644 --- a/vendor/github.com/docker/docker/registry/auth.go +++ b/vendor/github.com/docker/docker/registry/auth.go @@ -1,17 +1,18 @@ package registry // import "github.com/docker/docker/registry" import ( + "context" "net/http" "net/url" "strings" "time" + "github.com/containerd/log" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/api/types/registry" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // AuthClientID is used the ClientID used for the token server @@ -74,7 +75,7 @@ func loginV2(authConfig *registry.AuthConfig, endpoint APIEndpoint, userAgent st creds = loginCredentialStore{authConfig: &credentialAuthConfig} ) - logrus.Debugf("attempting v2 login to registry endpoint %s", endpointStr) + log.G(context.TODO()).Debugf("attempting v2 login to registry endpoint %s", endpointStr) loginClient, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) if err != nil { @@ -124,8 +125,10 @@ func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifi }, nil } -// ConvertToHostname converts a registry url which has http|https prepended -// to just an hostname. +// ConvertToHostname normalizes a registry URL which has http|https prepended +// to just its hostname. It is used to match credentials, which may be either +// stored as hostname or as hostname including scheme (in legacy configuration +// files). func ConvertToHostname(url string) string { stripped := url if strings.HasPrefix(url, "http://") { @@ -146,8 +149,8 @@ func ResolveAuthConfig(authConfigs map[string]registry.AuthConfig, index *regist // Maybe they have a legacy config file, we will iterate the keys converting // them to the new format and testing - for registry, ac := range authConfigs { - if configKey == ConvertToHostname(registry) { + for registryURL, ac := range authConfigs { + if configKey == ConvertToHostname(registryURL) { return ac } } diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go index 2766306ac..84b0a63ad 100644 --- a/vendor/github.com/docker/docker/registry/config.go +++ b/vendor/github.com/docker/docker/registry/config.go @@ -1,15 +1,16 @@ package registry // import "github.com/docker/docker/registry" import ( + "context" "net" "net/url" "regexp" "strconv" "strings" - "github.com/docker/distribution/reference" + "github.com/containerd/log" + "github.com/distribution/reference" "github.com/docker/docker/api/types/registry" - "github.com/sirupsen/logrus" ) // ServiceOptions holds command line options. 
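Aside: the reworded `ConvertToHostname` doc comment above describes how credentials stored under scheme-prefixed keys in legacy config files are matched. A minimal standalone sketch of that normalization (a re-implementation for illustration, not the vendored function):

```go
package main

import (
	"fmt"
	"strings"
)

// convertToHostname strips an http:// or https:// scheme and keeps only
// the host portion, so legacy credential keys still match.
func convertToHostname(url string) string {
	stripped := url
	if strings.HasPrefix(url, "http://") {
		stripped = strings.TrimPrefix(url, "http://")
	} else if strings.HasPrefix(url, "https://") {
		stripped = strings.TrimPrefix(url, "https://")
	}
	return strings.SplitN(stripped, "/", 2)[0]
}

func main() {
	fmt.Println(convertToHostname("https://registry.example.com/v1/")) // registry.example.com
	fmt.Println(convertToHostname("registry.example.com"))             // registry.example.com
}
```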
@@ -197,10 +198,10 @@ skip: return err } if strings.HasPrefix(strings.ToLower(r), "http://") { - logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r) + log.G(context.TODO()).Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r) r = r[7:] } else if strings.HasPrefix(strings.ToLower(r), "https://") { - logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r) + log.G(context.TODO()).Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r) r = r[8:] } else if hasScheme(r) { return invalidParamf("insecure registry %s should not contain '://'", r) @@ -319,7 +320,8 @@ func isCIDRMatch(cidrs []*registry.NetIPNet, URLHost string) bool { return false } -// ValidateMirror validates an HTTP(S) registry mirror +// ValidateMirror validates an HTTP(S) registry mirror. It is used by the daemon +// to validate the daemon configuration. func ValidateMirror(val string) (string, error) { uri, err := url.Parse(val) if err != nil { @@ -328,8 +330,8 @@ func ValidateMirror(val string) (string, error) { if uri.Scheme != "http" && uri.Scheme != "https" { return "", invalidParamf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri) } - if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" { - return "", invalidParamf("invalid mirror: path, query, or fragment at end of the URI %q", uri) + if uri.RawQuery != "" || uri.Fragment != "" { + return "", invalidParamf("invalid mirror: query or fragment at end of the URI %q", uri) } if uri.User != nil { // strip password from output @@ -339,7 +341,8 @@ func ValidateMirror(val string) (string, error) { return strings.TrimSuffix(val, "/") + "/", nil } -// ValidateIndexName validates an index name. +// ValidateIndexName validates an index name. It is used by the daemon to +// validate the daemon configuration. func ValidateIndexName(val string) (string, error) { // TODO: upstream this to check to reference package if val == "index.docker.io" { @@ -425,24 +428,10 @@ func newRepositoryInfo(config *serviceConfig, name reference.Named) (*Repository }, nil } -// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but -// lacks registry configuration. +// ParseRepositoryInfo performs the breakdown of a repository name into a +// [RepositoryInfo], but lacks registry configuration. +// +// It is used by the Docker cli to interact with registry-related endpoints. func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { return newRepositoryInfo(emptyServiceConfig, reposName) } - -// ParseSearchIndexInfo will use repository name to get back an indexInfo. -// -// TODO(thaJeztah) this function is only used by the CLI, and used to get -// information of the registry (to provide credentials if needed). We should -// move this function (or equivalent) to the CLI, as it's doing too much just -// for that. 
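Aside: the `ValidateMirror` hunk above relaxes the old rule: a path component is now accepted, while query strings and fragments remain invalid. A sketch under those rules (the error wording is illustrative, and the vendored code additionally strips passwords from the URI it reports):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// validateMirror sketches the relaxed rule: http/https only, no query or
// fragment, result normalized to a single trailing "/".
func validateMirror(val string) (string, error) {
	uri, err := url.Parse(val)
	if err != nil {
		return "", fmt.Errorf("invalid mirror: %q is not a valid URI: %w", val, err)
	}
	if uri.Scheme != "http" && uri.Scheme != "https" {
		return "", fmt.Errorf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri)
	}
	if uri.RawQuery != "" || uri.Fragment != "" {
		return "", fmt.Errorf("invalid mirror: query or fragment at end of the URI %q", uri)
	}
	return strings.TrimSuffix(val, "/") + "/", nil
}

func main() {
	m, _ := validateMirror("https://mirror.example.com/v2") // a path is now accepted
	fmt.Println(m)                                          // https://mirror.example.com/v2/
	if _, err := validateMirror("https://mirror.example.com/?x=1"); err != nil {
		fmt.Println(err)
	}
}
```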
-func ParseSearchIndexInfo(reposName string) (*registry.IndexInfo, error) { - indexName, _ := splitReposSearchTerm(reposName) - - indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) - if err != nil { - return nil, err - } - return indexInfo, nil -} diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go index 898c6b8a5..214204930 100644 --- a/vendor/github.com/docker/docker/registry/config_unix.go +++ b/vendor/github.com/docker/docker/registry/config_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package registry // import "github.com/docker/docker/registry" diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go deleted file mode 100644 index 56257dc79..000000000 --- a/vendor/github.com/docker/docker/registry/endpoint_v1.go +++ /dev/null @@ -1,185 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "crypto/tls" - "encoding/json" - "io" - "net/http" - "net/url" - "strings" - - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types/registry" - "github.com/sirupsen/logrus" -) - -// v1PingResult contains the information returned when pinging a registry. It -// indicates the registry's version and whether the registry claims to be a -// standalone registry. -type v1PingResult struct { - // Version is the registry version supplied by the registry in an HTTP - // header - Version string `json:"version"` - // Standalone is set to true if the registry indicates it is a - // standalone registry in the X-Docker-Registry-Standalone - // header - Standalone bool `json:"standalone"` -} - -// v1Endpoint stores basic information about a V1 registry endpoint. -type v1Endpoint struct { - client *http.Client - URL *url.URL - IsSecure bool -} - -// newV1Endpoint parses the given address to return a registry endpoint. -// TODO: remove. This is only used by search. -func newV1Endpoint(index *registry.IndexInfo, headers http.Header) (*v1Endpoint, error) { - tlsConfig, err := newTLSConfig(index.Name, index.Secure) - if err != nil { - return nil, err - } - - endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, headers) - if err != nil { - return nil, err - } - - err = validateEndpoint(endpoint) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -func validateEndpoint(endpoint *v1Endpoint) error { - logrus.Debugf("pinging registry endpoint %s", endpoint) - - // Try HTTPS ping to registry - endpoint.URL.Scheme = "https" - if _, err := endpoint.ping(); err != nil { - if endpoint.IsSecure { - // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` - // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. - return invalidParamf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) - } - - // If registry is insecure and HTTPS failed, fallback to HTTP. 
- logrus.WithError(err).Debugf("error from registry %q marked as insecure - insecurely falling back to HTTP", endpoint) - endpoint.URL.Scheme = "http" - - var err2 error - if _, err2 = endpoint.ping(); err2 == nil { - return nil - } - - return invalidParamf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) - } - - return nil -} - -// trimV1Address trims the version off the address and returns the -// trimmed address or an error if there is a non-V1 version. -func trimV1Address(address string) (string, error) { - address = strings.TrimSuffix(address, "/") - chunks := strings.Split(address, "/") - apiVersionStr := chunks[len(chunks)-1] - if apiVersionStr == "v1" { - return strings.Join(chunks[:len(chunks)-1], "/"), nil - } - - for k, v := range apiVersions { - if k != APIVersion1 && apiVersionStr == v { - return "", invalidParamf("unsupported V1 version path %s", apiVersionStr) - } - } - - return address, nil -} - -func newV1EndpointFromStr(address string, tlsConfig *tls.Config, headers http.Header) (*v1Endpoint, error) { - if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { - address = "https://" + address - } - - address, err := trimV1Address(address) - if err != nil { - return nil, err - } - - uri, err := url.Parse(address) - if err != nil { - return nil, invalidParam(err) - } - - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := newTransport(tlsConfig) - - return &v1Endpoint{ - IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, - URL: uri, - client: httpClient(transport.NewTransport(tr, Headers("", headers)...)), - }, nil -} - -// Get the formatted URL for the root of this registry Endpoint -func (e *v1Endpoint) String() string { - return e.URL.String() + "/v1/" -} - -// ping returns a v1PingResult which indicates whether the registry is standalone or not. -func (e *v1Endpoint) ping() (v1PingResult, error) { - if e.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fallback to http in case of error) - return v1PingResult{}, nil - } - - logrus.Debugf("attempting v1 ping for registry endpoint %s", e) - pingURL := e.String() + "_ping" - req, err := http.NewRequest(http.MethodGet, pingURL, nil) - if err != nil { - return v1PingResult{}, invalidParam(err) - } - - resp, err := e.client.Do(req) - if err != nil { - return v1PingResult{}, invalidParam(err) - } - - defer resp.Body.Close() - - jsonString, err := io.ReadAll(resp.Body) - if err != nil { - return v1PingResult{}, invalidParamWrapf(err, "error while reading response from %s", pingURL) - } - - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry. default to true - info := v1PingResult{ - Standalone: true, - } - if err := json.Unmarshal(jsonString, &info); err != nil { - logrus.WithError(err).Debug("error unmarshaling _ping response") - // don't stop here. Just assume sane defaults - } - if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { - info.Version = hdr - } - logrus.Debugf("v1PingResult.Version: %q", info.Version) - - standalone := resp.Header.Get("X-Docker-Registry-Standalone") - - // Accepted values are "true" (case-insensitive) and "1". 
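Aside: the ping logic above (and its rewritten counterpart added later in this diff as search_endpoint_v1.go) decides "standalone" from the X-Docker-Registry-Standalone header: "1" or a case-insensitive "true" mean standalone, and an absent header defaults to true for compatibility with older registries. A tiny sketch of that decision:

```go
package main

import (
	"fmt"
	"strings"
)

// standaloneFromHeader mirrors the header handling described above.
func standaloneFromHeader(value string, present bool) bool {
	if !present {
		return true // header absent: assume standalone for older registries
	}
	return value == "1" || strings.EqualFold(value, "true")
}

func main() {
	fmt.Println(standaloneFromHeader("True", true)) // true
	fmt.Println(standaloneFromHeader("0", true))    // false
	fmt.Println(standaloneFromHeader("", false))    // true
}
```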
- if strings.EqualFold(standalone, "true") || standalone == "1" { - info.Standalone = true - } else if len(standalone) > 0 { - // there is a header set, and it is not "true" or "1", so assume fails - info.Standalone = false - } - logrus.Debugf("v1PingResult.Standalone: %t", info.Standalone) - return info, nil -} diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go index 5ff39ce5e..7866dcd0d 100644 --- a/vendor/github.com/docker/docker/registry/registry.go +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -2,6 +2,7 @@ package registry // import "github.com/docker/docker/registry" import ( + "context" "crypto/tls" "net" "net/http" @@ -10,9 +11,9 @@ import ( "strings" "time" + "github.com/containerd/log" "github.com/docker/distribution/registry/client/transport" "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" ) // HostCertsDir returns the config directory for a specific host. @@ -29,7 +30,7 @@ func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { if isSecure && CertsDir() != "" { hostDir := HostCertsDir(hostname) - logrus.Debugf("hostDir: %s", hostDir) + log.G(context.TODO()).Debugf("hostDir: %s", hostDir) if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { return nil, err } @@ -65,7 +66,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { } tlsConfig.RootCAs = systemPool } - logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) + log.G(context.TODO()).Debugf("crt: %s", filepath.Join(directory, f.Name())) data, err := os.ReadFile(filepath.Join(directory, f.Name())) if err != nil { return err @@ -75,7 +76,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { if strings.HasSuffix(f.Name(), ".cert") { certName := f.Name() keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) + log.G(context.TODO()).Debugf("cert: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, keyName) { return invalidParamf("missing key %s for client certificate %s. 
CA certificates must use the extension .crt", keyName, certName) } @@ -88,7 +89,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { if strings.HasSuffix(f.Name(), ".key") { keyName := f.Name() certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) + log.G(context.TODO()).Debugf("key: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, certName) { return invalidParamf("missing client certificate %s for key %s", certName, keyName) } @@ -112,51 +113,6 @@ func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModif return modifiers } -// httpClient returns an HTTP client structure which uses the given transport -// and contains the necessary headers for redirected requests -func httpClient(transport http.RoundTripper) *http.Client { - return &http.Client{ - Transport: transport, - CheckRedirect: addRequiredHeadersToRedirectedRequests, - } -} - -func trustedLocation(req *http.Request) bool { - var ( - trusteds = []string{"docker.com", "docker.io"} - hostname = strings.SplitN(req.Host, ":", 2)[0] - ) - if req.URL.Scheme != "https" { - return false - } - - for _, trusted := range trusteds { - if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { - return true - } - } - return false -} - -// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers -// for redirected requests -func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if len(via) != 0 && via[0] != nil { - if trustedLocation(req) && trustedLocation(via[0]) { - req.Header = via[0].Header - return nil - } - for k, v := range via[0].Header { - if k != "Authorization" { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - } - } - return nil -} - // newTransport returns a new HTTP transport. If tlsConfig is nil, it uses the // default TLS configuration. func newTransport(tlsConfig *tls.Config) *http.Transport { diff --git a/vendor/github.com/docker/docker/registry/search.go b/vendor/github.com/docker/docker/registry/search.go index 60b86ea22..75a544410 100644 --- a/vendor/github.com/docker/docker/registry/search.go +++ b/vendor/github.com/docker/docker/registry/search.go @@ -1,4 +1,4 @@ -package registry // import "github.com/docker/docker/registry" +package registry import ( "context" @@ -6,17 +6,16 @@ import ( "strconv" "strings" + "github.com/containerd/log" + "github.com/docker/distribution/registry/client/auth" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" - - "github.com/docker/distribution/registry/client/auth" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var acceptedSearchFilterTags = map[string]bool{ - "is-automated": true, + "is-automated": true, // Deprecated: the "is_automated" field is deprecated and will always be false in the future. "is-official": true, "stars": true, } @@ -28,6 +27,7 @@ func (s *Service) Search(ctx context.Context, searchFilters filters.Args, term s return nil, err } + // TODO(thaJeztah): the "is-automated" field is deprecated; reset the field for the next release (v26.0.0). Return early when using "is-automated=true", and ignore "is-automated=false". isAutomated, err := searchFilters.GetBoolOrDefault("is-automated", false) if err != nil { return nil, err @@ -51,6 +51,7 @@ func (s *Service) Search(ctx context.Context, searchFilters filters.Args, term s } } + // TODO(thaJeztah): the "is-automated" field is deprecated. 
Reset the field for the next release (v26.0.0) if any "true" values are present. unfilteredResult, err := s.searchUnfiltered(ctx, term, limit, authConfig, headers) if err != nil { return nil, err @@ -59,7 +60,7 @@ func (s *Service) Search(ctx context.Context, searchFilters filters.Args, term s filteredResults := []registry.SearchResult{} for _, result := range unfilteredResult.Results { if searchFilters.Contains("is-automated") { - if isAutomated != result.IsAutomated { + if isAutomated != result.IsAutomated { //nolint:staticcheck // ignore SA1019 for old API versions. continue } } @@ -126,7 +127,7 @@ func (s *Service) searchUnfiltered(ctx context.Context, term string, limit int, v2Client.CheckRedirect = endpoint.client.CheckRedirect v2Client.Jar = endpoint.client.Jar - logrus.Debugf("using v2 client for search to %s", endpoint.URL) + log.G(ctx).Debugf("using v2 client for search to %s", endpoint.URL) client = v2Client } else { client = endpoint.client @@ -137,3 +138,26 @@ func (s *Service) searchUnfiltered(ctx context.Context, term string, limit int, return newSession(client, endpoint).searchRepositories(remoteName, limit) } + +// splitReposSearchTerm breaks a search term into an index name and remote name +func splitReposSearchTerm(reposName string) (string, string) { + nameParts := strings.SplitN(reposName, "/", 2) + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && + !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { + // This is a Docker Hub repository (ex: samalba/hipache or ubuntu), + // use the default Docker Hub registry (docker.io) + return IndexName, reposName + } + return nameParts[0], nameParts[1] +} + +// ParseSearchIndexInfo will use repository name to get back an indexInfo. +// +// TODO(thaJeztah) this function is only used by the CLI, and used to get +// information of the registry (to provide credentials if needed). We should +// move this function (or equivalent) to the CLI, as it's doing too much just +// for that. +func ParseSearchIndexInfo(reposName string) (*registry.IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) + return newIndexInfo(emptyServiceConfig, indexName) +} diff --git a/vendor/github.com/docker/docker/registry/search_endpoint_v1.go b/vendor/github.com/docker/docker/registry/search_endpoint_v1.go new file mode 100644 index 000000000..f6c369a93 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/search_endpoint_v1.go @@ -0,0 +1,200 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "context" + "crypto/tls" + "encoding/json" + "net/http" + "net/url" + "strings" + + "github.com/containerd/log" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types/registry" +) + +// v1PingResult contains the information returned when pinging a registry. It +// indicates whether the registry claims to be a standalone registry. +type v1PingResult struct { + // Standalone is set to true if the registry indicates it is a + // standalone registry in the X-Docker-Registry-Standalone + // header + Standalone bool `json:"standalone"` +} + +// v1Endpoint stores basic information about a V1 registry endpoint. +type v1Endpoint struct { + client *http.Client + URL *url.URL + IsSecure bool +} + +// newV1Endpoint parses the given address to return a registry endpoint. +// TODO: remove. This is only used by search. 
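Aside: `splitReposSearchTerm`, moved into search.go above, encodes the Docker Hub heuristic: if the first path segment contains no "." or ":" and is not "localhost", the term is a Hub repository; otherwise the first segment is the index hostname. A standalone sketch:

```go
package main

import (
	"fmt"
	"strings"
)

const indexName = "docker.io"

// splitReposSearchTerm breaks a search term into an index name and a
// remote repository name, per the heuristic described above.
func splitReposSearchTerm(reposName string) (string, string) {
	parts := strings.SplitN(reposName, "/", 2)
	if len(parts) == 1 || (!strings.Contains(parts[0], ".") &&
		!strings.Contains(parts[0], ":") && parts[0] != "localhost") {
		return indexName, reposName
	}
	return parts[0], parts[1]
}

func main() {
	fmt.Println(splitReposSearchTerm("ubuntu"))                        // docker.io ubuntu
	fmt.Println(splitReposSearchTerm("samalba/hipache"))               // docker.io samalba/hipache
	fmt.Println(splitReposSearchTerm("registry.example.com/my/image")) // registry.example.com my/image
}
```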
+func newV1Endpoint(index *registry.IndexInfo, headers http.Header) (*v1Endpoint, error) { + tlsConfig, err := newTLSConfig(index.Name, index.Secure) + if err != nil { + return nil, err + } + + endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, headers) + if err != nil { + return nil, err + } + + if endpoint.String() == IndexServer { + // Skip the check, we know this one is valid + // (and we never want to fall back to http in case of error) + return endpoint, nil + } + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.ping(); err != nil { + if endpoint.IsSecure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fall back to HTTP. + return nil, invalidParamf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // registry is insecure and HTTPS failed, fallback to HTTP. + log.G(context.TODO()).WithError(err).Debugf("error from registry %q marked as insecure - insecurely falling back to HTTP", endpoint) + endpoint.URL.Scheme = "http" + if _, err2 := endpoint.ping(); err2 != nil { + return nil, invalidParamf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) + } + } + + return endpoint, nil +} + +// trimV1Address trims the "v1" version suffix off the address and returns +// the trimmed address. It returns an error on "v2" endpoints. +func trimV1Address(address string) (string, error) { + trimmed := strings.TrimSuffix(address, "/") + if strings.HasSuffix(trimmed, "/v2") { + return "", invalidParamf("search is not supported on v2 endpoints: %s", address) + } + return strings.TrimSuffix(trimmed, "/v1"), nil +} + +func newV1EndpointFromStr(address string, tlsConfig *tls.Config, headers http.Header) (*v1Endpoint, error) { + if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { + address = "https://" + address + } + + address, err := trimV1Address(address) + if err != nil { + return nil, err + } + + uri, err := url.Parse(address) + if err != nil { + return nil, invalidParam(err) + } + + // TODO(tiborvass): make sure a ConnectTimeout transport is used + tr := newTransport(tlsConfig) + + return &v1Endpoint{ + IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, + URL: uri, + client: httpClient(transport.NewTransport(tr, Headers("", headers)...)), + }, nil +} + +// Get the formatted URL for the root of this registry Endpoint +func (e *v1Endpoint) String() string { + return e.URL.String() + "/v1/" +} + +// ping returns a v1PingResult which indicates whether the registry is standalone or not. 
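Aside: the new `trimV1Address` above is stricter than the deleted version: any "/v2" suffix is rejected outright (search is a v1-only API), and a "/v1" suffix is trimmed. A standalone sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// trimV1Address strips a trailing "/v1" and rejects "/v2" endpoints,
// matching the behavior added in this diff.
func trimV1Address(address string) (string, error) {
	trimmed := strings.TrimSuffix(address, "/")
	if strings.HasSuffix(trimmed, "/v2") {
		return "", fmt.Errorf("search is not supported on v2 endpoints: %s", address)
	}
	return strings.TrimSuffix(trimmed, "/v1"), nil
}

func main() {
	a, _ := trimV1Address("https://registry.example.com/v1/")
	fmt.Println(a) // https://registry.example.com
	_, err := trimV1Address("https://registry.example.com/v2/")
	fmt.Println(err) // search is not supported on v2 endpoints: ...
}
```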
+func (e *v1Endpoint) ping() (v1PingResult, error) { + if e.String() == IndexServer { + // Skip the check, we know this one is valid + // (and we never want to fallback to http in case of error) + return v1PingResult{}, nil + } + + pingURL := e.String() + "_ping" + log.G(context.TODO()).WithField("url", pingURL).Debug("attempting v1 ping for registry endpoint") + req, err := http.NewRequest(http.MethodGet, pingURL, nil) + if err != nil { + return v1PingResult{}, invalidParam(err) + } + + resp, err := e.client.Do(req) + if err != nil { + return v1PingResult{}, invalidParam(err) + } + + defer resp.Body.Close() + + if v := resp.Header.Get("X-Docker-Registry-Standalone"); v != "" { + info := v1PingResult{} + // Accepted values are "1", and "true" (case-insensitive). + if v == "1" || strings.EqualFold(v, "true") { + info.Standalone = true + } + log.G(context.TODO()).Debugf("v1PingResult.Standalone (from X-Docker-Registry-Standalone header): %t", info.Standalone) + return info, nil + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := v1PingResult{ + Standalone: true, + } + if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { + log.G(context.TODO()).WithError(err).Debug("error unmarshaling _ping response") + // don't stop here. Just assume sane defaults + } + + log.G(context.TODO()).Debugf("v1PingResult.Standalone: %t", info.Standalone) + return info, nil +} + +// httpClient returns an HTTP client structure which uses the given transport +// and contains the necessary headers for redirected requests +func httpClient(transport http.RoundTripper) *http.Client { + return &http.Client{ + Transport: transport, + CheckRedirect: addRequiredHeadersToRedirectedRequests, + } +} + +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true + } + } + return false +} + +// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers +// for redirected requests +func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if len(via) != 0 && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil + } + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/registry/search_session.go b/vendor/github.com/docker/docker/registry/search_session.go new file mode 100644 index 000000000..c334143c6 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/search_session.go @@ -0,0 +1,218 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + // this is required for some certificates + "context" + _ "crypto/sha512" + "encoding/json" + "fmt" + "net/http" + "net/http/cookiejar" + "net/url" + "strings" + "sync" + + "github.com/containerd/log" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/ioutils" + "github.com/pkg/errors" +) + +// A session is used to communicate with a V1 registry +type session struct { + indexEndpoint *v1Endpoint + client *http.Client +} + +type authTransport struct { + http.RoundTripper + 
*registry.AuthConfig + + alwaysSetBasicAuth bool + token []string + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// newAuthTransport handles the auth layer when communicating with a v1 registry (private or official) +// +// For private v1 registries, set alwaysSetBasicAuth to true. +// +// For the official v1 registry, if there isn't already an Authorization header in the request, +// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. +// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing +// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent +// requests. +// +// If the server sends a token without the client having requested it, it is ignored. +// +// This RoundTripper also has a CancelRequest method important for correct timeout handling. +func newAuthTransport(base http.RoundTripper, authConfig *registry.AuthConfig, alwaysSetBasicAuth bool) *authTransport { + if base == nil { + base = http.DefaultTransport + } + return &authTransport{ + RoundTripper: base, + AuthConfig: authConfig, + alwaysSetBasicAuth: alwaysSetBasicAuth, + modReq: make(map[*http.Request]*http.Request), + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +// RoundTrip changes an HTTP request's headers to add the necessary +// authentication-related headers +func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { + // Authorization should not be set on 302 redirect for untrusted locations. + // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. + // As the authorization logic is currently implemented in RoundTrip, + // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. + // This is safe as Docker doesn't set Referrer in other scenarios. 
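Aside: `cloneRequest` above copies a request before the transport mutates it; the struct is copied shallowly but the Header map is copied deeply, so header changes never leak back into the caller's request. A runnable sketch of the same copy discipline:

```go
package main

import (
	"fmt"
	"net/http"
)

// cloneRequest copies the struct shallowly and the Header map deeply.
func cloneRequest(r *http.Request) *http.Request {
	r2 := new(http.Request)
	*r2 = *r // shallow copy of the struct
	r2.Header = make(http.Header, len(r.Header))
	for k, s := range r.Header {
		r2.Header[k] = append([]string(nil), s...) // deep copy of each value slice
	}
	return r2
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	req.Header.Set("X-Docker-Token", "true")
	clone := cloneRequest(req)
	clone.Header.Set("Authorization", "Token abc")
	fmt.Println(req.Header.Get("Authorization"))   // "" — the original is untouched
	fmt.Println(clone.Header.Get("Authorization")) // Token abc
}
```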
+ if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { + return tr.RoundTripper.RoundTrip(orig) + } + + req := cloneRequest(orig) + tr.mu.Lock() + tr.modReq[orig] = req + tr.mu.Unlock() + + if tr.alwaysSetBasicAuth { + if tr.AuthConfig == nil { + return nil, errors.New("unexpected error: empty auth config") + } + req.SetBasicAuth(tr.Username, tr.Password) + return tr.RoundTripper.RoundTrip(req) + } + + // Don't override + if req.Header.Get("Authorization") == "" { + if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { + req.SetBasicAuth(tr.Username, tr.Password) + } else if len(tr.token) > 0 { + req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) + } + } + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + return nil, err + } + if len(resp.Header["X-Docker-Token"]) > 0 { + tr.token = resp.Header["X-Docker-Token"] + } + resp.Body = &ioutils.OnEOFReader{ + Rc: resp.Body, + Fn: func() { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + }, + } + return resp, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (tr *authTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := tr.RoundTripper.(canceler); ok { + tr.mu.Lock() + modReq := tr.modReq[req] + delete(tr.modReq, req) + tr.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func authorizeClient(client *http.Client, authConfig *registry.AuthConfig, endpoint *v1Endpoint) error { + var alwaysSetBasicAuth bool + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside all our requests. + if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { + info, err := endpoint.ping() + if err != nil { + return err + } + if info.Standalone && authConfig != nil { + log.G(context.TODO()).Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) + alwaysSetBasicAuth = true + } + } + + // Annotate the transport unconditionally so that v2 can + // properly fallback on v1 when an image is not found. + client.Transport = newAuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) + + jar, err := cookiejar.New(nil) + if err != nil { + return errdefs.System(errors.New("cookiejar.New is not supposed to return an error")) + } + client.Jar = jar + + return nil +} + +func newSession(client *http.Client, endpoint *v1Endpoint) *session { + return &session{ + client: client, + indexEndpoint: endpoint, + } +} + +// defaultSearchLimit is the default value for maximum number of returned search results. +const defaultSearchLimit = 25 + +// searchRepositories performs a search against the remote repository +func (r *session) searchRepositories(term string, limit int) (*registry.SearchResults, error) { + if limit == 0 { + limit = defaultSearchLimit + } + if limit < 1 || limit > 100 { + return nil, invalidParamf("limit %d is outside the range of [1, 100]", limit) + } + u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) + log.G(context.TODO()).WithField("url", u).Debug("searchRepositories") + + req, err := http.NewRequest(http.MethodGet, u, nil) + if err != nil { + return nil, invalidParamWrapf(err, "error building request") + } + // Have the AuthTransport send authentication, when logged in. 
+ req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + return nil, errdefs.System(err) + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + // TODO(thaJeztah): return upstream response body for errors (see https://github.com/moby/moby/issues/27286). + return nil, errdefs.Unknown(fmt.Errorf("Unexpected status code %d", res.StatusCode)) + } + result := ®istry.SearchResults{} + err = json.NewDecoder(res.Body).Decode(result) + if err != nil { + return nil, errdefs.System(errors.Wrap(err, "error decoding registry search results")) + } + return result, nil +} diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go index b848065b3..6881c1105 100644 --- a/vendor/github.com/docker/docker/registry/service.go +++ b/vendor/github.com/docker/docker/registry/service.go @@ -7,10 +7,10 @@ import ( "strings" "sync" - "github.com/docker/distribution/reference" + "github.com/containerd/log" + "github.com/distribution/reference" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" - "github.com/sirupsen/logrus" ) // Service is a registry service. It tracks configuration data such as a list @@ -20,8 +20,8 @@ type Service struct { mu sync.RWMutex } -// NewService returns a new instance of defaultService ready to be -// installed into an engine. +// NewService returns a new instance of [Service] ready to be installed into +// an engine. func NewService(options ServiceOptions) (*Service, error) { config, err := newServiceConfig(options) @@ -35,28 +35,18 @@ func (s *Service) ServiceConfig() *registry.ServiceConfig { return s.config.copy() } -// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. -func (s *Service) LoadAllowNondistributableArtifacts(registries []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.loadAllowNondistributableArtifacts(registries) -} - -// LoadMirrors loads registry mirrors for Service -func (s *Service) LoadMirrors(mirrors []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.loadMirrors(mirrors) -} - -// LoadInsecureRegistries loads insecure registries for Service -func (s *Service) LoadInsecureRegistries(registries []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.loadInsecureRegistries(registries) +// ReplaceConfig prepares a transaction which will atomically replace the +// registry service's configuration when the returned commit function is called. +func (s *Service) ReplaceConfig(options ServiceOptions) (commit func(), err error) { + config, err := newServiceConfig(options) + if err != nil { + return nil, err + } + return func() { + s.mu.Lock() + defer s.mu.Unlock() + s.config = config + }, nil } // Auth contacts the public registry with the provided credentials, @@ -64,7 +54,7 @@ func (s *Service) LoadInsecureRegistries(registries []string) error { // It can be used to verify the validity of a client's credentials. func (s *Service) Auth(ctx context.Context, authConfig *registry.AuthConfig, userAgent string) (status, token string, err error) { // TODO Use ctx when searching for repositories - var registryHostName = IndexHostname + registryHostName := IndexHostname if authConfig.ServerAddress != "" { serverAddress := authConfig.ServerAddress @@ -95,24 +85,12 @@ func (s *Service) Auth(ctx context.Context, authConfig *registry.AuthConfig, use // Failed to authenticate; don't continue with (non-TLS) endpoints. 
return status, token, err } - logrus.WithError(err).Infof("Error logging in to endpoint, trying next endpoint") + log.G(ctx).WithError(err).Infof("Error logging in to endpoint, trying next endpoint") } return "", "", err } -// splitReposSearchTerm breaks a search term into an index name and remote name -func splitReposSearchTerm(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { - // This is a Docker Hub repository (ex: samalba/hipache or ubuntu), - // use the default Docker Hub registry (docker.io) - return IndexName, reposName - } - return nameParts[0], nameParts[1] -} - // ResolveRepository splits a repository name into its components // and configuration of the associated registry. func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { @@ -125,7 +103,7 @@ func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, erro type APIEndpoint struct { Mirror bool URL *url.URL - Version APIVersion + Version APIVersion // Deprecated: v1 registries are deprecated, and endpoints are always v2. AllowNondistributableArtifacts bool Official bool TrimHostname bool diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go index c8c545d21..5d09e11c9 100644 --- a/vendor/github.com/docker/docker/registry/service_v2.go +++ b/vendor/github.com/docker/docker/registry/service_v2.go @@ -25,7 +25,7 @@ func (s *Service) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, e } endpoints = append(endpoints, APIEndpoint{ URL: mirrorURL, - Version: APIVersion2, + Version: APIVersion2, //nolint:staticcheck // ignore SA1019 (Version is deprecated) to allow potential consumers to transition. Mirror: true, TrimHostname: true, TLSConfig: mirrorTLSConfig, @@ -33,7 +33,7 @@ func (s *Service) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, e } endpoints = append(endpoints, APIEndpoint{ URL: DefaultV2Registry, - Version: APIVersion2, + Version: APIVersion2, //nolint:staticcheck // ignore SA1019 (Version is deprecated) to allow potential consumers to transition. Official: true, TrimHostname: true, TLSConfig: tlsconfig.ServerDefault(), @@ -55,7 +55,7 @@ func (s *Service) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, e Scheme: "https", Host: hostname, }, - Version: APIVersion2, + Version: APIVersion2, //nolint:staticcheck // ignore SA1019 (Version is deprecated) to allow potential consumers to transition. AllowNondistributableArtifacts: ana, TrimHostname: true, TLSConfig: tlsConfig, @@ -68,7 +68,7 @@ func (s *Service) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, e Scheme: "http", Host: hostname, }, - Version: APIVersion2, + Version: APIVersion2, //nolint:staticcheck // ignore SA1019 (Version is deprecated) to allow potential consumers to transition. 
AllowNondistributableArtifacts: ana, TrimHostname: true, // used to check if supposed to be secure via InsecureSkipVerify diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go deleted file mode 100644 index 86a5cd9ed..000000000 --- a/vendor/github.com/docker/docker/registry/session.go +++ /dev/null @@ -1,216 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - // this is required for some certificates - _ "crypto/sha512" - "encoding/json" - "fmt" - "net/http" - "net/http/cookiejar" - "net/url" - "strings" - "sync" - - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// A session is used to communicate with a V1 registry -type session struct { - indexEndpoint *v1Endpoint - client *http.Client -} - -type authTransport struct { - http.RoundTripper - *registry.AuthConfig - - alwaysSetBasicAuth bool - token []string - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// newAuthTransport handles the auth layer when communicating with a v1 registry (private or official) -// -// For private v1 registries, set alwaysSetBasicAuth to true. -// -// For the official v1 registry, if there isn't already an Authorization header in the request, -// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. -// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing -// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent -// requests. -// -// If the server sends a token without the client having requested it, it is ignored. -// -// This RoundTripper also has a CancelRequest method important for correct timeout handling. -func newAuthTransport(base http.RoundTripper, authConfig *registry.AuthConfig, alwaysSetBasicAuth bool) *authTransport { - if base == nil { - base = http.DefaultTransport - } - return &authTransport{ - RoundTripper: base, - AuthConfig: authConfig, - alwaysSetBasicAuth: alwaysSetBasicAuth, - modReq: make(map[*http.Request]*http.Request), - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - - return r2 -} - -// RoundTrip changes an HTTP request's headers to add the necessary -// authentication-related headers -func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { - // Authorization should not be set on 302 redirect for untrusted locations. - // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. - // As the authorization logic is currently implemented in RoundTrip, - // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. - // This is safe as Docker doesn't set Referrer in other scenarios. 
- if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { - return tr.RoundTripper.RoundTrip(orig) - } - - req := cloneRequest(orig) - tr.mu.Lock() - tr.modReq[orig] = req - tr.mu.Unlock() - - if tr.alwaysSetBasicAuth { - if tr.AuthConfig == nil { - return nil, errors.New("unexpected error: empty auth config") - } - req.SetBasicAuth(tr.Username, tr.Password) - return tr.RoundTripper.RoundTrip(req) - } - - // Don't override - if req.Header.Get("Authorization") == "" { - if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { - req.SetBasicAuth(tr.Username, tr.Password) - } else if len(tr.token) > 0 { - req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) - } - } - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - return nil, err - } - if len(resp.Header["X-Docker-Token"]) > 0 { - tr.token = resp.Header["X-Docker-Token"] - } - resp.Body = &ioutils.OnEOFReader{ - Rc: resp.Body, - Fn: func() { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - }, - } - return resp, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (tr *authTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := tr.RoundTripper.(canceler); ok { - tr.mu.Lock() - modReq := tr.modReq[req] - delete(tr.modReq, req) - tr.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func authorizeClient(client *http.Client, authConfig *registry.AuthConfig, endpoint *v1Endpoint) error { - var alwaysSetBasicAuth bool - - // If we're working with a standalone private registry over HTTPS, send Basic Auth headers - // alongside all our requests. - if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { - info, err := endpoint.ping() - if err != nil { - return err - } - if info.Standalone && authConfig != nil { - logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) - alwaysSetBasicAuth = true - } - } - - // Annotate the transport unconditionally so that v2 can - // properly fallback on v1 when an image is not found. - client.Transport = newAuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) - - jar, err := cookiejar.New(nil) - if err != nil { - return errdefs.System(errors.New("cookiejar.New is not supposed to return an error")) - } - client.Jar = jar - - return nil -} - -func newSession(client *http.Client, endpoint *v1Endpoint) *session { - return &session{ - client: client, - indexEndpoint: endpoint, - } -} - -// defaultSearchLimit is the default value for maximum number of returned search results. -const defaultSearchLimit = 25 - -// searchRepositories performs a search against the remote repository -func (r *session) searchRepositories(term string, limit int) (*registry.SearchResults, error) { - if limit == 0 { - limit = defaultSearchLimit - } - if limit < 1 || limit > 100 { - return nil, invalidParamf("limit %d is outside the range of [1, 100]", limit) - } - logrus.Debugf("Index server: %s", r.indexEndpoint) - u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) - - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return nil, invalidParamWrapf(err, "error building request") - } - // Have the AuthTransport send authentication, when logged in. 
- req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - return nil, errdefs.System(err) - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, errdefs.Unknown(&jsonmessage.JSONError{ - Message: fmt.Sprintf("Unexpected status code %d", res.StatusCode), - Code: res.StatusCode, - }) - } - result := new(registry.SearchResults) - return result, errors.Wrap(json.NewDecoder(res.Body).Decode(result), "error decoding registry search results") -} diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go index 37094737f..54aa0bd19 100644 --- a/vendor/github.com/docker/docker/registry/types.go +++ b/vendor/github.com/docker/docker/registry/types.go @@ -1,12 +1,14 @@ package registry // import "github.com/docker/docker/registry" import ( - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types/registry" ) // APIVersion is an integral representation of an API version (presently // either 1 or 2) +// +// Deprecated: v1 registries are deprecated, and endpoints are always v2. type APIVersion int func (av APIVersion) String() string { @@ -15,8 +17,8 @@ func (av APIVersion) String() string { // API Version identifiers. const ( - APIVersion1 APIVersion = 1 - APIVersion2 APIVersion = 2 + APIVersion1 APIVersion = 1 // Deprecated: v1 registries are deprecated, and endpoints are always v2. + APIVersion2 APIVersion = 2 // Deprecated: v1 registries are deprecated, and endpoints are always v2. ) var apiVersions = map[APIVersion]string{ diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go deleted file mode 100644 index bb7e4e336..000000000 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ /dev/null @@ -1,242 +0,0 @@ -// Package nat is a convenience package for manipulation of strings describing network ports. -package nat - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - -// PortBinding represents a binding between a Host IP address and a Host Port -type PortBinding struct { - // HostIP is the host IP Address - HostIP string `json:"HostIp"` - // HostPort is the host port number - HostPort string -} - -// PortMap is a collection of PortBinding indexed by Port -type PortMap map[Port][]PortBinding - -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// NewPort creates a new instance of a Port given a protocol and port number or port range -func NewPort(proto, port string) (Port, error) { - // Check for parsing issues on "port" now so we can avoid having - // to check it later on. 
- - portStartInt, portEndInt, err := ParsePortRangeToInt(port) - if err != nil { - return "", err - } - - if portStartInt == portEndInt { - return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil - } - return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil -} - -// ParsePort parses the port number string and returns an int -func ParsePort(rawPort string) (int, error) { - if len(rawPort) == 0 { - return 0, nil - } - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// ParsePortRangeToInt parses the port range string and returns start/end ints -func ParsePortRangeToInt(rawPort string) (int, int, error) { - if len(rawPort) == 0 { - return 0, 0, nil - } - start, end, err := ParsePortRange(rawPort) - if err != nil { - return 0, 0, err - } - return int(start), int(end), nil -} - -// Proto returns the protocol of a Port -func (p Port) Proto() string { - proto, _ := SplitProtoPort(string(p)) - return proto -} - -// Port returns the port number of a Port -func (p Port) Port() string { - _, port := SplitProtoPort(string(p)) - return port -} - -// Int returns the port number of a Port as an int -func (p Port) Int() int { - portStr := p.Port() - // We don't need to check for an error because we're going to - // assume that any error would have been found, and reported, in NewPort() - port, _ := ParsePort(portStr) - return port -} - -// Range returns the start/end port numbers of a Port range as ints -func (p Port) Range() (int, int, error) { - return ParsePortRangeToInt(p.Port()) -} - -// SplitProtoPort splits a port in the format of proto/port -func SplitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - if len(parts[1]) == 0 { - return "tcp", parts[0] - } - return parts[1], parts[0] -} - -func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp", "sctp"} { - if availableProto == proto { - return true - } - } - return false -} - -// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses -// these in to the internal types -func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - for _, rawPort := range ports { - portMappings, err := ParsePortSpec(rawPort) - if err != nil { - return nil, nil, err - } - - for _, portMapping := range portMappings { - port := portMapping.Port - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, portMapping.Binding) - } - } - return exposedPorts, bindings, nil -} - -// PortMapping is a data object mapping a Port to a PortBinding -type PortMapping struct { - Port Port - Binding PortBinding -} - -func splitParts(rawport string) (string, string, string) { - parts := strings.Split(rawport, ":") - n := len(parts) - containerport := parts[n-1] - - switch n { - case 1: - return "", "", containerport - case 2: - return "", parts[0], containerport - case 3: - return parts[0], parts[1], containerport - default: - return strings.Join(parts[:n-2], ":"), parts[n-2], containerport - } -} - -// ParsePortSpec parses a port specification string into a slice of PortMappings -func 
ParsePortSpec(rawPort string) ([]PortMapping, error) { - var proto string - rawIP, hostPort, containerPort := splitParts(rawPort) - proto, containerPort = SplitProtoPort(containerPort) - - // Strip [] from IPV6 addresses - ip, _, err := net.SplitHostPort(rawIP + ":") - if err != nil { - return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) - } - if ip != "" && net.ParseIP(ip) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", ip) - } - if containerPort == "" { - return nil, fmt.Errorf("No port specified: %s", rawPort) - } - - startPort, endPort, err := ParsePortRange(containerPort) - if err != nil { - return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) - } - - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = ParsePortRange(hostPort) - if err != nil { - return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) - } - } - - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. - if endPort != startPort { - return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } - - if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("Invalid proto: %s", proto) - } - - ports := []PortMapping{} - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. - if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, err - } - - binding := PortBinding{ - HostIP: ip, - HostPort: hostPort, - } - ports = append(ports, PortMapping{Port: port, Binding: binding}) - } - return ports, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go deleted file mode 100644 index 892adf8c6..000000000 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ /dev/null @@ -1,57 +0,0 @@ -package nat - -import ( - "fmt" - "strconv" - "strings" -) - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -// DEPRECATED: do not use, this function may be removed in a future version -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go deleted file mode 100644 index ce950171e..000000000 --- a/vendor/github.com/docker/go-connections/nat/sort.go +++ /dev/null @@ -1,96 +0,0 @@ -package nat - -import ( - "sort" - "strings" -) - -type portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -// Sort sorts a list of ports using the provided predicate -// This function should compare `i` and `j`, returning true if `i` is -// considered to be less than `j` -func Sort(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - -type portMapEntry struct { - port Port - binding PortBinding -} - -type portMapSorter []portMapEntry - -func (s portMapSorter) Len() int { return len(s) } -func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// sort the port so that the order is: -// 1. port with larger specified bindings -// 2. larger port -// 3. port with tcp protocol -func (s portMapSorter) Less(i, j int) bool { - pi, pj := s[i].port, s[j].port - hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) - return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") -} - -// SortPortMap sorts the list of ports and their respective mapping. The ports -// with explicit HostPort will be placed first.
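
ParsePortRange, deleted just above, treats a bare port as a degenerate range and "start-end" as an inclusive one, rejecting reversed bounds. A rough re-sketch of that contract using strings.Cut, for illustration only; this is not the vendored implementation.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePortRange mimics the deleted helper: "8080" -> (8080, 8080),
// "8000-9000" -> (8000, 9000), and reversed bounds are rejected.
func parsePortRange(ports string) (uint64, uint64, error) {
	if ports == "" {
		return 0, 0, fmt.Errorf("empty string specified for ports")
	}
	startStr, endStr, found := strings.Cut(ports, "-")
	start, err := strconv.ParseUint(startStr, 10, 16)
	if err != nil {
		return 0, 0, err
	}
	if !found {
		// A single port is a range of one.
		return start, start, nil
	}
	end, err := strconv.ParseUint(endStr, 10, 16)
	if err != nil {
		return 0, 0, err
	}
	if end < start {
		return 0, 0, fmt.Errorf("invalid range specified for the port: %s", ports)
	}
	return start, end, nil
}

func main() {
	fmt.Println(parsePortRange("8080"))      // 8080 8080 <nil>
	fmt.Println(parsePortRange("8000-9000")) // 8000 9000 <nil>
}
```
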
-func SortPortMap(ports []Port, bindings PortMap) { - s := portMapSorter{} - for _, p := range ports { - if binding, ok := bindings[p]; ok { - for _, b := range binding { - s = append(s, portMapEntry{port: p, binding: b}) - } - bindings[p] = []PortBinding{} - } else { - s = append(s, portMapEntry{port: p}) - } - } - - sort.Sort(s) - var ( - i int - pm = make(map[Port]struct{}) - ) - // reorder ports - for _, entry := range s { - if _, ok := pm[entry.port]; !ok { - ports[i] = entry.port - pm[entry.port] = struct{}{} - i++ - } - // reorder bindings for this port - if _, ok := bindings[entry.port]; ok { - bindings[entry.port] = append(bindings[entry.port], entry.binding) - } - } -} - -func toInt(s string) uint64 { - i, _, err := ParsePortRange(s) - if err != nil { - i = 0 - } - return i -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool.go new file mode 100644 index 000000000..f84c624ba --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool.go @@ -0,0 +1,16 @@ +package tlsconfig + +import ( + "crypto/x509" + "runtime" +) + +// SystemCertPool returns a copy of the system cert pool, +// returns an error if failed to load or empty pool on windows. +func SystemCertPool() (*x509.CertPool, error) { + certpool, err := x509.SystemCertPool() + if err != nil && runtime.GOOS == "windows" { + return x509.NewCertPool(), nil + } + return certpool, err +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go deleted file mode 100644 index 1ca0965e0..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build go1.7 - -package tlsconfig - -import ( - "crypto/x509" - "runtime" -) - -// SystemCertPool returns a copy of the system cert pool, -// returns an error if failed to load or empty pool on windows. -func SystemCertPool() (*x509.CertPool, error) { - certpool, err := x509.SystemCertPool() - if err != nil && runtime.GOOS == "windows" { - return x509.NewCertPool(), nil - } - return certpool, err -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go deleted file mode 100644 index 1ff81c333..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.7 - -package tlsconfig - -import ( - "crypto/x509" -) - -// SystemCertPool returns an new empty cert pool, -// accessing system cert pool is supported in go 1.7 -func SystemCertPool() (*x509.CertPool, error) { - return x509.NewCertPool(), nil -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 0ef3fdcb4..606c98a38 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -1,6 +1,7 @@ // Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. // // As a reminder from https://golang.org/pkg/crypto/tls/#Config: +// // A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. // A Config may be reused; the tls package will also not modify it. 
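
With the build-tag variants removed, the single SystemCertPool added above is the one entry point for obtaining trusted roots, falling back to an empty pool on Windows, where older Go releases could not load the system pool. A sketch of how such a helper is typically consumed, with a placeholder CA path and trimmed error handling:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"os"
)

func main() {
	// Start from the system roots when available.
	pool, err := x509.SystemCertPool()
	if err != nil {
		// Mirrors the vendored fallback: degrade to an empty pool.
		pool = x509.NewCertPool()
	}
	// Append a private CA on top of the system roots.
	pem, err := os.ReadFile("/etc/ssl/private-ca.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	if !pool.AppendCertsFromPEM(pem) {
		log.Fatal("no certificates appended")
	}
	cfg := &tls.Config{RootCAs: pool, MinVersion: tls.VersionTLS12}
	_ = cfg // hand this to an http.Transport or tls.Dial
}
```
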
package tlsconfig @@ -9,11 +10,9 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" + "errors" "fmt" - "io/ioutil" "os" - - "github.com/pkg/errors" ) // Options represents the information needed to create client and server TLS configurations. @@ -36,7 +35,12 @@ type Options struct { ExclusiveRootPools bool MinVersion uint16 // If Passphrase is set, it will be used to decrypt a TLS private key - // if the key is encrypted + // if the key is encrypted. + // + // Deprecated: Use of encrypted TLS private keys has been deprecated, and + // will be removed in a future release. Golang has deprecated support for + // legacy PEM encryption (as specified in RFC 1423), as it is insecure by + // design (see https://go-review.googlesource.com/c/go/+/264159). Passphrase string } @@ -53,18 +57,9 @@ var acceptedCBCCiphers = []uint16{ // known weak algorithms removed. var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionSSL30: {}, - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} - // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Avoid fallback by default to SSL protocols < TLS1.2 MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, @@ -72,25 +67,25 @@ func ServerDefault(ops ...func(*tls.Config)) *tls.Config { } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. func ClientDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Prefer TLS1.2 as the client minimum MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. @@ -108,16 +103,25 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return nil, fmt.Errorf("failed to read system certificates: %v", err) } } - pem, err := ioutil.ReadFile(caFile) + pemData, err := os.ReadFile(caFile) if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } - if !certPool.AppendCertsFromPEM(pem) { + if !certPool.AppendCertsFromPEM(pemData) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } return certPool, nil } +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. 
+var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, + tls.VersionTLS13: {}, +} + // isValidMinVersion checks that the input value is a valid tls minimum version func isValidMinVersion(version uint16) bool { _, ok := allTLSVersions[version] @@ -129,10 +133,10 @@ func isValidMinVersion(version uint16) bool { func adjustMinVersion(options Options, config *tls.Config) error { if options.MinVersion > 0 { if !isValidMinVersion(options.MinVersion) { - return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) + return fmt.Errorf("invalid minimum TLS version: %x", options.MinVersion) } if options.MinVersion < config.MinVersion { - return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) + return fmt.Errorf("requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) } config.MinVersion = options.MinVersion } @@ -141,9 +145,14 @@ func adjustMinVersion(options Options, config *tls.Config) error { } // IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when tryin to decrypt a TLS private key +// password when trying to decrypt a TLS private key. +// +// Deprecated: Use of encrypted TLS private keys has been deprecated, and +// will be removed in a future release. Golang has deprecated support for +// legacy PEM encryption (as specified in RFC 1423), as it is insecure by +// design (see https://go-review.googlesource.com/c/go/+/264159). func IsErrEncryptedKey(err error) bool { - return errors.Cause(err) == x509.IncorrectPasswordError + return errors.Is(err, x509.IncorrectPasswordError) } // getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. @@ -157,10 +166,10 @@ func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { } var err error - if x509.IsEncryptedPEMBlock(pemBlock) { - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) if err != nil { - return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") + return nil, fmt.Errorf("private key is encrypted, but could not decrypt it: %w", err) } keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) } @@ -176,26 +185,24 @@ func getCert(options Options) ([]tls.Certificate, error) { return nil, nil } - errMessage := "Could not load X509 key pair" - - cert, err := ioutil.ReadFile(options.CertFile) + cert, err := os.ReadFile(options.CertFile) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } - prKeyBytes, err := ioutil.ReadFile(options.KeyFile) + prKeyBytes, err := os.ReadFile(options.KeyFile) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } return []tls.Certificate{tlsCert}, nil @@ -215,7 +222,7 @@ func Client(options Options) (*tls.Config, error) { tlsCerts, err := getCert(options) if err != nil { - return nil, err + return nil, fmt.Errorf("could not load X509 key pair: %w", err) } 
tlsConfig.Certificates = tlsCerts @@ -233,9 +240,9 @@ func Server(options Options) (*tls.Config, error) { tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + return nil, fmt.Errorf("could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + return nil, fmt.Errorf("error reading X509 key pair - make sure the key is not encrypted (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } tlsConfig.Certificates = []tls.Certificate{tlsCert} if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go index 6b4c6a7c0..a82f9fa52 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -1,7 +1,4 @@ -// +build go1.5 - // Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// package tlsconfig import ( diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go deleted file mode 100644 index ee22df47c..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784..000000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. 
The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE deleted file mode 100644 index b55b37bc3..000000000 --- a/vendor/github.com/docker/go-units/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 4aac7c741..000000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-units maintainers file -# -# This file describes who runs the docker/go-units project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "akihirosuda", - "dnephin", - "thajeztah", - "vdemeester", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.akihirosuda] - Name = "Akihiro Suda" - Email = "akihiro.suda.cz@hco.ntt.co.jp" - GitHub = "AkihiroSuda" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 4f70a4e13..000000000 --- a/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,16 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. - -go-units is licensed under the Apache License, Version 2.0. -See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index af9d60552..000000000 --- a/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get golang.org/x/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go deleted file mode 100644 index 48dd8744d..000000000 --- a/vendor/github.com/docker/go-units/duration.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds == 1 { - return "1 second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours() + 0.5); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*2 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go deleted file mode 100644 index c245a8951..000000000 --- a/vendor/github.com/docker/go-units/size.go +++ /dev/null @@ -1,154 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[byte]int64 - -var ( - decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} - binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} -) - -var ( - decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} -) - -func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { - i := 0 - unitsLimit := len(_map) - 1 - for size >= base && i < unitsLimit { - size = size / base - i++ - } - return size, _map[i] -} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - size, unit := getSizeAndUnit(size, base, _map) - return fmt.Sprintf(format, size, unit) -} - -// HumanSizeWithPrecision allows the size to be in any precision, -// instead of 4 digit precision used in units.HumanSize. -func HumanSizeWithPrecision(size float64, precision int) string { - size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g%s", precision, size, unit) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return HumanSizeWithPrecision(size, 4) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. 
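
The size helpers being deleted above keep two parallel unit tables: decimal (powers of 1000, kB/MB/...) and binary (powers of 1024, KiB/MiB/...). This self-contained sketch of the shared humanize loop shows why the same byte count renders differently under each base; it is illustrative, not the vendored code.

```go
package main

import "fmt"

// humanize divides by base until the value fits, then prints it with
// the matching unit, roughly like getSizeAndUnit/CustomSize above.
func humanize(size float64, base float64, units []string) string {
	i := 0
	for size >= base && i < len(units)-1 {
		size /= base
		i++
	}
	return fmt.Sprintf("%.4g%s", size, units[i])
}

func main() {
	decimal := []string{"B", "kB", "MB", "GB", "TB"}
	binary := []string{"B", "KiB", "MiB", "GiB", "TiB"}
	fmt.Println(humanize(2813000, 1000, decimal)) // 2.813MB
	fmt.Println(humanize(2813000, 1024, binary))  // 2.683MiB
}
```
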
-func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - // TODO: rewrite to use strings.Cut if there's a space - // once Go < 1.18 is deprecated. - sep := strings.LastIndexAny(sizeStr, "01234567890. ") - if sep == -1 { - // There should be at least a digit. - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - var num, sfx string - if sizeStr[sep] != ' ' { - num = sizeStr[:sep+1] - sfx = sizeStr[sep+1:] - } else { - // Omit the space separator. - num = sizeStr[:sep] - sfx = sizeStr[sep+1:] - } - - size, err := strconv.ParseFloat(num, 64) - if err != nil { - return -1, err - } - // Backward compatibility: reject negative sizes. - if size < 0 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - if len(sfx) == 0 { - return int64(size), nil - } - - // Process the suffix. - - if len(sfx) > 3 { // Too long. - goto badSuffix - } - sfx = strings.ToLower(sfx) - // Trivial case: b suffix. - if sfx[0] == 'b' { - if len(sfx) > 1 { // no extra characters allowed after b. - goto badSuffix - } - return int64(size), nil - } - // A suffix from the map. - if mul, ok := uMap[sfx[0]]; ok { - size *= float64(mul) - } else { - goto badSuffix - } - - // The suffix may have extra "b" or "ib" (e.g. KiB or MB). - switch { - case len(sfx) == 2 && sfx[1] != 'b': - goto badSuffix - case len(sfx) == 3 && sfx[1:] != "ib": - goto badSuffix - } - - return int64(size), nil - -badSuffix: - return -1, fmt.Errorf("invalid suffix: '%s'", sfx) -} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go deleted file mode 100644 index fca0400cc..000000000 --- a/vendor/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,123 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. 
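
parseSize above splits the input at the last digit, parses the numeric part as a float, and multiplies by a suffix looked up case-insensitively. A compact, hedged re-sketch of that flow follows; it covers binary units and the "<number><suffix>" form only, and is illustrative rather than drop-in.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// binaryUnits is a trimmed stand-in for the vendored unitMap.
var binaryUnits = map[string]int64{
	"": 1, "b": 1,
	"kib": 1 << 10, "mib": 1 << 20, "gib": 1 << 30, "tib": 1 << 40,
}

func ramInBytes(s string) (int64, error) {
	// Find the boundary between the numeric part and the suffix.
	sep := strings.LastIndexAny(s, "0123456789.")
	if sep == -1 {
		return -1, fmt.Errorf("invalid size: '%s'", s)
	}
	num, sfx := s[:sep+1], strings.ToLower(strings.TrimSpace(s[sep+1:]))
	size, err := strconv.ParseFloat(num, 64)
	if err != nil || size < 0 {
		return -1, fmt.Errorf("invalid size: '%s'", s)
	}
	mul, ok := binaryUnits[sfx]
	if !ok {
		return -1, fmt.Errorf("invalid suffix: '%s'", sfx)
	}
	return int64(size * float64(mul)), nil
}

func main() {
	fmt.Println(ramInBytes("64MiB")) // 67108864 <nil>
	fmt.Println(ramInBytes("32"))    // 32 <nil>
}
```
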
-func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - var ( - soft int64 - hard = &soft // default to soft in case no hard was set - temp int64 - err error - ) - switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { - case 2: - temp, err = strconv.ParseInt(limitVals[1], 10, 64) - if err != nil { - return nil, err - } - hard = &temp - fallthrough - case 1: - soft, err = strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - if *hard != -1 { - if soft == -1 { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) - } - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) - } - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. -func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 97bb1a9d6..5edd5a7ca 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,9 +1,5 @@ # Change history of go-restful -## [v3.11.1] - 2024-01-03 - -- remove the dependency on github.com/json-iterator/go (#539) - ## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go index 8a35c9597..9808752ac 100644 --- a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go @@ -11,6 +11,12 @@ import ( "sync" ) +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) + // EntityReaderWriter can read and write values using an encoding such as JSON,XML. type EntityReaderWriter interface { // Read a serialized version of the value from the request. 
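
The new package-level MarshalIndent/NewDecoder/NewEncoder variables introduced in entity_accessors.go turn go-restful's JSON entry points into swappable hooks: assign a compatible function at startup and every reader and writer picks it up. A hedged sketch of that indirection pattern in isolation; the instrumented wrapper here is invented for illustration and simply counts calls before delegating to encoding/json.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// Swappable hook, defaulting to the standard library (same shape as the diff).
var NewDecoder = json.NewDecoder

var decodes atomic.Int64

// instrumentedDecoder matches json.NewDecoder's signature, so it can be
// assigned to the hook without any adapter.
func instrumentedDecoder(r io.Reader) *json.Decoder {
	decodes.Add(1)
	return json.NewDecoder(r)
}

func main() {
	NewDecoder = instrumentedDecoder // swap the implementation at startup

	var v map[string]any
	if err := NewDecoder(strings.NewReader(`{"ok":true}`)).Decode(&v); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(v, decodes.Load())
}
```
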
@@ -128,7 +134,7 @@ type entityJSONAccess struct { // Read unmarshalls the value from JSON func (e entityJSONAccess) Read(req *Request, v interface{}) error { - decoder := json.NewDecoder(req.Request.Body) + decoder := NewDecoder(req.Request.Body) decoder.UseNumber() return decoder.Decode(v) } @@ -147,7 +153,7 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er } if resp.prettyPrint { // pretty output must be created and written explicitly - output, err := json.MarshalIndent(v, "", " ") + output, err := MarshalIndent(v, "", " ") if err != nil { return err } @@ -159,5 +165,5 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er // not-so-pretty resp.Header().Set(HEADER_ContentType, contentType) resp.WriteHeader(status) - return json.NewEncoder(resp).Encode(v) + return NewEncoder(resp).Encode(v) } diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go index a1819b16a..2e6eca448 100644 --- a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go @@ -167,6 +167,19 @@ func Marshal(v any) ([]byte, error) { return buf, nil } +func MarshalEscaped(v any, escape bool) ([]byte, error) { + e := newEncodeState() + defer encodeStatePool.Put(e) + + err := e.marshal(v, encOpts{escapeHTML: escape}) + if err != nil { + return nil, err + } + buf := append([]byte(nil), e.Bytes()...) + + return buf, nil +} + // MarshalIndent is like Marshal but applies Indent to format the output. // Each JSON element in the output will begin on a new line beginning with prefix // followed by one or more copies of indent according to the indentation nesting. diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go index 1442ef29e..5598ce11f 100644 --- a/vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go @@ -6,7 +6,7 @@ package json import ( "bytes" - "errors" + "encoding/json" "io" ) @@ -259,27 +259,7 @@ func (enc *Encoder) SetEscapeHTML(on bool) { // RawMessage is a raw encoded JSON value. // It implements Marshaler and Unmarshaler and can // be used to delay JSON decoding or precompute a JSON encoding. -type RawMessage []byte - -// MarshalJSON returns m as the JSON encoding of m. -func (m RawMessage) MarshalJSON() ([]byte, error) { - if m == nil { - return []byte("null"), nil - } - return m, nil -} - -// UnmarshalJSON sets *m to a copy of data. -func (m *RawMessage) UnmarshalJSON(data []byte) error { - if m == nil { - return errors.New("json.RawMessage: UnmarshalJSON on nil pointer") - } - *m = append((*m)[0:0], data...) 
- return nil -} - -var _ Marshaler = (*RawMessage)(nil) -var _ Unmarshaler = (*RawMessage)(nil) +type RawMessage = json.RawMessage // A Token holds a value of one of these types: // diff --git a/vendor/github.com/evanphx/json-patch/v5/merge.go b/vendor/github.com/evanphx/json-patch/v5/merge.go index bbe9f85f2..f79caf313 100644 --- a/vendor/github.com/evanphx/json-patch/v5/merge.go +++ b/vendor/github.com/evanphx/json-patch/v5/merge.go @@ -10,26 +10,26 @@ import ( "github.com/evanphx/json-patch/v5/internal/json" ) -func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { - curDoc, err := cur.intoDoc() +func merge(cur, patch *lazyNode, mergeMerge bool, options *ApplyOptions) *lazyNode { + curDoc, err := cur.intoDoc(options) if err != nil { - pruneNulls(patch) + pruneNulls(patch, options) return patch } - patchDoc, err := patch.intoDoc() + patchDoc, err := patch.intoDoc(options) if err != nil { return patch } - mergeDocs(curDoc, patchDoc, mergeMerge) + mergeDocs(curDoc, patchDoc, mergeMerge, options) return cur } -func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { +func mergeDocs(doc, patch *partialDoc, mergeMerge bool, options *ApplyOptions) { for k, v := range patch.obj { if v == nil { if mergeMerge { @@ -45,55 +45,55 @@ func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { } doc.obj[k] = nil } else { - _ = doc.remove(k, &ApplyOptions{}) + _ = doc.remove(k, options) } } else { cur, ok := doc.obj[k] if !ok || cur == nil { if !mergeMerge { - pruneNulls(v) + pruneNulls(v, options) } - _ = doc.set(k, v, &ApplyOptions{}) + _ = doc.set(k, v, options) } else { - _ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{}) + _ = doc.set(k, merge(cur, v, mergeMerge, options), options) } } } } -func pruneNulls(n *lazyNode) { - sub, err := n.intoDoc() +func pruneNulls(n *lazyNode, options *ApplyOptions) { + sub, err := n.intoDoc(options) if err == nil { - pruneDocNulls(sub) + pruneDocNulls(sub, options) } else { ary, err := n.intoAry() if err == nil { - pruneAryNulls(ary) + pruneAryNulls(ary, options) } } } -func pruneDocNulls(doc *partialDoc) *partialDoc { +func pruneDocNulls(doc *partialDoc, options *ApplyOptions) *partialDoc { for k, v := range doc.obj { if v == nil { _ = doc.remove(k, &ApplyOptions{}) } else { - pruneNulls(v) + pruneNulls(v, options) } } return doc } -func pruneAryNulls(ary *partialArray) *partialArray { +func pruneAryNulls(ary *partialArray, options *ApplyOptions) *partialArray { newAry := []*lazyNode{} for _, v := range ary.nodes { if v != nil { - pruneNulls(v) + pruneNulls(v, options) } newAry = append(newAry, v) } @@ -128,11 +128,17 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { return nil, errBadJSONPatch } - doc := &partialDoc{} + options := NewApplyOptions() + + doc := &partialDoc{ + opts: options, + } docErr := doc.UnmarshalJSON(docData) - patch := &partialDoc{} + patch := &partialDoc{ + opts: options, + } patchErr := patch.UnmarshalJSON(patchData) @@ -158,7 +164,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { if mergeMerge { doc = patch } else { - doc = pruneDocNulls(patch) + doc = pruneDocNulls(patch, options) } } else { patchAry := &partialArray{} @@ -172,7 +178,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { return nil, errBadJSONPatch } - pruneAryNulls(patchAry) + pruneAryNulls(patchAry, options) out, patchErr := json.Marshal(patchAry.nodes) @@ -183,7 +189,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { return 
out, nil } } else { - mergeDocs(doc, patch, mergeMerge) + mergeDocs(doc, patch, mergeMerge, options) } return json.Marshal(doc) diff --git a/vendor/github.com/evanphx/json-patch/v5/patch.go b/vendor/github.com/evanphx/json-patch/v5/patch.go index a3bbf1c73..7a7f71c8b 100644 --- a/vendor/github.com/evanphx/json-patch/v5/patch.go +++ b/vendor/github.com/evanphx/json-patch/v5/patch.go @@ -38,6 +38,8 @@ var ( ErrInvalid = errors.New("invalid state detected") ErrInvalidIndex = errors.New("invalid index referenced") + ErrExpectedObject = errors.New("invalid value, expected object") + rawJSONArray = []byte("[]") rawJSONObject = []byte("{}") rawJSONNull = []byte("null") @@ -60,6 +62,8 @@ type partialDoc struct { self *lazyNode keys []string obj map[string]*lazyNode + + opts *ApplyOptions } type partialArray struct { @@ -90,6 +94,8 @@ type ApplyOptions struct { // EnsurePathExistsOnAdd instructs json-patch to recursively create the missing parts of path on "add" operation. // Default to false. EnsurePathExistsOnAdd bool + + EscapeHTML bool } // NewApplyOptions creates a default set of options for calls to ApplyWithOptions. @@ -99,6 +105,7 @@ func NewApplyOptions() *ApplyOptions { AccumulatedCopySizeLimit: AccumulatedCopySizeLimit, AllowMissingPathOnRemove: false, EnsurePathExistsOnAdd: false, + EscapeHTML: true, } } @@ -134,16 +141,28 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error { } func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error { + if n.obj == nil { + return ErrExpectedObject + } + if err := buf.WriteByte('{'); err != nil { return err } + escaped := true + + // n.opts should always be set, but in case we missed a case, + // guard. + if n.opts != nil { + escaped = n.opts.EscapeHTML + } + for i, k := range n.keys { if i > 0 { if err := buf.WriteByte(','); err != nil { return err } } - key, err := json.Marshal(k) + key, err := json.MarshalEscaped(k, escaped) if err != nil { return err } @@ -153,7 +172,7 @@ func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error { if err := buf.WriteByte(':'); err != nil { return err } - value, err := json.Marshal(n.obj[k]) + value, err := json.MarshalEscaped(n.obj[k], escaped) if err != nil { return err } @@ -194,11 +213,11 @@ func (n *partialArray) RedirectMarshalJSON() (interface{}, error) { return n.nodes, nil } -func deepCopy(src *lazyNode) (*lazyNode, int, error) { +func deepCopy(src *lazyNode, options *ApplyOptions) (*lazyNode, int, error) { if src == nil { return nil, 0, nil } - a, err := json.Marshal(src) + a, err := json.MarshalEscaped(src, options.EscapeHTML) if err != nil { return nil, 0, err } @@ -216,7 +235,7 @@ func (n *lazyNode) nextByte() byte { return s[0] } -func (n *lazyNode) intoDoc() (*partialDoc, error) { +func (n *lazyNode) intoDoc(options *ApplyOptions) (*partialDoc, error) { if n.which == eDoc { return n.doc, nil } @@ -235,6 +254,7 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) { return nil, ErrInvalid } + n.doc.opts = options if err != nil { return nil, err } @@ -545,7 +565,7 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s return nil, "" } } else { - doc, err = next.intoDoc() + doc, err = next.intoDoc(options) if err != nil { return nil, "" @@ -557,6 +577,10 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s } func (d *partialDoc) set(key string, val *lazyNode, options *ApplyOptions) error { + if d.obj == nil { + return ErrExpectedObject + } + found := false for _, k := range d.keys { if k == key { @@ -579,6 +603,11 @@ func (d 
*partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) { if key == "" { return d.self, nil } + + if d.obj == nil { + return nil, ErrExpectedObject + } + v, ok := d.obj[key] if !ok { return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key) @@ -587,6 +616,10 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) { } func (d *partialDoc) remove(key string, options *ApplyOptions) error { + if d.obj == nil { + return ErrExpectedObject + } + _, ok := d.obj[key] if !ok { if options.AllowMissingPathOnRemove { @@ -750,6 +783,7 @@ func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error { } else { pd = &partialDoc{ self: val, + opts: options, } } @@ -855,7 +889,7 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error { newNode := newLazyNode(newRawMessage(rawJSONObject)) doc.add(part, newNode, options) - doc, err = newNode.intoDoc() + doc, err = newNode.intoDoc(options) if err != nil { return err } @@ -868,7 +902,7 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error { return err } } else { - doc, err = target.intoDoc() + doc, err = target.intoDoc(options) if err != nil { return err @@ -954,6 +988,8 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro if !val.tryAry() { return errors.Wrapf(err, "replace operation value must be object or array") } + } else { + val.doc.opts = options } } @@ -1115,7 +1151,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, op return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) } - valCopy, sz, err := deepCopy(val) + valCopy, sz, err := deepCopy(val, options) if err != nil { return errors.Wrapf(err, "error while performing deep copy") } @@ -1202,6 +1238,7 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO } else { pd = &partialDoc{ self: self, + opts: options, } } @@ -1238,11 +1275,18 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO } } - if indent != "" { - return json.MarshalIndent(pd, "", indent) + data, err := json.MarshalEscaped(pd, options.EscapeHTML) + if err != nil { + return nil, err + } + + if indent == "" { + return data, nil } - return json.Marshal(pd) + var buf bytes.Buffer + json.Indent(&buf, data, "", indent) + return buf.Bytes(), nil } // From http://tools.ietf.org/html/rfc6901#section-4 : diff --git a/vendor/github.com/fxamacker/cbor/v2/.gitignore b/vendor/github.com/fxamacker/cbor/v2/.gitignore new file mode 100644 index 000000000..f1c181ec9 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml new file mode 100644 index 000000000..38cb9ae10 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -0,0 +1,104 @@ +# Do not delete linter settings. Linters like gocritic can be enabled on the command line. 
+ +linters-settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + ignore-tests: true + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + gofmt: + simplify: false + goimports: + local-prefixes: github.com/fxamacker/cbor + golint: + min-confidence: 0 + govet: + check-shadowing: true + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + staticcheck: + checks: ["all"] + +linters: + disable-all: true + enable: + - asciicheck + - bidichk + - depguard + - errcheck + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nilerr + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unused + +issues: + # max-issues-per-linter default is 50. Set to 0 to disable limit. + max-issues-per-linter: 0 + # max-same-issues default is 3. Set to 0 to disable limit. + max-same-issues: 0 + + exclude-rules: + - path: decode.go + text: "string ` overflows ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `, ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string ` for type ` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string `cbor: ` has (\\d+) occurrences, make it a constant" diff --git a/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..c794b2b0c --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +faye.github@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md new file mode 100644 index 000000000..de0965e12 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md @@ -0,0 +1,41 @@ +# How to contribute + +You can contribute by using the library, opening issues, or opening pull requests. + +## Bug reports and security vulnerabilities + +Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues). + +To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy). + +Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me. + +## Pull requests + +Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc. + +Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts. + +See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details. + +Pull requests have a greater chance of being approved if: +- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature. +- it has > 97% code coverage. + +## Describe your issue + +Clearly describe the issue: +* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error. 
+* If you propose a change or addition, try to give an example of how the improved code could look or how it would be used.
+* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message.
+
+## Please don't
+
+Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me.
+
+Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me.
+
+## Credits
+
+- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22.
+- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements.
diff --git a/vendor/github.com/fxamacker/cbor/v2/LICENSE b/vendor/github.com/fxamacker/cbor/v2/LICENSE
new file mode 100644
index 000000000..eaa850492
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019-present Faye Amacker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md
new file mode 100644
index 000000000..af0a79507
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/README.md
@@ -0,0 +1,691 @@
+# CBOR Codec in Go
+
+
+[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
+
+CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
+
+`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
+
+See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/).
🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accept a user-specified buffer.
+
+## fxamacker/cbor
+
+[![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
+[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
+[![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
+[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage)
+[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor)
+
+`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
+
+Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
+
+Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
Highlights

+ +__🚀  Speed__ + +Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data. + +__🔒  Security__ + +Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation. + +__🗜️  Data Size__ + +Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. + +__:jigsaw:  Usability__ + +API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines. + +Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc. + +__📆  Extensibility__ + +Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library. + +
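+
+A minimal sketch of the opt-in float shrinking mentioned under __Data Size__ (assuming default options otherwise; `ShortestFloat16` is the opt-in setting):
+
+```Go
+// With ShortestFloat16, a float64 value is encoded as the smallest
+// float type that represents it exactly (float64 -> float32 -> float16).
+opts := cbor.EncOptions{ShortestFloat: cbor.ShortestFloat16}
+em, _ := opts.EncMode()
+
+b, _ := em.Marshal(1.5)  // 1.5 is exactly representable as float16
+fmt.Printf("%x\n", b)    // f93e00: 0xf9 head byte + 2-byte float16
+```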


+ +
+ +### Secure Decoding with Configurable Settings + +`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. + +By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +
Example decoding with encoding/gob 💥 fatal error (out of memory)

+ +```Go +// Example of encoding/gob having "fatal error: runtime: out of memory" +// while decoding 181 bytes. +package main +import ( + "bytes" + "encoding/gob" + "encoding/hex" + "fmt" +) + +// Example data is from https://github.com/golang/go/issues/24446 +// (shortened to 181 bytes). +const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + + "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + + "860001013001ff860001013001ffb80000001eff850401010e3030303030" + + "30303030303030303001ff3000010c0104000016ffb70201010830303030" + + "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + + "303030303030303030303030303030303030303030303030303030303030" + + "30" + +type X struct { + J *X + K map[string]int +} + +func main() { + raw, _ := hex.DecodeString(data) + decoder := gob.NewDecoder(bytes.NewReader(raw)) + + var x X + decoder.Decode(&x) // fatal error: runtime: out of memory + fmt.Println("Decoding finished.") +} +``` + +
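+
+For contrast, a minimal sketch (reusing the `data` constant from the example above): this library's default mode rejects those same bytes almost immediately with an error (extraneous or malformed data) instead of exhausting memory.
+
+```Go
+// cbor.Unmarshal fails fast on the same 181 bytes.
+raw, _ := hex.DecodeString(data)
+
+var v interface{}
+if err := cbor.Unmarshal(raw, &v); err != nil {
+	fmt.Println("rejected:", err) // returns promptly with a non-nil error
+}
+```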


+ +
+
+`fxamacker/cbor` is fast at rejecting malformed CBOR data. For example, attempting to
+decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
+
+| Codec | Speed (ns/op) | Memory | Allocs |
+| :---- | ------------: | -----: | -----: |
+| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
+| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
Benchmark details

+ +Latest comparison used: +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) +- go test -bench=. -benchmem -count=20 + +#### Prior comparisons + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | + +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.6, linux/amd64, i5-13600K (DDR4) +- go test -bench=. -benchmem -count=20 + +


+ +
+ +### Smaller Encodings with Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
Example encoding 3-level nested Go struct to 1 byte CBOR

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
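+
+The example above covers `omitempty`; here is a minimal sketch of the other two tags (the `Claims` and `Record` types are hypothetical):
+
+```Go
+// `keyasint` encodes field names as integer map keys, which are smaller
+// than text keys; `toarray` drops keys entirely and encodes the struct
+// as a CBOR array.
+type Claims struct {
+	Iss string `cbor:"1,keyasint"`
+	Exp int64  `cbor:"4,keyasint"`
+}
+
+type Record struct {
+	_      struct{} `cbor:",toarray"` // special "_" field enables array encoding
+	Alg    int
+	Digest []byte
+}
+```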
+ +Example using different struct tags together: + +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. + +## Quick Start + +__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. + +### Key Points + +This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). + +- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items. +- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items. + +Configurable limits and options can be used to balance trade-offs. + +- Encoding and decoding modes are created from options (settings). +- Modes can be created at startup and reused. +- Modes are safe for concurrent use. + +### Default Mode + +Package level functions only use this library's default settings. +They provide the "default mode" of encoding and decoding. + +```go +// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc. +b, err = cbor.Marshal(v) // encode v to []byte b +err = cbor.Unmarshal(b, &v) // decode []byte b to v +decoder = cbor.NewDecoder(r) // create decoder with io.Reader r +err = decoder.Decode(&v) // decode a CBOR data item to v + +// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface. +err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool. + +// v2.5.0 added new functions that return remaining bytes. + +// UnmarshalFirst decodes first CBOR data item and returns remaining bytes. +rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v + +// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. +text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text + +// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, +// but new funcs UnmarshalFirst and DiagnoseFirst do not. +``` + +__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. + +- Different CBOR libraries may use different default settings. +- CBOR-based formats or protocols usually require specific settings. + +For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. + +### Presets + +Presets can be used as-is or as a starting point for custom settings. + +```go +// EncOptions is a struct of encoder settings. +func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding +func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization +func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR +func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR +``` + +Presets are used to create custom modes. + +### Custom Modes + +Modes are created from settings. Once created, modes have immutable settings. + +💡 Create the mode at startup and reuse it. It is safe for concurrent use. + +```Go +// Create encoding mode. +opts := cbor.CoreDetEncOptions() // use preset options as a starting point +opts.Time = cbor.TimeUnix // change any settings if needed +em, err := opts.EncMode() // create an immutable encoding mode + +// Reuse the encoding mode. It is safe for concurrent use. + +// API matches encoding/json. 
+b, err := em.Marshal(v) // encode v to []byte b +encoder := em.NewEncoder(w) // create encoder with io.Writer w +err := encoder.Encode(v) // encode v to io.Writer w +``` + +Default mode and custom modes automatically apply struct tags. + +### User Specified Buffer for Encoding (v2.7.0) + +`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool. + +```Go +em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode + +var buf bytes.Buffer +err = em.MarshalToBuffer(v, &buf) // encode v to provided buf +``` + +### Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
Example encoding 3-level nested Go struct to 1 byte CBOR

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
+ +
Example using several struct tags

+ +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +

+ +Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. + +### CBOR Tags + +CBOR tags are specified in a `TagSet`. + +Custom modes can be created with a `TagSet` to handle CBOR tags. + +```go +em, err := opts.EncMode() // no CBOR tags +em, err := opts.EncModeWithTags(ts) // immutable CBOR tags +em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags +``` + +`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. + +
Example using TagSet and TagOptions

+
+```go
+// Use signedCWT struct defined in "Decoding CWT" example.
+
+// Create TagSet (safe for concurrency).
+tags := cbor.NewTagSet()
+// Register tag COSE_Sign1 18 with signedCWT type.
+tags.Add(
+	cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
+	reflect.TypeOf(signedCWT{}),
+	18)
+
+// Create DecMode with immutable tags.
+dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)
+
+// Unmarshal to signedCWT with tag support.
+var v signedCWT
+if err := dm.Unmarshal(data, &v); err != nil {
+	return err
+}
+
+// Create EncMode with immutable tags.
+em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
+
+// Marshal signedCWT with tag number, using the EncMode created above.
+if _, err := em.Marshal(v); err != nil {
+	return err
+}
+```

+ +### Functions and Interfaces + +
Functions and interfaces at a glance

+
+Common functions with same API as `encoding/json`:
+- `Marshal`, `Unmarshal`
+- `NewEncoder`, `(*Encoder).Encode`
+- `NewDecoder`, `(*Decoder).Decode`
+
+NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes
+because RFC 8949 treats a CBOR data item with remaining bytes as malformed.
+- 💡 Use `UnmarshalFirst` to decode the first CBOR data item and return any remaining bytes.
+
+Other useful functions:
+- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
+- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes.
+- `Wellformed` returns true if the CBOR data item is well-formed.
+
+Interfaces identical or comparable to Go `encoding` packages include:
+`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
+
+The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
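+
+For instance, a minimal sketch of delayed decoding with `RawMessage` (the `Envelope` type and its fields are hypothetical):
+
+```Go
+// Decode the envelope first; decode Payload later, once Type is known.
+type Envelope struct {
+	Type    string          `cbor:"type"`
+	Payload cbor.RawMessage `cbor:"payload"` // kept as raw CBOR bytes
+}
+
+var env Envelope
+if err := cbor.Unmarshal(data, &env); err != nil {
+	return err
+}
+// Switch on env.Type, then cbor.Unmarshal(env.Payload, &concreteValue).
+```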

+ +### Security Tips + +🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data. + +Default limits may need to be increased for systems handling very large data (e.g. blockchains). + +`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`. + +## Status + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. + +For more details, see [release notes](https://github.com/fxamacker/cbor/releases). + +### Prior Release + +[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. + +v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. + +See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes. + +See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. + + + +## Who uses fxamacker/cbor + +`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others. + +`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope. + +## Standards + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Notable CBOR features include: + +| CBOR Feature | Description | +| :--- | :--- | +| CBOR tags | API supports built-in and user-defined tags. | +| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. | +| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). 
| +| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. | +| Indefinite length data | Option to allow/forbid for encoding and decoding. | +| Well-formedness | Always checked and enforced. | +| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. | +| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). | + +Known limitations are noted in the [Limitations section](#limitations). + +Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps. + +Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data. + +After well-formedness is verified, basic validity errors are handled as follows: + +* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default. +* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys. + +When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future. + +By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined. + +__Click to expand topic:__ + +
+ Duplicate Map Keys

+
+This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
+
+`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
+
+`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
+
+APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
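+
+A minimal sketch of the enforced mode described above (the 7-byte input is the CBOR map {"a": 1, "a": 2}):
+
+```Go
+// Reject duplicate map keys while decoding.
+dm, _ := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+
+var m map[string]int
+err := dm.Unmarshal([]byte{0xa2, 0x61, 0x61, 0x01, 0x61, 0x61, 0x02}, &m)
+// err is non-nil (DupMapKeyError); m may be partially filled ("Allow Partial Fill").
+```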

+ +
+ Tag Validity

+ +This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799): + +* Inadmissible type for tag content +* Inadmissible value for tag content + +Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways: + +* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type. +* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified. + +Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR. + +For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options). + +
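+
+A minimal sketch of the built-in tag content check described above:
+
+```Go
+// Tag 0 (RFC 3339 time) must be followed by a text string.
+// 0xc0 0x01 is tag 0 with integer content, so decoding returns an error.
+var t time.Time
+err := cbor.Unmarshal([]byte{0xc0, 0x01}, &t)
+fmt.Println(err != nil) // true: inadmissible tag content type
+```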

+
+## Limitations
+
+If any of these limitations prevent you from using this library, please open an issue along with a link to your project.
+
+* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`.
+* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items.
+* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation.
+
+## Fuzzing and Code Coverage
+
+__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release.
+
+__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project.
+ +## Versions and API Changes +This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes. + +These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases: +`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`. + +Exclusions from SemVer: +- Newly added API documented as "subject to change". +- Newly added API in the master branch that has never been tagged in non-beta release. +- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions. + +## Code of Conduct + +This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments. + +## Contributing + +Please open an issue before beginning work on a PR. The improvement may have already been considered, etc. + +For more info, see [How to Contribute](CONTRIBUTING.md). + +## Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +For the full text of the Security Policy, see [SECURITY.md](SECURITY.md). + +## Acknowledgements + +Many thanks to all the contributors on this project! + +I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more. + +I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days. + +Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0. + +This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs. + +Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis). + +Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included! + +This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well. + +## License + +Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker). + +fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text. + +
diff --git a/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md new file mode 100644 index 000000000..9c05146d1 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +If the security vulnerability is already known to the public, then you can open an issue as a bug report. + +To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public. diff --git a/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/vendor/github.com/fxamacker/cbor/v2/bytestring.go new file mode 100644 index 000000000..823bff12c --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/bytestring.go @@ -0,0 +1,63 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "errors" +) + +// ByteString represents CBOR byte string (major type 2). ByteString can be used +// when using a Go []byte is not possible or convenient. For example, Go doesn't +// allow []byte as map key, so ByteString can be used to support data formats +// having CBOR map with byte string keys. ByteString can also be used to +// encode invalid UTF-8 string as CBOR byte string. +// See DecOption.MapKeyByteStringMode for more details. +type ByteString string + +// Bytes returns bytes representing ByteString. +func (bs ByteString) Bytes() []byte { + return []byte(bs) +} + +// MarshalCBOR encodes ByteString as CBOR byte string (major type 2). +func (bs ByteString) MarshalCBOR() ([]byte, error) { + e := getEncodeBuffer() + defer putEncodeBuffer(e) + + // Encode length + encodeHead(e, byte(cborTypeByteString), uint64(len(bs))) + + // Encode data + buf := make([]byte, e.Len()+len(bs)) + n := copy(buf, e.Bytes()) + copy(buf[n:], bs) + + return buf, nil +} + +// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString. +// Decoding CBOR null and CBOR undefined sets ByteString to be empty. +func (bs *ByteString) UnmarshalCBOR(data []byte) error { + if bs == nil { + return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and CBOR undefined to ByteString resets data. + // This behavior is similar to decoding CBOR null and CBOR undefined to []byte. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + *bs = "" + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Check if CBOR data type is byte string + if typ := d.nextCBORType(); typ != cborTypeByteString { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()} + } + + b, _ := d.parseByteString() + *bs = ByteString(b) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go new file mode 100644 index 000000000..ea0f39e24 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -0,0 +1,363 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +type encodeFuncs struct { + ef encodeFunc + ief isEmptyFunc +} + +var ( + decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType + encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType + encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs + typeInfoCache sync.Map // map[reflect.Type]*typeInfo +) + +type specialType int + +const ( + specialTypeNone specialType = iota + specialTypeUnmarshalerIface + specialTypeEmptyIface + specialTypeIface + specialTypeTag + specialTypeTime +) + +type typeInfo struct { + elemTypeInfo *typeInfo + keyTypeInfo *typeInfo + typ reflect.Type + kind reflect.Kind + nonPtrType reflect.Type + nonPtrKind reflect.Kind + spclType specialType +} + +func newTypeInfo(t reflect.Type) *typeInfo { + tInfo := typeInfo{typ: t, kind: t.Kind()} + + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + k := t.Kind() + + tInfo.nonPtrType = t + tInfo.nonPtrKind = k + + if k == reflect.Interface { + if t.NumMethod() == 0 { + tInfo.spclType = specialTypeEmptyIface + } else { + tInfo.spclType = specialTypeIface + } + } else if t == typeTag { + tInfo.spclType = specialTypeTag + } else if t == typeTime { + tInfo.spclType = specialTypeTime + } else if reflect.PtrTo(t).Implements(typeUnmarshaler) { + tInfo.spclType = specialTypeUnmarshalerIface + } + + switch k { + case reflect.Array, reflect.Slice: + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + case reflect.Map: + tInfo.keyTypeInfo = getTypeInfo(t.Key()) + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + } + + return &tInfo +} + +type decodingStructType struct { + fields fields + fieldIndicesByName map[string]int + err error + toArray bool +} + +// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, +// here's a very basic implementation of an aggregated error. 
+type multierror []error + +func (m multierror) Error() string { + var sb strings.Builder + for i, err := range m { + sb.WriteString(err.Error()) + if i < len(m)-1 { + sb.WriteString(", ") + } + } + return sb.String() +} + +func getDecodingStructType(t reflect.Type) *decodingStructType { + if v, _ := decodingStructTypeCache.Load(t); v != nil { + return v.(*decodingStructType) + } + + flds, structOptions := getFields(t) + + toArray := hasToArrayOption(structOptions) + + var errs []error + for i := 0; i < len(flds); i++ { + if flds[i].keyAsInt { + nameAsInt, numErr := strconv.Atoi(flds[i].name) + if numErr != nil { + errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) + break + } + flds[i].nameAsInt = int64(nameAsInt) + } + + flds[i].typInfo = getTypeInfo(flds[i].typ) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + for i, fld := range flds { + if _, ok := fieldIndicesByName[fld.name]; ok { + errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) + continue + } + fieldIndicesByName[fld.name] = i + } + + var err error + { + var multi multierror + for _, each := range errs { + if each != nil { + multi = append(multi, each) + } + } + if len(multi) == 1 { + err = multi[0] + } else if len(multi) > 1 { + err = multi + } + } + + structType := &decodingStructType{ + fields: flds, + fieldIndicesByName: fieldIndicesByName, + err: err, + toArray: toArray, + } + decodingStructTypeCache.Store(t, structType) + return structType +} + +type encodingStructType struct { + fields fields + bytewiseFields fields + lengthFirstFields fields + omitEmptyFieldsIdx []int + err error + toArray bool +} + +func (st *encodingStructType) getFields(em *encMode) fields { + switch em.sort { + case SortNone, SortFastShuffle: + return st.fields + case SortLengthFirst: + return st.lengthFirstFields + default: + return st.bytewiseFields + } +} + +type bytewiseFieldSorter struct { + fields fields +} + +func (x *bytewiseFieldSorter) Len() int { + return len(x.fields) +} + +func (x *bytewiseFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *bytewiseFieldSorter) Less(i, j int) bool { + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +type lengthFirstFieldSorter struct { + fields fields +} + +func (x *lengthFirstFieldSorter) Len() int { + return len(x.fields) +} + +func (x *lengthFirstFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *lengthFirstFieldSorter) Less(i, j int) bool { + if len(x.fields[i].cborName) != len(x.fields[j].cborName) { + return len(x.fields[i].cborName) < len(x.fields[j].cborName) + } + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { + if v, _ := encodingStructTypeCache.Load(t); v != nil { + structType := v.(*encodingStructType) + return structType, structType.err + } + + flds, structOptions := getFields(t) + + if hasToArrayOption(structOptions) { + return getEncodingStructToArrayType(t, flds) + } + + var err error + var hasKeyAsInt bool + var hasKeyAsStr bool + var omitEmptyIdx []int + e := getEncodeBuffer() + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + err = &UnsupportedTypeError{t} + break + } + + // Encode field name + if flds[i].keyAsInt { + nameAsInt, numErr := 
strconv.Atoi(flds[i].name) + if numErr != nil { + err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") + break + } + flds[i].nameAsInt = int64(nameAsInt) + if nameAsInt >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + } else { + n := nameAsInt*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + } + flds[i].cborName = make([]byte, e.Len()) + copy(flds[i].cborName, e.Bytes()) + e.Reset() + + hasKeyAsInt = true + } else { + encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) + flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) + n := copy(flds[i].cborName, e.Bytes()) + copy(flds[i].cborName[n:], flds[i].name) + e.Reset() + + // If cborName contains a text string, then cborNameByteString contains a + // string that has the byte string major type but is otherwise identical to + // cborName. + flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) + copy(flds[i].cborNameByteString, flds[i].cborName) + // Reset encoded CBOR type to byte string, preserving the "additional + // information" bits: + flds[i].cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(flds[i].cborNameByteString[0]) + + hasKeyAsStr = true + } + + // Check if field can be omitted when empty + if flds[i].omitEmpty { + omitEmptyIdx = append(omitEmptyIdx, i) + } + } + putEncodeBuffer(e) + + if err != nil { + structType := &encodingStructType{err: err} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + + // Sort fields by canonical order + bytewiseFields := make(fields, len(flds)) + copy(bytewiseFields, flds) + sort.Sort(&bytewiseFieldSorter{bytewiseFields}) + + lengthFirstFields := bytewiseFields + if hasKeyAsInt && hasKeyAsStr { + lengthFirstFields = make(fields, len(flds)) + copy(lengthFirstFields, flds) + sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) + } + + structType := &encodingStructType{ + fields: flds, + bytewiseFields: bytewiseFields, + lengthFirstFields: lengthFirstFields, + omitEmptyFieldsIdx: omitEmptyIdx, + } + + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + } + + structType := &encodingStructType{ + fields: flds, + toArray: true, + } + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) { + if v, _ := encodeFuncCache.Load(t); v != nil { + fs := v.(encodeFuncs) + return fs.ef, fs.ief + } + ef, ief := getEncodeFuncInternal(t) + encodeFuncCache.Store(t, encodeFuncs{ef, ief}) + return ef, ief +} + +func getTypeInfo(t reflect.Type) *typeInfo { + if v, _ := typeInfoCache.Load(t); v != nil { + return v.(*typeInfo) + } + tInfo := newTypeInfo(t) + typeInfoCache.Store(t, tInfo) + return tInfo +} + +func hasToArrayOption(tag string) bool { + s := ",toarray" + idx := strings.Index(tag, s) + return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',') +} diff --git a/vendor/github.com/fxamacker/cbor/v2/common.go b/vendor/github.com/fxamacker/cbor/v2/common.go new file mode 100644 index 000000000..ec038a49e 
--- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/common.go @@ -0,0 +1,182 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "fmt" + "strconv" +) + +type cborType uint8 + +const ( + cborTypePositiveInt cborType = 0x00 + cborTypeNegativeInt cborType = 0x20 + cborTypeByteString cborType = 0x40 + cborTypeTextString cborType = 0x60 + cborTypeArray cborType = 0x80 + cborTypeMap cborType = 0xa0 + cborTypeTag cborType = 0xc0 + cborTypePrimitives cborType = 0xe0 +) + +func (t cborType) String() string { + switch t { + case cborTypePositiveInt: + return "positive integer" + case cborTypeNegativeInt: + return "negative integer" + case cborTypeByteString: + return "byte string" + case cborTypeTextString: + return "UTF-8 text string" + case cborTypeArray: + return "array" + case cborTypeMap: + return "map" + case cborTypeTag: + return "tag" + case cborTypePrimitives: + return "primitives" + default: + return "Invalid type " + strconv.Itoa(int(t)) + } +} + +type additionalInformation uint8 + +const ( + maxAdditionalInformationWithoutArgument = 23 + additionalInformationWith1ByteArgument = 24 + additionalInformationWith2ByteArgument = 25 + additionalInformationWith4ByteArgument = 26 + additionalInformationWith8ByteArgument = 27 + + // For major type 7. + additionalInformationAsFalse = 20 + additionalInformationAsTrue = 21 + additionalInformationAsNull = 22 + additionalInformationAsUndefined = 23 + additionalInformationAsFloat16 = 25 + additionalInformationAsFloat32 = 26 + additionalInformationAsFloat64 = 27 + + // For major type 2, 3, 4, 5. + additionalInformationAsIndefiniteLengthFlag = 31 +) + +const ( + maxSimpleValueInAdditionalInformation = 23 + minSimpleValueIn1ByteArgument = 32 +) + +func (ai additionalInformation) isIndefiniteLength() bool { + return ai == additionalInformationAsIndefiniteLengthFlag +} + +const ( + // From RFC 8949 Section 3: + // "The initial byte of each encoded data item contains both information about the major type + // (the high-order 3 bits, described in Section 3.1) and additional information + // (the low-order 5 bits)." + + // typeMask is used to extract major type in initial byte of encoded data item. + typeMask = 0xe0 + + // additionalInformationMask is used to extract additional information in initial byte of encoded data item. 
+ additionalInformationMask = 0x1f +) + +func getType(raw byte) cborType { + return cborType(raw & typeMask) +} + +func getAdditionalInformation(raw byte) byte { + return raw & additionalInformationMask +} + +func isBreakFlag(raw byte) bool { + return raw == cborBreakFlag +} + +func parseInitialByte(b byte) (t cborType, ai byte) { + return getType(b), getAdditionalInformation(b) +} + +const ( + tagNumRFC3339Time = 0 + tagNumEpochTime = 1 + tagNumUnsignedBignum = 2 + tagNumNegativeBignum = 3 + tagNumExpectedLaterEncodingBase64URL = 21 + tagNumExpectedLaterEncodingBase64 = 22 + tagNumExpectedLaterEncodingBase16 = 23 + tagNumSelfDescribedCBOR = 55799 +) + +const ( + cborBreakFlag = byte(0xff) + cborByteStringWithIndefiniteLengthHead = byte(0x5f) + cborTextStringWithIndefiniteLengthHead = byte(0x7f) + cborArrayWithIndefiniteLengthHead = byte(0x9f) + cborMapWithIndefiniteLengthHead = byte(0xbf) +) + +var ( + cborFalse = []byte{0xf4} + cborTrue = []byte{0xf5} + cborNil = []byte{0xf6} + cborNaN = []byte{0xf9, 0x7e, 0x00} + cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00} + cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00} +) + +// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types. +func validBuiltinTag(tagNum uint64, contentHead byte) error { + t := getType(contentHead) + switch tagNum { + case tagNumRFC3339Time: + // Tag content (date/time text string in RFC 3339 format) must be string type. + if t != cborTypeTextString { + return newInadmissibleTagContentTypeError( + tagNumRFC3339Time, + "text string", + t.String()) + } + return nil + + case tagNumEpochTime: + // Tag content (epoch date/time) must be uint, int, or float type. + if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) { + return newInadmissibleTagContentTypeError( + tagNumEpochTime, + "integer or floating-point number", + t.String()) + } + return nil + + case tagNumUnsignedBignum, tagNumNegativeBignum: + // Tag content (bignum) must be byte type. + if t != cborTypeByteString { + return newInadmissibleTagContentTypeErrorf( + fmt.Sprintf( + "tag number %d or %d must be followed by byte string, got %s", + tagNumUnsignedBignum, + tagNumNegativeBignum, + t.String(), + )) + } + return nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // From RFC 8949 3.4.5.2: + // The data item tagged can be a byte string or any other data item. In the latter + // case, the tag applies to all of the byte string data items contained in the data + // item, except for those contained in a nested data item tagged with an expected + // conversion. + return nil + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go new file mode 100644 index 000000000..85842ac73 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -0,0 +1,3187 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/x448/float16" +) + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using default decoding options. If v is nil, not a pointer, or +// a nil pointer, Unmarshal returns an error. 
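+//
+// A minimal usage sketch (illustrative; byte values follow RFC 8949 Appendix A):
+//
+//	var v interface{}
+//	if err := Unmarshal([]byte{0x83, 0x01, 0x02, 0x03}, &v); err != nil { // [1, 2, 3]
+//		// handle error
+//	}
+//	// v is now []interface{}{uint64(1), uint64(2), uint64(3)}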
+//
+// To unmarshal CBOR into a value implementing the Unmarshaler interface,
+// Unmarshal calls that value's UnmarshalCBOR method with a valid
+// CBOR value.
+//
+// To unmarshal CBOR byte string into a value implementing the
+// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's
+// UnmarshalBinary method with decoded CBOR byte string.
+//
+// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil
+// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal
+// unmarshals CBOR into the value pointed to by the pointer. If the
+// pointer is nil, Unmarshal creates a new value for it to point to.
+//
+// To unmarshal CBOR into an empty interface value, Unmarshal uses the
+// following rules:
+//
+//	CBOR booleans decode to bool.
+//	CBOR positive integers decode to uint64.
+//	CBOR negative integers decode to int64 (big.Int if value overflows).
+//	CBOR floating points decode to float64.
+//	CBOR byte strings decode to []byte.
+//	CBOR text strings decode to string.
+//	CBOR arrays decode to []interface{}.
+//	CBOR maps decode to map[interface{}]interface{}.
+//	CBOR null and undefined values decode to nil.
+//	CBOR times (tag 0 and 1) decode to time.Time.
+//	CBOR bignums (tag 2 and 3) decode to big.Int.
+//	CBOR tags with an unrecognized number decode to cbor.Tag.
+//
+// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice
+// if the CBOR array is empty or slice capacity is less than CBOR array length.
+// Otherwise Unmarshal overwrites existing elements, and sets slice length
+// to CBOR array length.
+//
+// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array
+// elements into Go array elements. If the Go array is smaller than the
+// CBOR array, the extra CBOR array elements are discarded. If the CBOR
+// array is smaller than the Go array, the extra Go array elements are
+// set to zero values.
+//
+// To unmarshal a CBOR array into a struct, the struct must have a special field "_"
+// with struct tag `cbor:",toarray"`. CBOR array elements are decoded into struct
+// fields. Any "omitempty" struct field tag option is ignored in this case.
+//
+// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the
+// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing
+// entries. Unmarshal stores key-value pairs from the CBOR map into the Go map.
+// See DecOptions.DupMapKey to enable duplicate map key detection.
+//
+// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the
+// keys in the following priority:
+//
+//	1. "cbor" key in struct field tag,
+//	2. "json" key in struct field tag,
+//	3. struct field name.
+//
+// Unmarshal tries an exact match for field name, then a case-insensitive match.
+// Map key-value pairs without corresponding struct fields are ignored. See
+// DecOptions.ExtraReturnErrors to return an error at an unknown field.
+//
+// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses the
+// text string formatted in RFC3339. To unmarshal a CBOR integer/float into a
+// time.Time value, Unmarshal creates a Unix time with the integer/float as seconds
+// and fractional seconds since January 1, 1970 UTC. As a special case, infinite
+// and NaN float values decode to time.Time's zero value.
+//
+// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a
+// slice/map/pointer, Unmarshal sets the Go value to nil.
Because null is often +// used to mean "not present", unmarshalling CBOR null and undefined value +// into any other Go type has no effect and returns no error. +// +// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time), +// and tag 2 and 3 (bignum). +// +// Unmarshal returns ExtraneousDataError error (without decoding into v) +// if there are any remaining bytes following the first valid CBOR data item. +// See UnmarshalFirst, if you want to unmarshal only the first +// CBOR data item without ExtraneousDataError caused by remaining bytes. +func Unmarshal(data []byte, v interface{}) error { + return defaultDecMode.Unmarshal(data, v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using default decoding options. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + return defaultDecMode.UnmarshalFirst(data, v) +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func Valid(data []byte) error { + return defaultDecMode.Valid(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func Wellformed(data []byte) error { + return defaultDecMode.Wellformed(data) +} + +// Unmarshaler is the interface implemented by types that wish to unmarshal +// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR +// must copy the CBOR data if it needs to use it after returning. +type Unmarshaler interface { + UnmarshalCBOR([]byte) error +} + +// InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +type InvalidUnmarshalError struct { + s string +} + +func (e *InvalidUnmarshalError) Error() string { + return e.s +} + +// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type. +type UnmarshalTypeError struct { + CBORType string // type of CBOR value + GoType string // type of Go value it could not be decoded into + StructFieldName string // name of the struct field holding the Go value (optional) + errorMsg string // additional error message (optional) +} + +func (e *UnmarshalTypeError) Error() string { + var s string + if e.StructFieldName != "" { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType + } else { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType + } + if e.errorMsg != "" { + s += " (" + e.errorMsg + ")" + } + return s +} + +// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map. +// For example, Go doesn't allow slice as map key. 
+type InvalidMapKeyTypeError struct { + GoType string +} + +func (e *InvalidMapKeyTypeError) Error() string { + return "cbor: invalid map key type: " + e.GoType +} + +// DupMapKeyError describes detected duplicate map key in CBOR map. +type DupMapKeyError struct { + Key interface{} + Index int +} + +func (e *DupMapKeyError) Error() string { + return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index) +} + +// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct. +type UnknownFieldError struct { + Index int +} + +func (e *UnknownFieldError) Error() string { + return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index) +} + +// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item +// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as +// described in RFC 8949 Section 5 Paragraph 3). +type UnacceptableDataItemError struct { + CBORType string + Message string +} + +func (e UnacceptableDataItemError) Error() string { + return fmt.Sprintf("cbor: data item of cbor type %s is not accepted by protocol: %s", e.CBORType, e.Message) +} + +// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when +// using non-default ByteStringExpectedFormat decoding option that makes decoder expect +// a specified format such as base64, hex, etc. +type ByteStringExpectedFormatError struct { + expectedFormatOption ByteStringExpectedFormatMode + err error +} + +func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError { + return &ByteStringExpectedFormatError{expectedFormatOption, err} +} + +func (e *ByteStringExpectedFormatError) Error() string { + switch e.expectedFormatOption { + case ByteStringExpectedBase64URL: + return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err) + + case ByteStringExpectedBase64: + return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err) + + case ByteStringExpectedBase16: + return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err) + + default: + return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err) + } +} + +func (e *ByteStringExpectedFormatError) Unwrap() error { + return e.err +} + +// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags +// fails because of inadmissible type for tag content. Currently, the built-in +// CBOR tags in this codec are tags 0-3 and 21-23. +// See "Tag validity" in RFC 8949 Section 5.3.2. 
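+// For example (illustrative), the two-byte input 0xc0 0x01 (tag 0 whose content
+// is the integer 1 rather than an RFC 3339 text string) is rejected with an
+// error of this type.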
+type InadmissibleTagContentTypeError struct { + s string + tagNum int + expectedTagContentType string + gotTagContentType string +} + +func newInadmissibleTagContentTypeError( + tagNum int, + expectedTagContentType string, + gotTagContentType string, +) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{ + tagNum: tagNum, + expectedTagContentType: expectedTagContentType, + gotTagContentType: gotTagContentType, + } +} + +func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor" +} + +func (e *InadmissibleTagContentTypeError) Error() string { + if e.s == "" { + return fmt.Sprintf( + "cbor: tag number %d must be followed by %s, got %s", + e.tagNum, + e.expectedTagContentType, + e.gotTagContentType, + ) + } + return e.s +} + +// DupMapKeyMode specifies how to enforce duplicate map key. Two map keys are considered duplicates if: +// 1. When decoding into a struct, both keys match the same struct field. The keys are also +// considered duplicates if neither matches any field and decoding to interface{} would produce +// equal (==) values for both keys. +// 2. When decoding into a map, both keys are equal (==) when decoded into values of the +// destination map's key type. +type DupMapKeyMode int + +const ( + // DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error) + // uses faster of "keep first" or "keep last" depending on Go data type and other factors. + DupMapKeyQuiet DupMapKeyMode = iota + + // DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys. + // APF means "Allow Partial Fill" and the destination map or struct can be partially filled. + // If a duplicate map key is detected, DupMapKeyError is returned without further decoding + // of the map. It's the caller's responsibility to respond to DupMapKeyError by + // discarding the partially filled result if their protocol requires it. + // WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use. + DupMapKeyEnforcedAPF + + maxDupMapKeyMode +) + +func (dmkm DupMapKeyMode) valid() bool { + return dmkm >= 0 && dmkm < maxDupMapKeyMode +} + +// IndefLengthMode specifies whether to allow indefinite length items. +type IndefLengthMode int + +const ( + // IndefLengthAllowed allows indefinite length items. + IndefLengthAllowed IndefLengthMode = iota + + // IndefLengthForbidden disallows indefinite length items. + IndefLengthForbidden + + maxIndefLengthMode +) + +func (m IndefLengthMode) valid() bool { + return m >= 0 && m < maxIndefLengthMode +} + +// TagsMode specifies whether to allow CBOR tags. +type TagsMode int + +const ( + // TagsAllowed allows CBOR tags. + TagsAllowed TagsMode = iota + + // TagsForbidden disallows CBOR tags. + TagsForbidden + + maxTagsMode +) + +func (tm TagsMode) valid() bool { + return tm >= 0 && tm < maxTagsMode +} + +// IntDecMode specifies which Go type (int64, uint64, or big.Int) should +// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}. +type IntDecMode int + +const ( + // IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}. 
+	// It decodes CBOR unsigned integer (major type 0) to:
+	//   - uint64
+	// It decodes CBOR negative integer (major type 1) to:
+	//   - int64 if value fits
+	//   - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+	IntDecConvertNone IntDecMode = iota
+
+	// IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+	// It decodes CBOR integers (major type 0 and 1) to:
+	//   - int64 if value fits
+	//   - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64
+	//   - return UnmarshalTypeError if value > math.MaxInt64
+	// Deprecated: IntDecConvertSigned should not be used.
+	// Please use other options, such as IntDecConvertSignedOrFail, IntDecConvertSignedOrBigInt, IntDecConvertNone.
+	IntDecConvertSigned
+
+	// IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+	// It decodes CBOR integers (major type 0 and 1) to:
+	//   - int64 if value fits
+	//   - return UnmarshalTypeError if value doesn't fit into int64
+	IntDecConvertSignedOrFail
+
+	// IntDecConvertSignedOrBigInt affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+	// It makes CBOR integers (major type 0 and 1) decode to:
+	//   - int64 if value fits
+	//   - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+	IntDecConvertSignedOrBigInt
+
+	maxIntDec
+)
+
+func (idm IntDecMode) valid() bool {
+	return idm >= 0 && idm < maxIntDec
+}
+
+// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2)
+// as Go map key when decoding CBOR map key into an empty Go interface value.
+// Specifically, this option applies when decoding CBOR map into
+// - Go empty interface, or
+// - Go map with empty interface as key type.
+// The CBOR map key types handled by this option are
+// - byte string
+// - tagged byte string
+// - nested tagged byte string
+type MapKeyByteStringMode int
+
+const (
+	// MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key.
+	// Since Go doesn't allow []byte as map key, CBOR byte string is decoded to
+	// ByteString which has underlying string type.
+	// This is the default setting.
+	MapKeyByteStringAllowed MapKeyByteStringMode = iota
+
+	// MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key.
+	// Attempting to decode CBOR byte string as map key into empty interface value
+	// returns a decoding error.
+	MapKeyByteStringForbidden
+
+	maxMapKeyByteStringMode
+)
+
+func (mkbsm MapKeyByteStringMode) valid() bool {
+	return mkbsm >= 0 && mkbsm < maxMapKeyByteStringMode
+}
+
+// ExtraDecErrorCond specifies extra conditions that should be treated as errors.
+type ExtraDecErrorCond uint
+
+// ExtraDecErrorNone indicates no extra error condition.
+const ExtraDecErrorNone ExtraDecErrorCond = 0
+
+const (
+	// ExtraDecErrorUnknownField indicates error condition when destination
+	// Go struct doesn't have a field matching a CBOR map key.
+	ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota
+
+	maxExtraDecError
+)
+
+func (ec ExtraDecErrorCond) valid() bool {
+	return ec < maxExtraDecError
+}
+
+// UTF8Mode option specifies if decoder should
+// decode CBOR Text containing invalid UTF-8 string.
+type UTF8Mode int
+
+const (
+	// UTF8RejectInvalid rejects CBOR Text containing
+	// invalid UTF-8 string.
+	UTF8RejectInvalid UTF8Mode = iota
+
+	// UTF8DecodeInvalid allows decoding CBOR Text containing
+	// invalid UTF-8 string.
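+	// For example (illustrative), the input 0x61 0xff (a length-1 text string
+	// whose only byte is 0xff) decodes to the Go string "\xff" in this mode
+	// instead of returning an error.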
+ UTF8DecodeInvalid + + maxUTF8Mode +) + +func (um UTF8Mode) valid() bool { + return um >= 0 && um < maxUTF8Mode +} + +// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names. +type FieldNameMatchingMode int + +const ( + // FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag + // names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose + // name is a case-insensitive match for the item's key. + FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota + + // FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an + // exact match for the item's key. + FieldNameMatchingCaseSensitive + + maxFieldNameMatchingMode +) + +func (fnmm FieldNameMatchingMode) valid() bool { + return fnmm >= 0 && fnmm < maxFieldNameMatchingMode +} + +// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}. +type BigIntDecMode int + +const ( + // BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int) + // when unmarshalling into a Go interface{}. + BigIntDecodeValue BigIntDecMode = iota + + // BigIntDecodePointer makes CBOR bignum decode to *big.Int when + // unmarshalling into a Go interface{}. + BigIntDecodePointer + + maxBigIntDecMode +) + +func (bidm BigIntDecMode) valid() bool { + return bidm >= 0 && bidm < maxBigIntDecMode +} + +// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string. +type ByteStringToStringMode int + +const ( + // ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string. + ByteStringToStringForbidden ByteStringToStringMode = iota + + // ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string. + ByteStringToStringAllowed + + // ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string + // into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of + // the "expected later encoding" tags (numbers 21 through 23), the destination string will + // be populated by applying the designated text encoding to the contents of the input byte + // string. + ByteStringToStringAllowedWithExpectedLaterEncoding + + maxByteStringToStringMode +) + +func (bstsm ByteStringToStringMode) valid() bool { + return bstsm >= 0 && bstsm < maxByteStringToStringMode +} + +// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name. +type FieldNameByteStringMode int + +const ( + // FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name. + FieldNameByteStringForbidden FieldNameByteStringMode = iota + + // FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names. + FieldNameByteStringAllowed + + maxFieldNameByteStringMode +) + +func (fnbsm FieldNameByteStringMode) valid() bool { + return fnbsm >= 0 && fnbsm < maxFieldNameByteStringMode +} + +// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any). +// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. +type UnrecognizedTagToAnyMode int + +const ( + // UnrecognizedTagNumAndContentToAny decodes CBOR tag number and tag content to cbor.Tag + // when decoding unrecognized CBOR tag into an empty interface. 
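+	// For example (illustrative), the input 0xd8 0x2a 0x41 0x01 (tag 42 around
+	// the byte string h'01') decodes to a Tag carrying number 42 and content
+	// []byte{0x01}, assuming tag 42 is not registered in a TagSet.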
+ UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota + + // UnrecognizedTagContentToAny decodes only CBOR tag content (into its default type) + // when decoding unrecognized CBOR tag into an empty interface. + UnrecognizedTagContentToAny + + maxUnrecognizedTagToAny +) + +func (uttam UnrecognizedTagToAnyMode) valid() bool { + return uttam >= 0 && uttam < maxUnrecognizedTagToAny +} + +// TimeTagToAnyMode specifies how to decode CBOR tag 0 and 1 into an empty interface (any). +// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. +type TimeTagToAnyMode int + +const ( + // TimeTagToTime decodes CBOR tag 0 and 1 into a time.Time value + // when decoding tag 0 or 1 into an empty interface. + TimeTagToTime TimeTagToAnyMode = iota + + // TimeTagToRFC3339 decodes CBOR tag 0 and 1 into a time string in RFC3339 format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339 + + // TimeTagToRFC3339Nano decodes CBOR tag 0 and 1 into a time string in RFC3339Nano format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339Nano + + maxTimeTagToAnyMode +) + +func (tttam TimeTagToAnyMode) valid() bool { + return tttam >= 0 && tttam < maxTimeTagToAnyMode +} + +// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value +// number (0...23 and 32...255). +type SimpleValueRegistry struct { + rejected [256]bool +} + +// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is +// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned. +func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error { + return func(r *SimpleValueRegistry) error { + if sv >= 24 && sv <= 31 { + return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv) + } + r.rejected[sv] = true + return nil + } +} + +// Creates a new SimpleValueRegistry. The registry state is initialized by executing the provided +// functions in order against a registry that is pre-populated with the defaults for all well-formed +// simple value numbers. +func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) { + var r SimpleValueRegistry + for _, fn := range fns { + if err := fn(&r); err != nil { + return nil, err + } + } + return &r, nil +} + +// NaNMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing NaN (not-a-number). +type NaNMode int + +const ( + // NaNDecodeAllowed will decode NaN values to Go float32 or float64. + NaNDecodeAllowed NaNMode = iota + + // NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value. + NaNDecodeForbidden + + maxNaNDecode +) + +func (ndm NaNMode) valid() bool { + return ndm >= 0 && ndm < maxNaNDecode +} + +// InfMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing positive or negative infinity. +type InfMode int + +const ( + // InfDecodeAllowed will decode infinite values to Go float32 or float64. + InfDecodeAllowed InfMode = iota + + // InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an + // infinite value. 
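+	// For example (illustrative), in this mode decoding 0xf9 0x7c 0x00 (+Inf)
+	// returns an UnacceptableDataItemError.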
+ InfDecodeForbidden + + maxInfDecode +) + +func (idm InfMode) valid() bool { + return idm >= 0 && idm < maxInfDecode +} + +// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time. +type ByteStringToTimeMode int + +const ( + // ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time. + ByteStringToTimeForbidden ByteStringToTimeMode = iota + + // ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time. + ByteStringToTimeAllowed + + maxByteStringToTimeMode +) + +func (bttm ByteStringToTimeMode) valid() bool { + return bttm >= 0 && bttm < maxByteStringToTimeMode +} + +// ByteStringExpectedFormatMode specifies how to decode CBOR byte string into Go byte slice +// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if +// the CBOR byte string does not contain the expected format (e.g. base64) specified. +// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" +// in RFC 8949 Section 3.4.5.2. +type ByteStringExpectedFormatMode int + +const ( + // ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice + // if the byte string is not tagged by CBOR tag 21-23. + ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota + + // ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64url-encoded bytes into Go slice. + ByteStringExpectedBase64URL + + // ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64-encoded bytes into Go slice. + ByteStringExpectedBase64 + + // ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base16-encoded bytes into Go slice. + ByteStringExpectedBase16 + + maxByteStringExpectedFormatMode +) + +func (bsefm ByteStringExpectedFormatMode) valid() bool { + return bsefm >= 0 && bsefm < maxByteStringExpectedFormatMode +} + +// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be +// decoded. +type BignumTagMode int + +const ( + // BignumTagAllowed allows bignum tags to be decoded. + BignumTagAllowed BignumTagMode = iota + + // BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag + // is encountered in the input. + BignumTagForbidden + + maxBignumTag +) + +func (btm BignumTagMode) valid() bool { + return btm >= 0 && btm < maxBignumTag +} + +// BinaryUnmarshalerMode specifies how to decode into types that implement +// encoding.BinaryUnmarshaler. +type BinaryUnmarshalerMode int + +const ( + // BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte + // string when decoding into a value that implements BinaryUnmarshaler. + BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota + + // BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode. + BinaryUnmarshalerNone + + maxBinaryUnmarshalerMode +) + +func (bum BinaryUnmarshalerMode) valid() bool { + return bum >= 0 && bum < maxBinaryUnmarshalerMode +} + +// DecOptions specifies decoding options. +type DecOptions struct { + // DupMapKey specifies whether to enforce duplicate map key. 
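+	// For example (illustrative), with DupMapKeyEnforcedAPF the input
+	// 0xa2 0x01 0x02 0x01 0x03 ({1: 2, 1: 3}) fails with a DupMapKeyError.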
+ DupMapKey DupMapKeyMode + + // TimeTag specifies whether or not untagged data items, or tags other + // than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1 + // appears in an input, the type of its content is always validated as + // specified in RFC 8949. That behavior is not controlled by this + // option. The behavior of the supported modes are: + // + // DecTagIgnored (default): Untagged text strings and text strings + // enclosed in tags other than 0 and 1 are decoded as though enclosed + // in tag 0. Untagged unsigned integers, negative integers, and + // floating-point numbers (or those enclosed in tags other than 0 and + // 1) are decoded as though enclosed in tag 1. Decoding a tag other + // than 0 or 1 enclosing simple values null or undefined into a + // time.Time does not modify the destination value. + // + // DecTagOptional: Untagged text strings are decoded as though + // enclosed in tag 0. Untagged unsigned integers, negative integers, + // and floating-point numbers are decoded as though enclosed in tag + // 1. Tags other than 0 and 1 will produce an error on attempts to + // decode them into a time.Time. + // + // DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any + // other input will produce an error. + TimeTag DecTagMode + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // IntDec specifies which Go integer type (int64 or uint64) to use + // when decoding CBOR int (major type 0 and 1) to Go interface{}. + IntDec IntDecMode + + // MapKeyByteString specifies how to decode CBOR byte string as map key + // when decoding CBOR map with byte string key into an empty interface value. + // By default, an error is returned when attempting to decode CBOR byte string + // as map key because Go doesn't allow []byte as map key. + MapKeyByteString MapKeyByteStringMode + + // ExtraReturnErrors specifies extra conditions that should be treated as errors. + ExtraReturnErrors ExtraDecErrorCond + + // DefaultMapType specifies Go map type to create and decode to + // when unmarshalling CBOR into an empty interface value. + // By default, unmarshal uses map[interface{}]interface{}. + DefaultMapType reflect.Type + + // UTF8 specifies if decoder should decode CBOR Text containing invalid UTF-8. + // By default, unmarshal rejects CBOR text containing invalid UTF-8. + UTF8 UTF8Mode + + // FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names. + FieldNameMatching FieldNameMatchingMode + + // BigIntDec specifies how to decode CBOR bignum to Go interface{}. + BigIntDec BigIntDecMode + + // DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte + // string into an empty interface value. 
Types to which a []byte is convertible are valid + // for this option, except for array and pointer-to-array types. If nil, the default is + // []byte. + DefaultByteStringType reflect.Type + + // ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string. + ByteStringToString ByteStringToStringMode + + // FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a + // Go struct field name. + FieldNameByteString FieldNameByteStringMode + + // UnrecognizedTagToAny specifies how to decode unrecognized CBOR tag into an empty interface. + // Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. + UnrecognizedTagToAny UnrecognizedTagToAnyMode + + // TimeTagToAny specifies how to decode CBOR tag 0 and 1 into an empty interface (any). + // Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. + TimeTagToAny TimeTagToAnyMode + + // SimpleValues is an immutable mapping from each CBOR simple value to a corresponding + // unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped + // to the Go analog values false, true, nil, and nil, respectively, and all other simple + // values N (except the reserved simple values 24 through 31) are mapped to + // cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded. + // + // Users may provide a custom SimpleValueRegistry constructed via + // NewSimpleValueRegistryFromDefaults. + SimpleValues *SimpleValueRegistry + + // NaN specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing NaN (not-a-number). + NaN NaNMode + + // Inf specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing positive or negative infinity. + Inf InfMode + + // ByteStringToTime specifies how to decode CBOR byte string into Go time.Time. + ByteStringToTime ByteStringToTimeMode + + // ByteStringExpectedFormat specifies how to decode CBOR byte string into Go byte slice + // when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if + // the CBOR byte string does not contain the expected format (e.g. base64) specified. + // For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" + // in RFC 8949 Section 3.4.5.2. + ByteStringExpectedFormat ByteStringExpectedFormatMode + + // BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can + // be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a + // CBOR input, independent of the type of the destination value of a particular Unmarshal + // operation. + BignumTag BignumTagMode + + // BinaryUnmarshaler specifies how to decode into types that implement + // encoding.BinaryUnmarshaler. + BinaryUnmarshaler BinaryUnmarshalerMode +} + +// DecMode returns DecMode with immutable options and no tags (safe for concurrency). +func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam + return opts.decMode() +} + +// validForTags checks that the provided tag set is compatible with these options and returns a +// non-nil error if and only if the provided tag set is incompatible. 
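+// For example (illustrative), a TagSet that registers tag number 21 is
+// incompatible with options that give tags 21-23 built-in meaning, such as
+// ByteStringToString set to ByteStringToStringAllowedWithExpectedLaterEncoding.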
+func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return errors.New("cbor: cannot create DecMode with nil value as TagSet") + } + if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone { + for _, tagNum := range []uint64{ + tagNumExpectedLaterEncodingBase64URL, + tagNumExpectedLaterEncodingBase64, + tagNumExpectedLaterEncodingBase16, + } { + if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil { + return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt) + } + } + + } + return nil +} + +// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency). +func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.DecTag != DecTagIgnored { + ts[contentType] = tag + } + } + syncTags.RUnlock() + + if len(ts) > 0 { + dm.tags = ts + } + + return dm, nil +} + +// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency). +func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + dm.tags = tags + return dm, nil +} + +const ( + defaultMaxArrayElements = 131072 + minMaxArrayElements = 16 + maxMaxArrayElements = 2147483647 + + defaultMaxMapPairs = 131072 + minMaxMapPairs = 16 + maxMaxMapPairs = 2147483647 + + defaultMaxNestedLevels = 32 + minMaxNestedLevels = 4 + maxMaxNestedLevels = 65535 +) + +var defaultSimpleValues = func() *SimpleValueRegistry { + registry, err := NewSimpleValueRegistryFromDefaults() + if err != nil { + panic(err) + } + return registry +}() + +//nolint:gocyclo // Each option comes with some manageable boilerplate +func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.DupMapKey.valid() { + return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey))) + } + + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + + if !opts.IntDec.valid() { + return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec))) + } + + if !opts.MapKeyByteString.valid() { + return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString))) + } + + if opts.MaxNestedLevels == 0 { + opts.MaxNestedLevels = defaultMaxNestedLevels + } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels 
> maxMaxNestedLevels { + return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) + + " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])") + } + + if opts.MaxArrayElements == 0 { + opts.MaxArrayElements = defaultMaxArrayElements + } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements { + return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) + + " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])") + } + + if opts.MaxMapPairs == 0 { + opts.MaxMapPairs = defaultMaxMapPairs + } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs { + return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) + + " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])") + } + + if !opts.ExtraReturnErrors.valid() { + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + } + + if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { + return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType) + } + + if !opts.UTF8.valid() { + return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8))) + } + + if !opts.FieldNameMatching.valid() { + return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching))) + } + + if !opts.BigIntDec.valid() { + return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec))) + } + + if opts.DefaultByteStringType != nil && + opts.DefaultByteStringType.Kind() != reflect.String && + (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) { + return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType) + } + + if !opts.ByteStringToString.valid() { + return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString))) + } + + if !opts.FieldNameByteString.valid() { + return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString))) + } + + if !opts.UnrecognizedTagToAny.valid() { + return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny))) + } + simpleValues := opts.SimpleValues + if simpleValues == nil { + simpleValues = defaultSimpleValues + } + + if !opts.TimeTagToAny.valid() { + return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny))) + } + + if !opts.NaN.valid() { + return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN))) + } + + if !opts.Inf.valid() { + return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf))) + } + + if !opts.ByteStringToTime.valid() { + return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime))) + } + + if !opts.ByteStringExpectedFormat.valid() { + return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat))) + } + + if !opts.BignumTag.valid() { + return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag))) + } + + if !opts.BinaryUnmarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler))) + } + + 
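+	// All options are validated and defaulted at this point. A typical caller
+	// reaches here via, e.g. (illustrative):
+	//
+	//	dm, err := DecOptions{DupMapKey: DupMapKeyEnforcedAPF, MaxNestedLevels: 16}.DecMode()
+	//
+	// and may then share the returned DecMode across goroutines.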
dm := decMode{ + dupMapKey: opts.DupMapKey, + timeTag: opts.TimeTag, + maxNestedLevels: opts.MaxNestedLevels, + maxArrayElements: opts.MaxArrayElements, + maxMapPairs: opts.MaxMapPairs, + indefLength: opts.IndefLength, + tagsMd: opts.TagsMd, + intDec: opts.IntDec, + mapKeyByteString: opts.MapKeyByteString, + extraReturnErrors: opts.ExtraReturnErrors, + defaultMapType: opts.DefaultMapType, + utf8: opts.UTF8, + fieldNameMatching: opts.FieldNameMatching, + bigIntDec: opts.BigIntDec, + defaultByteStringType: opts.DefaultByteStringType, + byteStringToString: opts.ByteStringToString, + fieldNameByteString: opts.FieldNameByteString, + unrecognizedTagToAny: opts.UnrecognizedTagToAny, + timeTagToAny: opts.TimeTagToAny, + simpleValues: simpleValues, + nanDec: opts.NaN, + infDec: opts.Inf, + byteStringToTime: opts.ByteStringToTime, + byteStringExpectedFormat: opts.ByteStringExpectedFormat, + bignumTag: opts.BignumTag, + binaryUnmarshaler: opts.BinaryUnmarshaler, + } + + return &dm, nil +} + +// DecMode is the main interface for CBOR decoding. +type DecMode interface { + // Unmarshal parses the CBOR-encoded data into the value pointed to by v + // using the decoding mode. If v is nil, not a pointer, or a nil pointer, + // Unmarshal returns an error. + // + // See the documentation for Unmarshal for details. + Unmarshal(data []byte, v interface{}) error + + // UnmarshalFirst parses the first CBOR data item into the value pointed to by v + // using the decoding mode. Any remaining bytes are returned in rest. + // + // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. + // + // See the documentation for Unmarshal for details. + UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) + + // Valid checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + // + // WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) + // and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". + // + // Deprecated: Valid is kept for compatibility and should not be used. + // Use Wellformed instead because it has a more appropriate name. + Valid(data []byte) error + + // Wellformed checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + Wellformed(data []byte) error + + // NewDecoder returns a new decoder that reads from r using dm DecMode. + NewDecoder(r io.Reader) *Decoder + + // DecOptions returns user specified options used to create this DecMode. 
+ DecOptions() DecOptions +} + +type decMode struct { + tags tagProvider + dupMapKey DupMapKeyMode + timeTag DecTagMode + maxNestedLevels int + maxArrayElements int + maxMapPairs int + indefLength IndefLengthMode + tagsMd TagsMode + intDec IntDecMode + mapKeyByteString MapKeyByteStringMode + extraReturnErrors ExtraDecErrorCond + defaultMapType reflect.Type + utf8 UTF8Mode + fieldNameMatching FieldNameMatchingMode + bigIntDec BigIntDecMode + defaultByteStringType reflect.Type + byteStringToString ByteStringToStringMode + fieldNameByteString FieldNameByteStringMode + unrecognizedTagToAny UnrecognizedTagToAnyMode + timeTagToAny TimeTagToAnyMode + simpleValues *SimpleValueRegistry + nanDec NaNMode + infDec InfMode + byteStringToTime ByteStringToTimeMode + byteStringExpectedFormat ByteStringExpectedFormatMode + bignumTag BignumTagMode + binaryUnmarshaler BinaryUnmarshalerMode +} + +var defaultDecMode, _ = DecOptions{}.decMode() + +// DecOptions returns user specified options used to create this DecMode. +func (dm *decMode) DecOptions() DecOptions { + simpleValues := dm.simpleValues + if simpleValues == defaultSimpleValues { + // Users can't explicitly set this to defaultSimpleValues. It must have been nil in + // the original DecOptions. + simpleValues = nil + } + + return DecOptions{ + DupMapKey: dm.dupMapKey, + TimeTag: dm.timeTag, + MaxNestedLevels: dm.maxNestedLevels, + MaxArrayElements: dm.maxArrayElements, + MaxMapPairs: dm.maxMapPairs, + IndefLength: dm.indefLength, + TagsMd: dm.tagsMd, + IntDec: dm.intDec, + MapKeyByteString: dm.mapKeyByteString, + ExtraReturnErrors: dm.extraReturnErrors, + DefaultMapType: dm.defaultMapType, + UTF8: dm.utf8, + FieldNameMatching: dm.fieldNameMatching, + BigIntDec: dm.bigIntDec, + DefaultByteStringType: dm.defaultByteStringType, + ByteStringToString: dm.byteStringToString, + FieldNameByteString: dm.fieldNameByteString, + UnrecognizedTagToAny: dm.unrecognizedTagToAny, + TimeTagToAny: dm.timeTagToAny, + SimpleValues: simpleValues, + NaN: dm.nanDec, + Inf: dm.infDec, + ByteStringToTime: dm.byteStringToTime, + ByteStringExpectedFormat: dm.byteStringExpectedFormat, + BignumTag: dm.bignumTag, + BinaryUnmarshaler: dm.binaryUnmarshaler, + } +} + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using dm decoding mode. If v is nil, not a pointer, or a nil pointer, +// Unmarshal returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) Unmarshal(data []byte, v interface{}) error { + d := decoder{data: data, dm: dm} + + // Check well-formedness. + off := d.off // Save offset before data validation + err := d.wellformed(false, false) // don't allow any extra data after valid data item. + d.off = off // Restore offset + if err != nil { + return err + } + + return d.value(v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using dm decoding mode. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + d := decoder{data: data, dm: dm} + + // check well-formedness. + off := d.off // Save offset before data validation + err = d.wellformed(true, false) // allow extra data after well-formed data item + d.off = off // Restore offset + + // If it is well-formed, parse the value. 
This is structured like this to allow
+	// better test coverage
+	if err == nil {
+		err = d.value(v)
+	}
+
+	// If either wellformed or value returned an error, do not return rest bytes
+	if err != nil {
+		return nil, err
+	}
+
+	// Return the rest of the data slice (which might be len 0)
+	return d.data[d.off:], nil
+}
+
+// Valid checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+//
+// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity)
+// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed".
+//
+// Deprecated: Valid is kept for compatibility and should not be used.
+// Use Wellformed instead because it has a more appropriate name.
+func (dm *decMode) Valid(data []byte) error {
+	return dm.Wellformed(data)
+}
+
+// Wellformed checks whether data is a well-formed encoded CBOR data item and
+// that it complies with configurable restrictions such as MaxNestedLevels,
+// MaxArrayElements, MaxMapPairs, etc.
+//
+// If there are any remaining bytes after the CBOR data item,
+// an ExtraneousDataError is returned.
+func (dm *decMode) Wellformed(data []byte) error {
+	d := decoder{data: data, dm: dm}
+	return d.wellformed(false, false)
+}
+
+// NewDecoder returns a new decoder that reads from r using dm DecMode.
+func (dm *decMode) NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r, d: decoder{dm: dm}}
+}
+
+type decoder struct {
+	data []byte
+	off  int // next read offset in data
+	dm   *decMode
+
+	// expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags,
+	// if any.
+	//
+	// The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding
+	// byte strings, the effective encoding comes from the tag nearest to the byte string being
+	// decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be
+	// controlled by tag 22, and in the data item 23([h'42', 22([21(h'43')])]) the effective
+	// encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21,
+	// respectively.
+	expectedLaterEncodingTags []uint64
+}
+
+// value decodes CBOR data item into the value pointed to by v.
+// If CBOR data item fails to be decoded into v,
+// error is returned and offset is moved to the next CBOR data item.
+// Precondition: d.data contains at least one well-formed CBOR data item.
+func (d *decoder) value(v interface{}) error {
+	// v can't be nil, non-pointer, or nil pointer value.
+	if v == nil {
+		return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"}
+	}
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"}
+	} else if rv.IsNil() {
+		return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"}
+	}
+	rv = rv.Elem()
+	return d.parseToValue(rv, getTypeInfo(rv.Type()))
+}
+
+// parseToValue decodes CBOR data to value. It assumes data is well-formed,
+// and does not perform bounds checking.
+func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
+
+	// Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil.
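+	// For example (illustrative), decoding 0xf6 (null) into a *int destination
+	// sets the pointer to nil rather than allocating a new int.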
+ if d.nextCBORNil() && v.Kind() == reflect.Ptr { + d.skip() + v.Set(reflect.Zero(v.Type())) + return nil + } + + if tInfo.spclType == specialTypeIface { + if !v.IsNil() { + // Use value type + v = v.Elem() + tInfo = getTypeInfo(v.Type()) + } else { //nolint:gocritic + // Create and use registered type if CBOR data is registered tag + if d.dm.tags != nil && d.nextCBORType() == cborTypeTag { + + off := d.off + var tagNums []uint64 + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + tagNums = append(tagNums, tagNum) + } + d.off = off + + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + if registeredType.Implements(tInfo.nonPtrType) || + reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) { + v.Set(reflect.New(registeredType)) + v = v.Elem() + tInfo = getTypeInfo(registeredType) + } + } + } + } + } + + // Create new value for the pointer v to point to. + // At this point, CBOR value is not nil/undefined if v is a pointer. + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + d.skip() + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + + // Strip self-described CBOR tag number. + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return err + } + } + d.off = off + + if tInfo.spclType != specialTypeNone { + switch tInfo.spclType { + case specialTypeEmptyIface: + iv, err := d.parse(false) // Skipped self-described CBOR tag number already. + if iv != nil { + v.Set(reflect.ValueOf(iv)) + } + return err + + case specialTypeTag: + return d.parseToTag(v) + + case specialTypeTime: + if d.nextCBORNil() { + // Decoding CBOR null and undefined to time.Time is no-op. + d.skip() + return nil + } + tm, ok, err := d.parseToTime() + if err != nil { + return err + } + if ok { + v.Set(reflect.ValueOf(tm)) + } + return nil + + case specialTypeUnmarshalerIface: + return d.parseToUnmarshaler(v) + } + } + + // Check registered tag number + if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil { + t := d.nextCBORType() + if t != cborTypeTag { + if tagItem.opts.DecTag == DecTagRequired { + d.skip() // Required tag number is absent, skip entire tag + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.typ.String(), + errorMsg: "expect CBOR tag value"} + } + } else if err := d.validRegisteredTagNums(tagItem); err != nil { + d.skip() // Skip tag content + return err + } + } + + t := d.nextCBORType() + + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + return fillPositiveInt(t, val, v) + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
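+	// A CBOR negative integer with argument val encodes the value -(val+1);
+	// e.g. (illustrative) 0x3b followed by eight 0xff bytes (val = 2^64-1)
+	// represents -2^64, which only fits in a big.Int.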
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + nValue := int64(-1) ^ int64(val) + return fillNegativeInt(t, nValue, v) + + case cborTypeByteString: + b, copied := d.parseByteString() + b, converted, err := d.applyByteStringTextConversion(b, v.Type()) + if err != nil { + return err + } + copied = copied || converted + return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler) + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return err + } + return fillTextString(t, b, v) + + case cborTypePrimitives: + _, ai, val := d.getHead() + switch ai { + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return fillFloat(t, f, v) + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return fillFloat(t, f, v) + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return fillFloat(t, f, v) + + default: // ai <= 24 + if d.dm.simpleValues.rejected[SimpleValue(val)] { + return &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return fillBool(t, ai == additionalInformationAsTrue, v) + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return fillNil(t, v) + + default: + return fillPositiveInt(t, val, v) + } + } + + case cborTypeTag: + _, _, tagNum := d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + // Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsUint64() { + return fillPositiveInt(t, bi.Uint64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumNegativeBignum: + // Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsInt64() { + return fillNegativeInt(t, bi.Int64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. 
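The bignum branches above have a direct user-visible effect: tag 2 content that overflows uint64 still decodes losslessly into `big.Int`. A sketch under the same assumptions as the previous examples:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// c2 49 01 00..00 = tag 2 (unsigned bignum) around 2^64,
	// which does not fit in a uint64.
	data := []byte{0xc2, 0x49, 0x01, 0, 0, 0, 0, 0, 0, 0, 0}

	var bi big.Int
	if err := cbor.Unmarshal(data, &bi); err != nil {
		panic(err)
	}
	fmt.Println(bi.String()) // 18446744073709551616
}
```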
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + } + } + + return d.parseToValue(v, tInfo) + + case cborTypeArray: + if tInfo.nonPtrKind == reflect.Slice { + return d.parseArrayToSlice(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Array { + return d.parseArrayToArray(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Struct { + return d.parseArrayToStruct(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + + case cborTypeMap: + if tInfo.nonPtrKind == reflect.Struct { + return d.parseMapToStruct(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Map { + return d.parseMapToMap(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + } + + return nil +} + +func (d *decoder) parseToTag(v reflect.Value) error { + if d.nextCBORNil() { + // Decoding CBOR null and undefined to cbor.Tag is no-op. + d.skip() + return nil + } + + t := d.nextCBORType() + if t != cborTypeTag { + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()} + } + + // Unmarshal tag number + _, _, num := d.getHead() + + // Unmarshal tag content + content, err := d.parse(false) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(Tag{num, content})) + return nil +} + +// parseToTime decodes the current data item as a time.Time. The bool return value is false if and +// only if the destination value should remain unmodified. +func (d *decoder) parseToTime() (time.Time, bool, error) { + // Verify that tag number or absence of tag number is acceptable to specified timeTag. 
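The time handling that follows honors `DecOptions.TimeTag`. A sketch (byte literals hand-encoded; the epoch value is RFC 8949's example timestamp): by default a tag is accepted if present, while `cbor.DecTagRequired` rejects untagged values.

```go
package main

import (
	"fmt"
	"time"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	var t time.Time

	// c1 1a 514b67b0 = tag 1 (epoch time) around uint 1363896240.
	tagged := []byte{0xc1, 0x1a, 0x51, 0x4b, 0x67, 0xb0}
	_ = cbor.Unmarshal(tagged, &t)
	fmt.Println(t.UTC()) // 2013-03-21 20:04:00 +0000 UTC

	// The same integer without tag 1 fails once a tag is required.
	dm, _ := cbor.DecOptions{TimeTag: cbor.DecTagRequired}.DecMode()
	err := dm.Unmarshal([]byte{0x1a, 0x51, 0x4b, 0x67, 0xb0}, &t)
	fmt.Println(err != nil) // true
}
```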
+ if t := d.nextCBORType(); t == cborTypeTag { + if d.dm.timeTag == DecTagIgnored { + // Skip all enclosing tags + for t == cborTypeTag { + d.getHead() + t = d.nextCBORType() + } + if d.nextCBORNil() { + d.skip() + return time.Time{}, false, nil + } + } else { + // Read tag number + _, _, tagNum := d.getHead() + if tagNum != 0 && tagNum != 1 { + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + } + } + } else { + if d.dm.timeTag == DecTagRequired { + d.skip() + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"} + } + } + + switch t := d.nextCBORType(); t { + case cborTypeByteString: + if d.dm.byteStringToTime == ByteStringToTimeAllowed { + b, _ := d.parseByteString() + t, err := time.Parse(time.RFC3339, string(b)) + if err != nil { + return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err) + } + return t, true, nil + } + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + + case cborTypeTextString: + s, err := d.parseTextString() + if err != nil { + return time.Time{}, false, err + } + t, err := time.Parse(time.RFC3339, string(s)) + if err != nil { + return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error()) + } + return t, true, nil + + case cborTypePositiveInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%d overflows Go's int64", val), + } + } + return time.Unix(int64(val), 0), true, nil + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + if val == math.MaxUint64 { + // Maximum absolute value representable by negative integer is 2^64, + // not 2^64-1, so it overflows uint64. + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: "-18446744073709551616 overflows Go's int64", + } + } + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1), + } + } + return time.Unix(int64(-1)^int64(val), 0), true, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + var f float64 + switch ai { + case additionalInformationAsFloat16: + f = float64(float16.Frombits(uint16(val)).Float32()) + + case additionalInformationAsFloat32: + f = float64(math.Float32frombits(uint32(val))) + + case additionalInformationAsFloat64: + f = math.Float64frombits(val) + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } + + if math.IsNaN(f) || math.IsInf(f, 0) { + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6 + return time.Time{}, true, nil + } + seconds, fractional := math.Modf(f) + return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } +} + +// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface. +// It assumes data is well-formed, and does not perform bounds checking. 
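The `parseToUnmarshaler` hook documented above hands a type implementing `UnmarshalCBOR` the raw bytes of exactly one data item. A sketch; the `hexID` type is hypothetical, invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// hexID implements cbor.Unmarshaler and renders CBOR byte strings as hex.
type hexID string

func (h *hexID) UnmarshalCBOR(data []byte) error {
	var b []byte
	if err := cbor.Unmarshal(data, &b); err != nil {
		return err
	}
	*h = hexID(fmt.Sprintf("%x", b))
	return nil
}

func main() {
	var id hexID
	// 43 01 02 03 = byte string h'010203'.
	_ = cbor.Unmarshal([]byte{0x43, 0x01, 0x02, 0x03}, &id)
	fmt.Println(id) // 010203
}
```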
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error { + if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() { + d.skip() + return nil + } + + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + if u, ok := v.Interface().(Unmarshaler); ok { + start := d.off + d.skip() + return u.UnmarshalCBOR(d.data[start:d.off]) + } + d.skip() + return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler") +} + +// parse parses CBOR data and returns value in default Go type. +// It assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo + // Strip self-described CBOR tag number. + if skipSelfDescribedTag { + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return nil, err + } + } + d.off = off + + t := d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + + switch d.dm.intDec { + case IntDecConvertNone: + return val, nil + + case IntDecConvertSigned, IntDecConvertSignedOrFail: + if val > math.MaxInt64 { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + + return int64(val), nil + + case IntDecConvertSignedOrBigInt: + if val > math.MaxInt64 { + bi := new(big.Int).SetUint64(val) + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + return int64(val), nil + + default: + // not reachable + } + + case cborTypeNegativeInt: + _, _, val := d.getHead() + + if val > math.MaxInt64 { + // CBOR negative integer value overflows Go int64, use big.Int instead. + bi := new(big.Int).SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.intDec == IntDecConvertSignedOrFail { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + nValue := int64(-1) ^ int64(val) + return nValue, nil + + case cborTypeByteString: + b, copied := d.parseByteString() + var effectiveByteStringType = d.dm.defaultByteStringType + if effectiveByteStringType == nil { + effectiveByteStringType = typeByteSlice + } + b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType) + if err != nil { + return nil, err + } + copied = copied || converted + + switch effectiveByteStringType { + case typeByteSlice: + if copied { + return b, nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return clone, nil + + case typeString: + return string(b), nil + + default: + if copied || d.dm.defaultByteStringType.Kind() == reflect.String { + // Avoid an unnecessary copy since the conversion to string must + // copy the underlying bytes. 
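The `IntDec` switch in `parse` above decides which Go type CBOR integers take when decoded into `interface{}`. A small sketch of the observable difference:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	data := []byte{0x01} // uint 1
	var v interface{}

	dm, _ := cbor.DecOptions{}.DecMode() // default: IntDecConvertNone
	_ = dm.Unmarshal(data, &v)
	fmt.Printf("%T\n", v) // uint64

	dm, _ = cbor.DecOptions{IntDec: cbor.IntDecConvertSigned}.DecMode()
	_ = dm.Unmarshal(data, &v)
	fmt.Printf("%T\n", v) // int64
}
```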
+ return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil + } + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return nil, err + } + return string(b), nil + + case cborTypeTag: + tagOff := d.off + _, _, tagNum := d.getHead() + contentOff := d.off + + switch tagNum { + case tagNumRFC3339Time, tagNumEpochTime: + d.off = tagOff + tm, _, err := d.parseToTime() + if err != nil { + return nil, err + } + + switch d.dm.timeTagToAny { + case TimeTagToTime: + return tm, nil + + case TimeTagToRFC3339: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format. E.g. year cannot exceed 9999, etc. + text, err := tm.Truncate(time.Second).MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err) + } + return string(text), nil + + case TimeTagToRFC3339Nano: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format with sub-second precision. + text, err := tm.MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err) + } + return string(text), nil + + default: + // not reachable + } + + case tagNumUnsignedBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumNegativeBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. + if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + return d.parse(false) + } + } + + if d.dm.tags != nil { + // Parse to specified type if tag number is registered. 
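The registered-tag lookup that follows is driven by a `TagSet`. A sketch of the round trip; the `rational` type and tag number 1000 are arbitrary choices for illustration, not part of the library:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/fxamacker/cbor/v2"
)

type rational struct {
	Num, Den int64
}

func main() {
	tags := cbor.NewTagSet()
	if err := tags.Add(
		cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
		reflect.TypeOf(rational{}), 1000); err != nil {
		panic(err)
	}

	em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
	dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)

	b, _ := em.Marshal(rational{Num: 1, Den: 3})

	// Decoding into interface{} resolves tag 1000 back to the registered type.
	var v interface{}
	_ = dm.Unmarshal(b, &v)
	fmt.Printf("%#v\n", v) // main.rational{Num:1, Den:3}
}
```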
+ tagNums := []uint64{tagNum} + for d.nextCBORType() == cborTypeTag { + _, _, num := d.getHead() + tagNums = append(tagNums, num) + } + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + d.off = tagOff + rv := reflect.New(registeredType) + if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil { + return nil, err + } + return rv.Elem().Interface(), nil + } + } + + // Parse tag content + d.off = contentOff + content, err := d.parse(false) + if err != nil { + return nil, err + } + if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny { + return content, nil + } + return Tag{tagNum, content}, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + return nil, &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + if ai < 20 || ai == 24 { + return SimpleValue(val), nil + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return (ai == additionalInformationAsTrue), nil + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return nil, nil + + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return f, nil + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return f, nil + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return f, nil + } + + case cborTypeArray: + return d.parseArray() + + case cborTypeMap: + if d.dm.defaultMapType != nil { + m := reflect.New(d.dm.defaultMapType) + err := d.parseToValue(m, getTypeInfo(m.Elem().Type())) + if err != nil { + return nil, err + } + return m.Elem().Interface(), nil + } + return d.parseMap() + } + + return nil, nil +} + +// parseByteString parses a CBOR encoded byte string. The returned byte slice +// may be backed directly by the input. The second return value will be true if +// and only if the slice is backed by a copy of the input. Callers are +// responsible for making a copy if necessary. +func (d *decoder) parseByteString() ([]byte, bool) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + return b, false + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + b = append(b, d.data[d.off:d.off+int(val)]...) + d.off += int(val) + } + return b, true +} + +// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text +// encoding. If no transformation was performed (because it was not required), the original byte +// slice is returned and the bool return value is false. Otherwise, a new slice containing the +// converted bytes is returned along with the bool value true. 
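The string branch of `applyByteStringTextConversion`, documented above, text-encodes byte string contents when an enclosing "expected later encoding" tag asks for it. A sketch using tag 23 (expected base16), with hand-encoded input:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	dm, _ := cbor.DecOptions{
		ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding,
	}.DecMode()

	// d7 43 01 02 03 = tag 23 around the byte string h'010203'.
	var s string
	if err := dm.Unmarshal([]byte{0xd7, 0x43, 0x01, 0x02, 0x03}, &s); err != nil {
		panic(err)
	}
	fmt.Println(s) // 010203
}
```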
+func (d *decoder) applyByteStringTextConversion( + src []byte, + dstType reflect.Type, +) ( + dst []byte, + transformed bool, + err error, +) { + switch dstType.Kind() { + case reflect.String: + if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 { + return src, false, nil + } + + switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] { + case tagNumExpectedLaterEncodingBase64URL: + encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src))) + base64.RawURLEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase64: + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase16: + encoded := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(encoded, src) + return encoded, true, nil + + default: + // If this happens, there is a bug: the decoder has pushed an invalid + // "expected later encoding" tag to the stack. + panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags)) + } + + case reflect.Slice: + if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 { + // Either the destination is not a slice of bytes, or the encoder that + // produced the input indicated an expected text encoding tag and therefore + // the content of the byte string has NOT been text encoded. + return src, false, nil + } + + switch d.dm.byteStringExpectedFormat { + case ByteStringExpectedBase64URL: + decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + n, err := base64.RawURLEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase64: + decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src))) + n, err := base64.StdEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase16: + decoded := make([]byte, hex.DecodedLen(len(src))) + n, err := hex.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err) + } + return decoded[:n], true, nil + } + } + + return src, false, nil +} + +// parseTextString parses CBOR encoded text string. It returns a byte slice +// to prevent creating an extra copy of string. Caller should wrap returned +// byte slice as string when needed. +func (d *decoder) parseTextString() ([]byte, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + return b, nil + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + x := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { + for !d.foundBreak() { + d.skip() // Skip remaining chunk on error + } + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + b = append(b, x...) 
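The slice branch above runs in the opposite direction: with a byte string expected format configured, text-encoded content inside a byte string is decoded back to raw bytes. A sketch, assuming the option is exposed as `DecOptions.ByteStringExpectedFormat` with the `ByteStringExpectedBase16` value seen in this file:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	dm, _ := cbor.DecOptions{
		ByteStringExpectedFormat: cbor.ByteStringExpectedBase16,
	}.DecMode()

	// 46 ... = a 6-byte byte string holding the ASCII text "010203".
	data := []byte{0x46, 0x30, 0x31, 0x30, 0x32, 0x30, 0x33}

	var b []byte
	if err := dm.Unmarshal(data, &b); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // 01 02 03
}
```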
+ } + return b, nil +} + +func (d *decoder) parseArray() ([]interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + v := make([]interface{}, count) + var e interface{} + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + v[i] = e + } + return v, err +} + +func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + if v.IsNil() || v.Cap() < count || count == 0 { + v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count)) + } + v.SetLen(count) + var err error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + } + return err +} + +func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + gi := 0 + vLen := v.Len() + var err error + for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ { + if gi < vLen { + // Read CBOR array element and set array element + if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + gi++ + } else { + d.skip() // Skip remaining CBOR array element + } + } + // Set remaining Go array elements to zero values. + if gi < vLen { + zeroV := reflect.Zero(tInfo.elemTypeInfo.typ) + for ; gi < vLen; gi++ { + v.Index(gi).Set(zeroV) + } + } + return err +} + +func (d *decoder) parseMap() (interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + m := make(map[interface{}]interface{}) + var k, e interface{} + var err, lastErr error + keyCount := 0 + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if k, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + rv := reflect.ValueOf(k) + if !isHashableValue(rv) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + k, converted = convertByteSliceToByteString(k) + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{rv.Type().String()} + } + d.skip() + continue + } + } + + // Parse CBOR map value. + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + m[k] = e + + // Detect duplicate map key. 
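The duplicate-key enforcement below implements "Allow Partial Fill" (APF): decoding stops at the duplicate, the duplicate's value is zeroed, and a `*cbor.DupMapKeyError` is returned. A sketch with a hand-encoded map:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	dm, _ := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()

	// a2 6161 01 6161 02 = {"a": 1, "a": 2}
	data := []byte{0xa2, 0x61, 0x61, 0x01, 0x61, 0x61, 0x02}

	var m map[string]int
	err := dm.Unmarshal(data, &m)
	fmt.Println(err != nil) // true (*cbor.DupMapKeyError)
	fmt.Println(m)          // map[a:0] (partial fill: duplicate's value zeroed)
}
```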
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := len(m) + if newKeyCount == keyCount { + m[k] = nil + err = &DupMapKeyError{k, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // Skip map key + d.skip() // Skip map value + } + return m, err + } + keyCount = newKeyCount + } + } + return m, err +} + +func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if v.IsNil() { + mapsize := count + if !hasSize { + mapsize = 0 + } + v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize)) + } + keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ + reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind) + var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value + keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable. + var err, lastErr error + keyCount := v.Len() + var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + existingKeys = make(map[interface{}]bool, keyCount) + if keyCount > 0 { + vKeys := v.MapKeys() + for i := 0; i < len(vKeys); i++ { + existingKeys[vKeys[i].Interface()] = true + } + } + } + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if !keyValue.IsValid() { + keyValue = reflect.New(keyType).Elem() + } else if !reuseKey { + if !zeroKeyValue.IsValid() { + zeroKeyValue = reflect.Zero(keyType) + } + keyValue.Set(zeroKeyValue) + } + if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + if keyIsInterfaceType && keyValue.Elem().IsValid() { + if !isHashableValue(keyValue.Elem()) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + var k interface{} + k, converted = convertByteSliceToByteString(keyValue.Elem().Interface()) + if converted { + keyValue.Set(reflect.ValueOf(k)) + } + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()} + } + d.skip() + continue + } + } + } + + // Parse CBOR map value. + if !eleValue.IsValid() { + eleValue = reflect.New(eleType).Elem() + } else if !reuseEle { + if !zeroEleValue.IsValid() { + zeroEleValue = reflect.Zero(eleType) + } + eleValue.Set(zeroEleValue) + } + if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + v.SetMapIndex(keyValue, eleValue) + + // Detect duplicate map key. 
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := v.Len() + if newKeyCount == keyCount { + kvi := keyValue.Interface() + if !existingKeys[kvi] { + v.SetMapIndex(keyValue, reflect.New(eleType).Elem()) + err = &DupMapKeyError{kvi, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // skip map key + d.skip() // skip map value + } + return err + } + delete(existingKeys, kvi) + } + keyCount = newKeyCount + } + } + return err +} + +func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if !structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR array to struct without toarray option", + } + } + + start := d.off + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size + } + if count != len(structType.fields) { + d.off = start + d.skip() + return &UnmarshalTypeError{ + CBORType: cborTypeArray.String(), + GoType: tInfo.typ.String(), + errorMsg: "cannot decode CBOR array to struct with different number of elements", + } + } + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + f := structType.fields[i] + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.typ.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR map to struct with toarray option", + } + } + + var err, lastErr error + + // Get CBOR map size + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + + // Keeps track of matched struct fields + var foundFldIdx []bool + { + const maxStackFields = 128 + if nfields := len(structType.fields); nfields <= maxStackFields { + // For structs with typical field counts, expect that this can be + // stack-allocated. 
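`parseArrayToStruct` above only accepts structs that opt in with the `toarray` tag, which makes the struct round-trip as a CBOR array rather than a map. A sketch; the `point` type is invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type point struct {
	_ struct{} `cbor:",toarray"`
	X int
	Y int
}

func main() {
	b, _ := cbor.Marshal(point{X: 1, Y: 2})
	fmt.Printf("% x\n", b) // 82 01 02 (a 2-element array)

	var p point
	_ = cbor.Unmarshal(b, &p)
	fmt.Println(p.X, p.Y) // 1 2
}
```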
+ var a [maxStackFields]bool + foundFldIdx = a[:nfields] + } else { + foundFldIdx = make([]bool, len(structType.fields)) + } + } + + // Keeps track of CBOR map keys to detect duplicate map key + keyCount := 0 + var mapKeys map[interface{}]struct{} + + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + +MapEntryLoop: + for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + var f *field + + // If duplicate field detection is enabled and the key at index j did not match any + // field, k will hold the map key. + var k interface{} + + t := d.nextCBORType() + if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + var keyBytes []byte + if t == cborTypeTextString { + keyBytes, lastErr = d.parseTextString() + if lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() // skip value + continue + } + } else { // cborTypeByteString + keyBytes, _ = d.parseByteString() + } + + // Check for exact match on field name. + if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + fld := structType.fields[i] + + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{fld.name, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + } + + // Find field with case-insensitive match + if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { + keyLen := len(keyBytes) + keyString := string(keyBytes) + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{keyString, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = string(keyBytes) + } + } else if t <= cborTypeNegativeInt { // uint/int + var nameAsInt int64 + + if t == cborTypePositiveInt { + _, _, val := d.getHead() + nameAsInt = int64(val) + } else { + _, _, val := d.getHead() + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(-1) ^ int64(val) + } + + // Find field + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if fld.keyAsInt && fld.nameAsInt == nameAsInt { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{nameAsInt, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = nameAsInt + } + } else { + if err == nil { + 
err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf("").String(), + errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", + } + } + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + // parse key + k, lastErr = d.parse(true) + if lastErr != nil { + d.skip() // skip value + continue + } + // Detect if CBOR map key can be used as Go map key. + if !isHashableValue(reflect.ValueOf(k)) { + d.skip() // skip value + continue + } + } else { + d.skip() // skip key + } + } + + if f == nil { + if errOnUnknownField { + err = &UnknownFieldError{j} + d.skip() // Skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + + // Two map keys that match the same struct field are immediately considered + // duplicates. This check detects duplicates between two map keys that do + // not match a struct field. If unknown field errors are enabled, then this + // check is never reached. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if mapKeys == nil { + mapKeys = make(map[interface{}]struct{}, 1) + } + mapKeys[k] = struct{}{} + newKeyCount := len(mapKeys) + if newKeyCount == keyCount { + err = &DupMapKeyError{k, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + keyCount = newKeyCount + } + + d.skip() // Skip value + continue + } + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// validRegisteredTagNums verifies that tag numbers match registered tag numbers of type t. +// validRegisteredTagNums assumes next CBOR data type is tag. It scans all tag numbers, and stops at tag content. +func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error { + // Scan until next cbor data is tag content. + tagNums := make([]uint64, 0, 1) + for d.nextCBORType() == cborTypeTag { + _, _, val := d.getHead() + tagNums = append(tagNums, val) + } + + if !registeredTag.equalTagNum(tagNums) { + return &WrongTagError{registeredTag.contentType, registeredTag.num, tagNums} + } + return nil +} + +func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem { + if d.dm.tags != nil { + return d.dm.tags.getTagItemFromType(vt) + } + return nil +} + +// skip moves data offset to the next item. skip assumes data is well-formed, +// and does not perform bounds checking. 
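The field-matching loop above also recognizes integer map keys via the `keyasint` struct tag, and `ExtraDecErrorUnknownField` turns unmatched keys into errors instead of silently skipping them. A sketch; the `claims` type is invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type claims struct {
	Iss string `cbor:"1,keyasint"`
	Exp int64  `cbor:"4,keyasint"`
}

func main() {
	// a2 01 63 6a6f65 04 1a 514b67b0 = {1: "joe", 4: 1363896240}
	data := []byte{0xa2, 0x01, 0x63, 0x6a, 0x6f, 0x65, 0x04, 0x1a, 0x51, 0x4b, 0x67, 0xb0}

	var c claims
	_ = cbor.Unmarshal(data, &c)
	fmt.Println(c.Iss, c.Exp) // joe 1363896240

	// Unknown keys are skipped by default; they can be made fatal instead.
	dm, _ := cbor.DecOptions{ExtraReturnErrors: cbor.ExtraDecErrorUnknownField}.DecMode()
	err := dm.Unmarshal([]byte{0xa1, 0x63, 0x66, 0x6f, 0x6f, 0x01}, &c) // {"foo": 1}
	fmt.Println(err != nil) // true (*cbor.UnknownFieldError)
}
```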
+func (d *decoder) skip() { + t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + + if indefiniteLength { + switch t { + case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap: + for { + if isBreakFlag(d.data[d.off]) { + d.off++ + return + } + d.skip() + } + } + } + + switch t { + case cborTypeByteString, cborTypeTextString: + d.off += int(val) + + case cborTypeArray: + for i := 0; i < int(val); i++ { + d.skip() + } + + case cborTypeMap: + for i := 0; i < int(val)*2; i++ { + d.skip() + } + + case cborTypeTag: + d.skip() + } +} + +func (d *decoder) getHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, +) { + t, ai, val = d.getHead() + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +// getHead assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) getHead() (t cborType, ai byte, val uint64) { + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + + if ai <= maxAdditionalInformationWithoutArgument { + return + } + + if ai == additionalInformationWith1ByteArgument { + val = uint64(d.data[d.off]) + d.off++ + return + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + return + } + return +} + +func (d *decoder) numOfItemsUntilBreak() int { + savedOff := d.off + i := 0 + for !d.foundBreak() { + d.skip() + i++ + } + d.off = savedOff + return i +} + +// foundBreak returns true if next byte is CBOR break code and moves cursor by 1, +// otherwise it returns false. +// foundBreak assumes data is well-formed, and does not perform bounds checking. 
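`skip` and `foundBreak` above are what make indefinite-length items work: `skip` recurses through chunks and elements until it reaches the break code (0xff) that `foundBreak` consumes. The effect is observable from the public API, sketched here:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// 9f 01 02 ff = indefinite-length array [_ 1, 2] ending in a break code.
	var v []int
	if err := cbor.Unmarshal([]byte{0x9f, 0x01, 0x02, 0xff}, &v); err != nil {
		panic(err)
	}
	fmt.Println(v) // [1 2]
}
```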
+func (d *decoder) foundBreak() bool { + if isBreakFlag(d.data[d.off]) { + d.off++ + return true + } + return false +} + +func (d *decoder) reset(data []byte) { + d.data = data + d.off = 0 + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0] +} + +func (d *decoder) nextCBORType() cborType { + return getType(d.data[d.off]) +} + +func (d *decoder) nextCBORNil() bool { + return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7 +} + +var ( + typeIntf = reflect.TypeOf([]interface{}(nil)).Elem() + typeTime = reflect.TypeOf(time.Time{}) + typeBigInt = reflect.TypeOf(big.Int{}) + typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + typeString = reflect.TypeOf("") + typeByteSlice = reflect.TypeOf([]byte(nil)) +) + +func fillNil(_ cborType, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr: + v.Set(reflect.Zero(v.Type())) + return nil + } + return nil +} + +func fillPositiveInt(t cborType, val uint64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if val > math.MaxInt64 { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + if v.OverflowInt(int64(val)) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(int64(val)) + return nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if v.OverflowUint(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetUint(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + + if v.Type() == typeBigInt { + i := new(big.Int).SetUint64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillNegativeInt(t cborType, val int64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.OverflowInt(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + if v.Type() == typeBigInt { + i := new(big.Int).SetInt64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillBool(t cborType, val bool, v reflect.Value) error { + if v.Kind() == reflect.Bool { + v.SetBool(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillFloat(t cborType, val float64, v reflect.Value) error { + switch v.Kind() { + case reflect.Float32, reflect.Float64: + if v.OverflowFloat(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(), + } + } + v.SetFloat(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: 
v.Type().String()} +} + +func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error { + if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) { + if v.CanAddr() { + v = v.Addr() + if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok { + // The contract of BinaryUnmarshaler forbids + // retaining the input bytes, so no copying is + // required even if val is shared. + return u.UnmarshalBinary(val) + } + } + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 { + src := val + if shared { + // SetBytes shares the underlying bytes of the source slice. + src = make([]byte, len(val)) + copy(src, val) + } + v.SetBytes(src) + return nil + } + if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 { + vLen := v.Len() + i := 0 + for ; i < vLen && i < len(val); i++ { + v.Index(i).SetUint(uint64(val[i])) + } + // Set remaining Go array elements to zero values. + if i < vLen { + zeroV := reflect.Zero(reflect.TypeOf(byte(0))) + for ; i < vLen; i++ { + v.Index(i).Set(zeroV) + } + } + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillTextString(t cborType, val []byte, v reflect.Value) error { + if v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func isImmutableKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.String: + return true + + default: + return false + } +} + +func isHashableValue(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Slice, reflect.Map, reflect.Func: + return false + + case reflect.Struct: + switch rv.Type() { + case typeTag: + tag := rv.Interface().(Tag) + return isHashableValue(reflect.ValueOf(tag.Content)) + case typeBigInt: + return false + } + } + return true +} + +// convertByteSliceToByteString converts []byte to ByteString if +// - v is []byte type, or +// - v is Tag type and tag content type is []byte +// This function also handles nested tags. +// CBOR data is already verified to be well-formed before this function is used, +// so the recursion won't exceed max nested levels. +func convertByteSliceToByteString(v interface{}) (interface{}, bool) { + switch v := v.(type) { + case []byte: + return ByteString(v), true + + case Tag: + content, converted := convertByteSliceToByteString(v.Content) + if converted { + return Tag{Number: v.Number, Content: content}, true + } + } + return v, false +} diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go new file mode 100644 index 000000000..44afb8660 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -0,0 +1,724 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "strconv" + "unicode/utf16" + "unicode/utf8" + + "github.com/x448/float16" +) + +// DiagMode is the main interface for CBOR diagnostic notation. +type DiagMode interface { + // Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode. + Diagnose([]byte) (string, error) + + // DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. + DiagnoseFirst([]byte) (string, []byte, error) + + // DiagOptions returns user specified options used to create this DiagMode. + DiagOptions() DiagOptions +} + +// ByteStringEncoding specifies the base encoding that byte strings are notated. +type ByteStringEncoding uint8 + +const ( + // ByteStringBase16Encoding encodes byte strings in base16, without padding. + ByteStringBase16Encoding ByteStringEncoding = iota + + // ByteStringBase32Encoding encodes byte strings in base32, without padding. + ByteStringBase32Encoding + + // ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding. + ByteStringBase32HexEncoding + + // ByteStringBase64Encoding encodes byte strings in base64url, without padding. + ByteStringBase64Encoding + + maxByteStringEncoding +) + +func (bse ByteStringEncoding) valid() error { + if bse >= maxByteStringEncoding { + return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse))) + } + return nil +} + +// DiagOptions specifies Diag options. +type DiagOptions struct { + // ByteStringEncoding specifies the base encoding that byte strings are notated. + // Default is ByteStringBase16Encoding. + ByteStringEncoding ByteStringEncoding + + // ByteStringHexWhitespace specifies notating with whitespace in byte string + // when ByteStringEncoding is ByteStringBase16Encoding. + ByteStringHexWhitespace bool + + // ByteStringText specifies notating with text in byte string + // if it is a valid UTF-8 text. + ByteStringText bool + + // ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string + // if it is a valid CBOR bytes. + ByteStringEmbeddedCBOR bool + + // CBORSequence specifies notating CBOR sequences. + // otherwise, it returns an error if there are more bytes after the first CBOR. + CBORSequence bool + + // FloatPrecisionIndicator specifies appending a suffix to indicate float precision. + // Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators. + FloatPrecisionIndicator bool + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int +} + +// DiagMode returns a DiagMode with immutable options. 
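The `DiagMode` interface defined above is exercised through `Diagnose`/`DiagnoseFirst`. A sketch of the default mode; the input bytes are hand-encoded:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// a2 6161 01 6162 82 02 03 = {"a": 1, "b": [2, 3]}
	data := []byte{0xa2, 0x61, 0x61, 0x01, 0x61, 0x62, 0x82, 0x02, 0x03}

	edn, err := cbor.Diagnose(data)
	fmt.Println(edn, err) // {"a": 1, "b": [2, 3]} <nil>
}
```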
+func (opts DiagOptions) DiagMode() (DiagMode, error) { + return opts.diagMode() +} + +func (opts DiagOptions) diagMode() (*diagMode, error) { + if err := opts.ByteStringEncoding.valid(); err != nil { + return nil, err + } + + decMode, err := DecOptions{ + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.decMode() + if err != nil { + return nil, err + } + + return &diagMode{ + byteStringEncoding: opts.ByteStringEncoding, + byteStringHexWhitespace: opts.ByteStringHexWhitespace, + byteStringText: opts.ByteStringText, + byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR, + cborSequence: opts.CBORSequence, + floatPrecisionIndicator: opts.FloatPrecisionIndicator, + decMode: decMode, + }, nil +} + +type diagMode struct { + byteStringEncoding ByteStringEncoding + byteStringHexWhitespace bool + byteStringText bool + byteStringEmbeddedCBOR bool + cborSequence bool + floatPrecisionIndicator bool + decMode *decMode +} + +// DiagOptions returns user specified options used to create this DiagMode. +func (dm *diagMode) DiagOptions() DiagOptions { + return DiagOptions{ + ByteStringEncoding: dm.byteStringEncoding, + ByteStringHexWhitespace: dm.byteStringHexWhitespace, + ByteStringText: dm.byteStringText, + ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR, + CBORSequence: dm.cborSequence, + FloatPrecisionIndicator: dm.floatPrecisionIndicator, + MaxNestedLevels: dm.decMode.maxNestedLevels, + MaxArrayElements: dm.decMode.maxArrayElements, + MaxMapPairs: dm.decMode.maxMapPairs, + } +} + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode. +func (dm *diagMode) Diagnose(data []byte) (string, error) { + return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence) +} + +// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. +func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return newDiagnose(data, dm.decMode, dm).diagFirst() +} + +var defaultDiagMode, _ = DiagOptions{}.diagMode() + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items +// using the default diagnostic mode. +// +// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation. +func Diagnose(data []byte) (string, error) { + return defaultDiagMode.Diagnose(data) +} + +// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. 
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return defaultDiagMode.DiagnoseFirst(data) +} + +type diagnose struct { + dm *diagMode + d *decoder + w *bytes.Buffer +} + +func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose { + return &diagnose{ + dm: diagm, + d: &decoder{data: data, dm: decm}, + w: &bytes.Buffer{}, + } +} + +func (di *diagnose) diag(cborSequence bool) (string, error) { + // CBOR Sequence + firstItem := true + for { + switch err := di.wellformed(cborSequence); err { + case nil: + if !firstItem { + di.w.WriteString(", ") + } + firstItem = false + if itemErr := di.item(); itemErr != nil { + return di.w.String(), itemErr + } + + case io.EOF: + if firstItem { + return di.w.String(), err + } + return di.w.String(), nil + + default: + return di.w.String(), err + } + } +} + +func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) { + err = di.wellformed(true) + if err == nil { + err = di.item() + } + + if err == nil { + // Return EDN and the rest of the data slice (which might be len 0) + return di.w.String(), di.d.data[di.d.off:], nil + } + + return di.w.String(), nil, err +} + +func (di *diagnose) wellformed(allowExtraData bool) error { + off := di.d.off + err := di.d.wellformed(allowExtraData, false) + di.d.off = off + return err +} + +func (di *diagnose) item() error { //nolint:gocyclo + initialByte := di.d.data[di.d.off] + switch initialByte { + case cborByteStringWithIndefiniteLengthHead, + cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string + di.d.off++ + if isBreakFlag(di.d.data[di.d.off]) { + di.d.off++ + switch initialByte { + case cborByteStringWithIndefiniteLengthHead: + // indefinite-length bytes with no chunks. + di.w.WriteString(`''_`) + return nil + case cborTextStringWithIndefiniteLengthHead: + // indefinite-length text with no chunks. + di.w.WriteString(`""_`) + return nil + } + } + + di.w.WriteString("(_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // wellformedIndefiniteString() already checked that the next item is a byte/text string. + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(')') + return nil + + case cborArrayWithIndefiniteLengthHead: // indefinite-length array + di.d.off++ + di.w.WriteString("[_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(']') + return nil + + case cborMapWithIndefiniteLengthHead: // indefinite-length map + di.d.off++ + di.w.WriteString("{_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // key + if err := di.item(); err != nil { + return err + } + + di.w.WriteString(": ") + + // value + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte('}') + return nil + } + + t := di.d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := di.d.getHead() + di.w.WriteString(strconv.FormatUint(val, 10)) + return nil + + case cborTypeNegativeInt: + _, _, val := di.d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + } + + nValue := int64(-1) ^ int64(val) + di.w.WriteString(strconv.FormatInt(nValue, 10)) + return nil + + case cborTypeByteString: + b, _ := di.d.parseByteString() + return di.encodeByteString(b) + + case cborTypeTextString: + b, err := di.d.parseTextString() + if err != nil { + return err + } + return di.encodeTextString(string(b), '"') + + case cborTypeArray: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('[') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte(']') + return nil + + case cborTypeMap: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('{') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + // key + if err := di.item(); err != nil { + return err + } + di.w.WriteString(": ") + // value + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte('}') + return nil + + case cborTypeTag: + _, _, tagNum := di.d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumUnsignedBignum, + "byte string", + nt.String()) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + di.w.WriteString(bi.String()) + return nil + + case tagNumNegativeBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumNegativeBignum, + "byte string", + nt.String(), + ) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + + default: + di.w.WriteString(strconv.FormatUint(tagNum, 10)) + di.w.WriteByte('(') + if err := di.item(); err != nil { + return err + } + di.w.WriteByte(')') + return nil + } + + case cborTypePrimitives: + _, ai, val := di.d.getHead() + switch ai { + case additionalInformationAsFalse: + di.w.WriteString("false") + return nil + + case additionalInformationAsTrue: + di.w.WriteString("true") + return nil + + case additionalInformationAsNull: + di.w.WriteString("null") + return nil + + case additionalInformationAsUndefined: + di.w.WriteString("undefined") + return nil + + case additionalInformationAsFloat16, + additionalInformationAsFloat32, + additionalInformationAsFloat64: + return di.encodeFloat(ai, val) + + default: + di.w.WriteString("simple(") + di.w.WriteString(strconv.FormatUint(val, 10)) + di.w.WriteByte(')') + return nil + } + } + + return nil +} + +// writeU16 format a rune as "\uxxxx" +func (di *diagnose) writeU16(val rune) { + di.w.WriteString("\\u") + var in [2]byte + in[0] = byte(val >> 8) + in[1] = byte(val) + sz := hex.EncodedLen(len(in)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, in[:]) + di.w.Write(dst) +} + +var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding) +var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) + +func (di *diagnose) encodeByteString(val []byte) error { + if len(val) > 0 { + if di.dm.byteStringText && utf8.Valid(val) { + return di.encodeTextString(string(val), '\'') + } + + if di.dm.byteStringEmbeddedCBOR { + di2 := newDiagnose(val, di.dm.decMode, di.dm) + // should always notating embedded CBOR sequence. 
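`encodeByteString` below honors `DiagOptions.ByteStringEncoding`, so the same byte string can be notated in base16 (the default) or base64url. A sketch using the option values declared earlier in this file:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	data := []byte{0x43, 0x01, 0x02, 0x03} // byte string h'010203'

	dm16, _ := cbor.DiagOptions{}.DiagMode() // default: ByteStringBase16Encoding
	s, _ := dm16.Diagnose(data)
	fmt.Println(s) // h'010203'

	dm64, _ := cbor.DiagOptions{ByteStringEncoding: cbor.ByteStringBase64Encoding}.DiagMode()
	s, _ = dm64.Diagnose(data)
	fmt.Println(s) // b64'AQID'
}
```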
+ if str, err := di2.diag(true); err == nil { + di.w.WriteString("<<") + di.w.WriteString(str) + di.w.WriteString(">>") + return nil + } + } + } + + switch di.dm.byteStringEncoding { + case ByteStringBase16Encoding: + di.w.WriteString("h'") + if di.dm.byteStringHexWhitespace { + sz := hex.EncodedLen(len(val)) + if len(val) > 0 { + sz += len(val) - 1 + } + di.w.Grow(sz) + + dst := di.w.Bytes()[di.w.Len():] + for i := range val { + if i > 0 { + dst = append(dst, ' ') + } + hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1]) + dst = dst[:len(dst)+2] + } + di.w.Write(dst) + } else { + sz := hex.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, val) + di.w.Write(dst) + } + di.w.WriteByte('\'') + return nil + + case ByteStringBase32Encoding: + di.w.WriteString("b32'") + sz := rawBase32Encoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32Encoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase32HexEncoding: + di.w.WriteString("h32'") + sz := rawBase32HexEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32HexEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase64Encoding: + di.w.WriteString("b64'") + sz := base64.RawURLEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + base64.RawURLEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + default: + // It should not be possible for users to construct a *diagMode with an invalid byte + // string encoding. + panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding)) + } +} + +const utf16SurrSelf = rune(0x10000) + +// quote should be either `'` or `"` +func (di *diagnose) encodeTextString(val string, quote byte) error { + di.w.WriteByte(quote) + + for i := 0; i < len(val); { + if b := val[i]; b < utf8.RuneSelf { + switch { + case b == '\t', b == '\n', b == '\r', b == '\\', b == quote: + di.w.WriteByte('\\') + + switch b { + case '\t': + b = 't' + case '\n': + b = 'n' + case '\r': + b = 'r' + } + di.w.WriteByte(b) + + case b >= ' ' && b <= '~': + di.w.WriteByte(b) + + default: + di.writeU16(rune(b)) + } + + i++ + continue + } + + c, size := utf8.DecodeRuneInString(val[i:]) + switch { + case c == utf8.RuneError: + return &SemanticError{"cbor: invalid UTF-8 string"} + + case c < utf16SurrSelf: + di.writeU16(c) + + default: + c1, c2 := utf16.EncodeRune(c) + di.writeU16(c1) + di.writeU16(c2) + } + + i += size + } + + di.w.WriteByte(quote) + return nil +} + +func (di *diagnose) encodeFloat(ai byte, val uint64) error { + f64 := float64(0) + switch ai { + case additionalInformationAsFloat16: + f16 := float16.Frombits(uint16(val)) + switch { + case f16.IsNaN(): + di.w.WriteString("NaN") + return nil + case f16.IsInf(1): + di.w.WriteString("Infinity") + return nil + case f16.IsInf(-1): + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f16.Float32()) + } + + case additionalInformationAsFloat32: + f32 := math.Float32frombits(uint32(val)) + switch { + case f32 != f32: + di.w.WriteString("NaN") + return nil + case f32 > math.MaxFloat32: + di.w.WriteString("Infinity") + return nil + case f32 < -math.MaxFloat32: + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f32) + } + + case additionalInformationAsFloat64: + f64 = math.Float64frombits(val) + switch { + case f64 
!= f64: + di.w.WriteString("NaN") + return nil + case f64 > math.MaxFloat64: + di.w.WriteString("Infinity") + return nil + case f64 < -math.MaxFloat64: + di.w.WriteString("-Infinity") + return nil + } + } + // Use ES6 number to string conversion which should match most JSON generators. + // Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585 + const bitSize = 64 + b := make([]byte, 0, 32) + if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) { + b = strconv.AppendFloat(b, f64, 'e', -1, bitSize) + // clean up e-09 to e-9 + n := len(b) + if n >= 4 && string(b[n-4:n-1]) == "e-0" { + b = append(b[:n-2], b[n-1]) + } + } else { + b = strconv.AppendFloat(b, f64, 'f', -1, bitSize) + } + + // add decimal point and trailing zero if needed + if bytes.IndexByte(b, '.') < 0 { + if i := bytes.IndexByte(b, 'e'); i < 0 { + b = append(b, '.', '0') + } else { + b = append(b[:i+2], b[i:]...) + b[i] = '.' + b[i+1] = '0' + } + } + + di.w.WriteString(string(b)) + + if di.dm.floatPrecisionIndicator { + switch ai { + case additionalInformationAsFloat16: + di.w.WriteString("_1") + return nil + + case additionalInformationAsFloat32: + di.w.WriteString("_2") + return nil + + case additionalInformationAsFloat64: + di.w.WriteString("_3") + return nil + } + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/doc.go b/vendor/github.com/fxamacker/cbor/v2/doc.go new file mode 100644 index 000000000..23f68b984 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/doc.go @@ -0,0 +1,129 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +/* +Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags, +Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding, +CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection. + +Encoding options allow "preferred serialization" by encoding integers and floats +to their smallest forms (e.g. float16) when values fit. + +Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller +and easier to use with structs. + +For example, "toarray" tag makes struct fields encode to CBOR array elements. And +"keyasint" makes a field encode to an element of CBOR map with specified int key. + +Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go + +# Basics + +The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start + +Function signatures identical to encoding/json include: + + Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode. + +Standard interfaces include: + + BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler. + +Custom encoding and decoding is possible by implementing standard interfaces for +user-defined Go types. + +Codec functions are available at package-level (using defaults options) or by +creating modes from options at runtime. + +"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode). + +EncMode and DecMode interfaces are created from EncOptions or DecOptions structs. + + em, err := cbor.EncOptions{...}.EncMode() + em, err := cbor.CanonicalEncOptions().EncMode() + em, err := cbor.CTAP2EncOptions().EncMode() + +Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of +modes won't accidentally change at runtime after they're created. 
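Editor's note: the decoding side mirrors this options-to-mode flow. A minimal sketch, assuming the DecOptions field DupMapKey from this package (shown only as one example option):

```
package main

import "github.com/fxamacker/cbor/v2"

func main() {
	// Options in, immutable mode out; dm is safe for concurrent reuse.
	// DupMapKeyEnforcedAPF rejects duplicate map keys ("Allow Partial Fill").
	dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
	if err != nil {
		panic(err)
	}
	var v interface{}
	_ = dm.Unmarshal([]byte{0xa0}, &v) // 0xa0 is an empty CBOR map
}
```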
+ +Modes are intended to be reused and are safe for concurrent use. + +EncMode and DecMode Interfaces + + // EncMode interface uses immutable options and is safe for concurrent use. + type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions // returns copy of options + } + + // DecMode interface uses immutable options and is safe for concurrent use. + type DecMode interface { + Unmarshal(data []byte, v interface{}) error + NewDecoder(r io.Reader) *Decoder + DecOptions() DecOptions // returns copy of options + } + +Using Default Encoding Mode + + b, err := cbor.Marshal(v) + + encoder := cbor.NewEncoder(w) + err = encoder.Encode(v) + +Using Default Decoding Mode + + err := cbor.Unmarshal(b, &v) + + decoder := cbor.NewDecoder(r) + err = decoder.Decode(&v) + +Creating and Using Encoding Modes + + // Create EncOptions using either struct literal or a function. + opts := cbor.CanonicalEncOptions() + + // If needed, modify encoding options + opts.Time = cbor.TimeUnix + + // Create reusable EncMode interface with immutable options, safe for concurrent use. + em, err := opts.EncMode() + + // Use EncMode like encoding/json, with same function signatures. + b, err := em.Marshal(v) + // or + encoder := em.NewEncoder(w) + err := encoder.Encode(v) + + // NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options + // specified during creation of em (encoding mode). + +# CBOR Options + +Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options + +Encoding Options: https://github.com/fxamacker/cbor#encoding-options + +Decoding Options: https://github.com/fxamacker/cbor#decoding-options + +# Struct Tags + +Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected. +If both struct tags are specified then `cbor` is used. + +Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use +very compact formats like COSE and CWT (CBOR Web Tokens) with structs. + +For example, "toarray" makes struct fields encode to array elements. And "keyasint" +makes struct fields encode to elements of CBOR map with int keys. + +https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png + +Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1 + +# Tests and Fuzzing + +Over 375 tests are included in this package. Cover-guided fuzzing is handled by +a private fuzzer that replaced fxamacker/cbor-fuzz years ago. +*/ +package cbor diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go new file mode 100644 index 000000000..6508e291d --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -0,0 +1,1989 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "github.com/x448/float16" +) + +// Marshal returns the CBOR encoding of v using default encoding options. +// See EncOptions for encoding options. +// +// Marshal uses the following encoding rules: +// +// If value implements the Marshaler interface, Marshal calls its +// MarshalCBOR method. +// +// If value implements encoding.BinaryMarshaler, Marshal calls its +// MarshalBinary method and encodes it as a CBOR byte string.
+// +// Boolean values encode as CBOR booleans (type 7). +// +// Positive integer values encode as CBOR positive integers (type 0). +// +// Negative integer values encode as CBOR negative integers (type 1). +// +// Floating point values encode as CBOR floating points (type 7). +// +// String values encode as CBOR text strings (type 3). +// +// []byte values encode as CBOR byte strings (type 2). +// +// Array and slice values encode as CBOR arrays (type 4). +// +// Map values encode as CBOR maps (type 5). +// +// Struct values encode as CBOR maps (type 5). Each exported struct field +// becomes a pair with field name encoded as CBOR text string (type 3) and +// field value encoded based on its type. See struct tag option "keyasint" +// to encode field name as CBOR integer (type 0 and 1). Also see struct +// tag option "toarray" for special field "_" to encode struct values as +// CBOR array (type 4). +// +// Marshal supports format string stored under the "cbor" key in the struct +// field's tag. CBOR format string can specify the name of the field, +// "omitempty" and "keyasint" options, and special case "-" for field omission. +// If "cbor" key is absent, Marshal uses "json" key. +// +// Struct field name is treated as integer if it has "keyasint" option in +// its format string. The format string must specify an integer as its +// field name. +// +// Special struct field "_" is used to specify struct level options, such as +// "toarray". "toarray" option enables Go struct to be encoded as CBOR array. +// "omitempty" is disabled by "toarray" to ensure that the same number +// of elements are encoded every time. +// +// Anonymous struct fields are marshaled as if their exported fields +// were fields in the outer struct. Marshal follows the same struct fields +// visibility rules used by JSON encoding package. +// +// time.Time values encode as text strings specified in RFC3339 or numerical +// representation of seconds since January 1, 1970 UTC depending on +// EncOptions.Time setting. Also See EncOptions.TimeTag to encode +// time.Time as CBOR tag with tag number 0 or 1. +// +// big.Int values encode as CBOR integers (type 0 and 1) if values fit. +// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See +// EncOptions.BigIntConvert to always encode big.Int values as CBOR +// bignums. +// +// Pointer values encode as the value pointed to. +// +// Interface values encode as the value stored in the interface. +// +// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7). +// +// Values of other types cannot be encoded in CBOR. Attempting +// to encode such a value causes Marshal to return an UnsupportedTypeError. +func Marshal(v interface{}) ([]byte, error) { + return defaultEncMode.Marshal(v) +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses default encoding options. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + return defaultEncMode.MarshalToBuffer(v, buf) +} + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid CBOR. +type Marshaler interface { + MarshalCBOR() ([]byte, error) +} + +// MarshalerError represents error from checking encoded CBOR data item +// returned from MarshalCBOR for well-formedness and some very limited tag validation. 
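Editor's note: the struct tag rules documented above are easiest to see in miniature. A sketch; the types and the claim keys 1 and 4 (borrowed from the CWT convention) are illustrative only:

```
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// claims uses "keyasint" so field names encode as small integer map keys.
type claims struct {
	Iss string `cbor:"1,keyasint"`
	Exp int64  `cbor:"4,keyasint,omitempty"`
}

// point uses the special "_" field with "toarray" so the struct encodes
// as a fixed-size CBOR array instead of a map.
type point struct {
	_    struct{} `cbor:",toarray"`
	X, Y int
}

func main() {
	b, _ := cbor.Marshal(claims{Iss: "issuer"})
	fmt.Printf("%x\n", b) // a10166697373756572: {1: "issuer"}, Exp omitted
	b, _ = cbor.Marshal(point{X: 1, Y: 2})
	fmt.Printf("%x\n", b) // 820102: [1, 2]
}
```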
+type MarshalerError struct { + typ reflect.Type + err error +} + +func (e *MarshalerError) Error() string { + return "cbor: error calling MarshalCBOR for type " + + e.typ.String() + + ": " + e.err.Error() +} + +func (e *MarshalerError) Unwrap() error { + return e.err +} + +// UnsupportedTypeError is returned by Marshal when attempting to encode value +// of an unsupported type. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "cbor: unsupported type: " + e.Type.String() +} + +// UnsupportedValueError is returned by Marshal when attempting to encode an +// unsupported value. +type UnsupportedValueError struct { + msg string +} + +func (e *UnsupportedValueError) Error() string { + return "cbor: unsupported value: " + e.msg +} + +// SortMode identifies supported sorting order. +type SortMode int + +const ( + // SortNone encodes map pairs and struct fields in an arbitrary order. + SortNone SortMode = 0 + + // SortLengthFirst causes map keys or struct fields to be sorted such that: + // - If two keys have different lengths, the shorter one sorts earlier; + // - If two keys have the same length, the one with the lower value in + // (byte-wise) lexical order sorts earlier. + // It is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortLengthFirst SortMode = 1 + + // SortBytewiseLexical causes map keys or struct fields to be sorted in the + // bytewise lexicographic order of their deterministic CBOR encodings. + // It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding" + // in RFC 7049bis. + SortBytewiseLexical SortMode = 2 + + // SortFastShuffle encodes map pairs and struct fields in a shuffled + // order. This mode does not guarantee an unbiased permutation, but it + // does guarantee that the runtime of the shuffle algorithm used will be + // constant. + SortFastShuffle SortMode = 3 + + // SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortCanonical SortMode = SortLengthFirst + + // SortCTAP2 is used in "CTAP2 Canonical CBOR". + SortCTAP2 SortMode = SortBytewiseLexical + + // SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis. + SortCoreDeterministic SortMode = SortBytewiseLexical + + maxSortMode SortMode = 4 +) + +func (sm SortMode) valid() bool { + return sm >= 0 && sm < maxSortMode +} + +// StringMode specifies how to encode Go string values. +type StringMode int + +const ( + // StringToTextString encodes Go string to CBOR text string (major type 3). + StringToTextString StringMode = iota + + // StringToByteString encodes Go string to CBOR byte string (major type 2). + StringToByteString +) + +func (st StringMode) cborType() (cborType, error) { + switch st { + case StringToTextString: + return cborTypeTextString, nil + + case StringToByteString: + return cborTypeByteString, nil + } + return 0, errors.New("cbor: invalid StringType " + strconv.Itoa(int(st))) +} + +// ShortestFloatMode specifies which floating-point format should +// be used as the shortest possible format for CBOR encoding. +// It is not used for encoding Infinity and NaN values. +type ShortestFloatMode int + +const ( + // ShortestFloatNone makes float values encode without any conversion. + // This is the default for ShortestFloatMode in v1. + // E.g. a float32 in Go will encode to CBOR float32. And + // a float64 in Go will encode to CBOR float64. + ShortestFloatNone ShortestFloatMode = iota + + // ShortestFloat16 specifies float16 as the shortest form that preserves value. + // E.g.
if float64 can convert to float32 while preserving value, then + encoding will also try to convert float32 to float16. So a float64 might + encode as CBOR float64, float32 or float16 depending on the value. + ShortestFloat16 + + maxShortestFloat +) + +func (sfm ShortestFloatMode) valid() bool { + return sfm >= 0 && sfm < maxShortestFloat +} + +// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type NaNConvertMode int + +const ( + // NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00). + NaNConvert7e00 NaNConvertMode = iota + + // NaNConvertNone never modifies or converts NaN to other representations + // (float64 NaN stays float64, etc. even if it can use float16 without losing + // any bits). + NaNConvertNone + + // NaNConvertPreserveSignal converts NaN to the smallest form that preserves + // value (quiet bit + payload) as described in RFC 7049bis Draft 12. + NaNConvertPreserveSignal + + // NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves + // NaN payload. + NaNConvertQuiet + + // NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value. + NaNConvertReject + + maxNaNConvert +) + +func (ncm NaNConvertMode) valid() bool { + return ncm >= 0 && ncm < maxNaNConvert +} + +// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type InfConvertMode int + +const ( + // InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16). + InfConvertFloat16 InfConvertMode = iota + + // InfConvertNone never converts (used by CTAP2 Canonical CBOR). + InfConvertNone + + // InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value. + InfConvertReject + + maxInfConvert +) + +func (icm InfConvertMode) valid() bool { + return icm >= 0 && icm < maxInfConvert +} + +// TimeMode specifies how to encode time.Time values. +type TimeMode int + +const ( + // TimeUnix causes time.Time to be encoded as epoch time in integer with second precision. + TimeUnix TimeMode = iota + + // TimeUnixMicro causes time.Time to be encoded as epoch time in floating-point rounded to microsecond precision. + TimeUnixMicro + + // TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds, + // otherwise floating-point rounded to microsecond precision. + TimeUnixDynamic + + // TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision. + TimeRFC3339 + + // TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision. + TimeRFC3339Nano + + maxTimeMode +) + +func (tm TimeMode) valid() bool { + return tm >= 0 && tm < maxTimeMode +} + +// BigIntConvertMode specifies how to encode big.Int values. +type BigIntConvertMode int + +const ( + // BigIntConvertShortest makes big.Int encode to CBOR integer if value fits. + // E.g. if big.Int value can be converted to CBOR integer while preserving + // value, encoder will encode it to CBOR integer (major type 0 or 1). + BigIntConvertShortest BigIntConvertMode = iota + + // BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without + // converting it to another CBOR type. + BigIntConvertNone + + // BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int.
+ BigIntConvertReject + + maxBigIntConvert +) + +func (bim BigIntConvertMode) valid() bool { + return bim >= 0 && bim < maxBigIntConvert +} + +// NilContainersMode specifies how to encode nil slices and maps. +type NilContainersMode int + +const ( + // NilContainerAsNull encodes nil slices and maps as CBOR null. + // This is the default. + NilContainerAsNull NilContainersMode = iota + + // NilContainerAsEmpty encodes nil slices and maps as + // empty container (CBOR bytestring, array, or map). + NilContainerAsEmpty + + maxNilContainersMode +) + +func (m NilContainersMode) valid() bool { + return m >= 0 && m < maxNilContainersMode +} + +// OmitEmptyMode specifies how to encode struct fields with omitempty tag. +// The default behavior omits if field value would encode as empty CBOR value. +type OmitEmptyMode int + +const ( + // OmitEmptyCBORValue specifies that struct fields tagged with "omitempty" + // should be omitted from encoding if the field would be encoded as an empty + // CBOR value, such as CBOR false, 0, 0.0, nil, empty byte string, empty string, + // empty array, or empty map. + OmitEmptyCBORValue OmitEmptyMode = iota + + // OmitEmptyGoValue specifies that struct fields tagged with "omitempty" + // should be omitted from encoding if the field has an empty Go value, + // defined as false, 0, 0.0, a nil pointer, a nil interface value, and + // any empty array, slice, map, or string. + // This behavior is the same as the current (aka v1) encoding/json package + // included in Go. + OmitEmptyGoValue + + maxOmitEmptyMode +) + +func (om OmitEmptyMode) valid() bool { + return om >= 0 && om < maxOmitEmptyMode +} + +// FieldNameMode specifies the CBOR type to use when encoding struct field names. +type FieldNameMode int + +const ( + // FieldNameToTextString encodes struct fields to CBOR text string (major type 3). + FieldNameToTextString FieldNameMode = iota + + // FieldNameToByteString encodes struct fields to CBOR byte string (major type 2). + FieldNameToByteString + + maxFieldNameMode +) + +func (fnm FieldNameMode) valid() bool { + return fnm >= 0 && fnm < maxFieldNameMode +} + +// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23) +// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will +// always encode unmodified bytes from the byte slice and just wrap it within +// CBOR tag 21, 22, or 23 if specified. +// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. +type ByteSliceLaterFormatMode int + +const ( + // ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // without adding CBOR tag 21, 22, or 23. + ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota + + // ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2). + ByteSliceLaterFormatBase64URL + + // ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2). + ByteSliceLaterFormatBase64 + + // ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase16 +) + +func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) { + switch bsefm { + case ByteSliceLaterFormatNone: + return 0, nil + + case ByteSliceLaterFormatBase64URL: + return tagNumExpectedLaterEncodingBase64URL, nil + + case ByteSliceLaterFormatBase64: + return tagNumExpectedLaterEncodingBase64, nil + + case ByteSliceLaterFormatBase16: + return tagNumExpectedLaterEncodingBase16, nil + } + return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm))) +} + +// ByteArrayMode specifies how to encode byte arrays. +type ByteArrayMode int + +const ( + // ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical + // length and contents is encoded. + ByteArrayToByteSlice ByteArrayMode = iota + + // ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer + // item for each byte in the array. + ByteArrayToArray + + maxByteArrayMode +) + +func (bam ByteArrayMode) valid() bool { + return bam >= 0 && bam < maxByteArrayMode +} + +// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler. +type BinaryMarshalerMode int + +const ( + // BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string. + BinaryMarshalerByteString BinaryMarshalerMode = iota + + // BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode. + BinaryMarshalerNone + + maxBinaryMarshalerMode +) + +func (bmm BinaryMarshalerMode) valid() bool { + return bmm >= 0 && bmm < maxBinaryMarshalerMode +} + +// EncOptions specifies encoding options. +type EncOptions struct { + // Sort specifies sorting order. + Sort SortMode + + // ShortestFloat specifies the shortest floating-point encoding that preserves + // the value being encoded. + ShortestFloat ShortestFloatMode + + // NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode. + NaNConvert NaNConvertMode + + // InfConvert specifies how to encode Inf and it overrides ShortestFloatMode. + InfConvert InfConvertMode + + // BigIntConvert specifies how to encode big.Int values. + BigIntConvert BigIntConvertMode + + // Time specifies how to encode time.Time. + Time TimeMode + + // TimeTag allows time.Time to be encoded with a tag number. + // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. + TimeTag EncTagMode + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // NilContainers specifies how to encode nil slices and maps. + NilContainers NilContainersMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // OmitEmptyMode specifies how to encode struct fields with omitempty tag. + OmitEmpty OmitEmptyMode + + // String specifies which CBOR type to use when encoding Go strings. + // - CBOR text string (major type 3) is default + // - CBOR byte string (major type 2) + String StringMode + + // FieldName specifies the CBOR type to use when encoding struct field names. + FieldName FieldNameMode + + // ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23) + // to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will + // always encode unmodified bytes from the byte slice and just wrap it within + // CBOR tag 21, 22, or 23 if specified. + // See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. 
+ ByteSliceLaterFormat ByteSliceLaterFormatMode + + // ByteArray specifies how to encode byte arrays. + ByteArray ByteArrayMode + + // BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler. + BinaryMarshaler BinaryMarshalerMode +} + +// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding, +// defined in RFC 7049 Section 3.9 with the following rules: +// +// 1. "Integers must be as small as possible." +// 2. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 3. The keys in every map must be sorted in length-first sorting order. +// See SortLengthFirst for details. +// 4. "Indefinite-length items must be made into definite-length items." +// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might +// need to be added. One example rule might be to have all floats start as a 64-bit +// float, then do a test conversion to a 32-bit float; if the result is the same numeric +// value, use the shorter value and repeat the process with a test conversion to a +// 16-bit float. (This rule selects 16-bit float for positive and negative Infinity +// as well.) Also, there are many representations for NaN. If NaN is an allowed value, +// it must always be represented as 0xf97e00." +func CanonicalEncOptions() EncOptions { + return EncOptions{ + Sort: SortCanonical, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding, +// defined in CTAP specification, with the following rules: +// +// 1. "Integers must be encoded as small as possible." +// 2. "The representations of any floating-point values are not changed." +// 3. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 4. "Indefinite-length items must be made into definite-length items." +// 5. The keys in every map must be sorted in bytewise lexicographic order. +// See SortBytewiseLexical for details. +// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present." +func CTAP2EncOptions() EncOptions { + return EncOptions{ + Sort: SortCTAP2, + ShortestFloat: ShortestFloatNone, + NaNConvert: NaNConvertNone, + InfConvert: InfConvertNone, + IndefLength: IndefLengthForbidden, + TagsMd: TagsForbidden, + } +} + +// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "Preferred serialization MUST be used. In particular, this means that arguments +// (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST +// be as short as possible" +// "Floating point values also MUST use the shortest form that preserves the value" +// 2. "Indefinite-length items MUST NOT appear." +// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of +// their deterministic encodings." +func CoreDetEncOptions() EncOptions { + return EncOptions{ + Sort: SortCoreDeterministic, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "The preferred serialization always uses the shortest form of representing the argument +// (Section 3);" +// 2.
"it also uses the shortest floating-point encoding that preserves the value being +// encoded (see Section 5.5)." +// "The preferred encoding for a floating-point value is the shortest floating-point encoding +// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the +// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter +// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding +// the shorter significand towards the right reconstitutes the original NaN value (for many +// applications, the single NaN encoding 0xf97e00 will suffice)." +// 3. "Definite length encoding is preferred whenever the length is known at the time the +// serialization of the item starts." +func PreferredUnsortedEncOptions() EncOptions { + return EncOptions{ + Sort: SortNone, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + } +} + +// EncMode returns EncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithTags(tags) +} + +// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.EncTag != EncTagNone { + ts[contentType] = tag + } + } + syncTags.RUnlock() + if len(ts) > 0 { + em.tags = ts + } + return em, nil +} + +// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency). +func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithSharedTags(tags) +} + +// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency). 
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + em.tags = tags + return em, nil +} + +func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.Sort.valid() { + return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort))) + } + if !opts.ShortestFloat.valid() { + return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat))) + } + if !opts.NaNConvert.valid() { + return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert))) + } + if !opts.InfConvert.valid() { + return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert))) + } + if !opts.BigIntConvert.valid() { + return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert))) + } + if !opts.Time.valid() { + return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time))) + } + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + if !opts.NilContainers.valid() { + return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers))) + } + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired { + return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired") + } + if !opts.OmitEmpty.valid() { + return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty))) + } + stringMajorType, err := opts.String.cborType() + if err != nil { + return nil, err + } + if !opts.FieldName.valid() { + return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName))) + } + byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag() + if err != nil { + return nil, err + } + if !opts.ByteArray.valid() { + return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray))) + } + if !opts.BinaryMarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler))) + } + em := encMode{ + sort: opts.Sort, + shortestFloat: opts.ShortestFloat, + nanConvert: opts.NaNConvert, + infConvert: opts.InfConvert, + bigIntConvert: opts.BigIntConvert, + time: opts.Time, + timeTag: opts.TimeTag, + indefLength: opts.IndefLength, + nilContainers: opts.NilContainers, + tagsMd: opts.TagsMd, + omitEmpty: opts.OmitEmpty, + stringType: opts.String, + stringMajorType: stringMajorType, + fieldName: opts.FieldName, + byteSliceLaterFormat: opts.ByteSliceLaterFormat, + byteSliceLaterEncodingTag: byteSliceLaterEncodingTag, + byteArray: opts.ByteArray, + binaryMarshaler: opts.BinaryMarshaler, + } + return &em, nil +} + +// EncMode is the main interface for CBOR encoding. 
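Editor's note: the validation above fails fast with a descriptive error before any mode is built, which is easy to confirm:

```
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// An out-of-range option never produces a mode.
	_, err := cbor.EncOptions{Sort: cbor.SortMode(99)}.EncMode()
	fmt.Println(err) // cbor: invalid SortMode 99
}
```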
+type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions +} + +// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by +// adding MarshalToBuffer to support user specified buffer rather than encoding +// into the built-in buffer pool. +type UserBufferEncMode interface { + EncMode + MarshalToBuffer(v interface{}, buf *bytes.Buffer) error + + // This private method is to prevent users implementing + // this interface and so future additions to it will + // not be breaking changes. + // See https://go.dev/blog/module-compatibility + unexport() +} + +type encMode struct { + tags tagProvider + sort SortMode + shortestFloat ShortestFloatMode + nanConvert NaNConvertMode + infConvert InfConvertMode + bigIntConvert BigIntConvertMode + time TimeMode + timeTag EncTagMode + indefLength IndefLengthMode + nilContainers NilContainersMode + tagsMd TagsMode + omitEmpty OmitEmptyMode + stringType StringMode + stringMajorType cborType + fieldName FieldNameMode + byteSliceLaterFormat ByteSliceLaterFormatMode + byteSliceLaterEncodingTag uint64 + byteArray ByteArrayMode + binaryMarshaler BinaryMarshalerMode +} + +var defaultEncMode, _ = EncOptions{}.encMode() + +// These four decoding modes are used by getMarshalerDecMode. +// maxNestedLevels, maxArrayElements, and maxMapPairs are +// set to max allowed limits to avoid rejecting Marshaler +// output that would have been the allowable output of a +// non-Marshaler object that exceeds default limits. +var ( + marshalerForbidIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsForbidden, + } + + marshalerAllowIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsForbidden, + } + + marshalerForbidIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsAllowed, + } + + marshalerAllowIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsAllowed, + } +) + +// getMarshalerDecMode returns one of four existing decoding modes +// which can be reused (safe for parallel use) for the purpose of +// checking if data returned by Marshaler is well-formed. +func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode { + switch { + case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed: + return &marshalerAllowIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden: + return &marshalerAllowIndefLengthForbidTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed: + return &marshalerForbidIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden: + return &marshalerForbidIndefLengthForbidTagsDecMode + + default: + // This should never happen, unless we add new options to + // IndefLengthMode or TagsMode without updating this function. 
+ return &decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: indefLength, + tagsMd: tagsMd, + } + } +} + +// EncOptions returns user specified options used to create this EncMode. +func (em *encMode) EncOptions() EncOptions { + return EncOptions{ + Sort: em.sort, + ShortestFloat: em.shortestFloat, + NaNConvert: em.nanConvert, + InfConvert: em.infConvert, + BigIntConvert: em.bigIntConvert, + Time: em.time, + TimeTag: em.timeTag, + IndefLength: em.indefLength, + NilContainers: em.nilContainers, + TagsMd: em.tagsMd, + OmitEmpty: em.omitEmpty, + String: em.stringType, + FieldName: em.fieldName, + ByteSliceLaterFormat: em.byteSliceLaterFormat, + ByteArray: em.byteArray, + BinaryMarshaler: em.binaryMarshaler, + } +} + +func (em *encMode) unexport() {} + +func (em *encMode) encTagBytes(t reflect.Type) []byte { + if em.tags != nil { + if tagItem := em.tags.getTagItemFromType(t); tagItem != nil { + return tagItem.cborTagNum + } + } + return nil +} + +// Marshal returns the CBOR encoding of v using em encoding mode. +// +// See the documentation for Marshal for details. +func (em *encMode) Marshal(v interface{}) ([]byte, error) { + e := getEncodeBuffer() + + if err := encode(e, em, reflect.ValueOf(v)); err != nil { + putEncodeBuffer(e) + return nil, err + } + + buf := make([]byte, e.Len()) + copy(buf, e.Bytes()) + + putEncodeBuffer(e) + return buf, nil +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses em encoding mode. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + if buf == nil { + return fmt.Errorf("cbor: encoding buffer provided by user is nil") + } + return encode(buf, em, reflect.ValueOf(v)) +} + +// NewEncoder returns a new encoder that writes to w using em EncMode. +func (em *encMode) NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, em: em} +} + +// encodeBufferPool caches unused bytes.Buffer objects for later reuse. 
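Editor's note: Marshal above copies the result out of the pooled buffer before returning it to the pool, which is the essential discipline of this pattern. A stand-alone sketch of the same get/copy/reset/put cycle:

```
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var pool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

func main() {
	buf := pool.Get().(*bytes.Buffer)
	buf.WriteString("payload")

	// Copy out before the buffer is reset and recycled.
	out := make([]byte, buf.Len())
	copy(out, buf.Bytes())

	buf.Reset()
	pool.Put(buf)
	fmt.Printf("%s\n", out)
}
```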
+var encodeBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(32) // TODO: make this configurable + return e + }, +} + +func getEncodeBuffer() *bytes.Buffer { + return encodeBufferPool.Get().(*bytes.Buffer) +} + +func putEncodeBuffer(e *bytes.Buffer) { + e.Reset() + encodeBufferPool.Put(e) +} + +type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error +type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error) + +func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if !v.IsValid() { + // v is zero value + e.Write(cborNil) + return nil + } + vt := v.Type() + f, _ := getEncodeFunc(vt) + if f == nil { + return &UnsupportedTypeError{vt} + } + + return f(e, em, v) +} + +func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + b := cborFalse + if v.Bool() { + b = cborTrue + } + e.Write(b) + return nil +} + +func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + i := v.Int() + if i >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(i)) + return nil + } + i = i*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(i)) + return nil +} + +func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypePositiveInt), v.Uint()) + return nil +} + +func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + f64 := v.Float() + if math.IsNaN(f64) { + return encodeNaN(e, em, v) + } + if math.IsInf(f64, 0) { + return encodeInf(e, em, v) + } + fopt := em.shortestFloat + if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) { + // Encode float64 + // Don't use encodeFloat64() because it cannot be inlined. + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64) + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil + } + + f32 := float32(f64) + if fopt == ShortestFloat16 { + var f16 float16.Float16 + p := float16.PrecisionFromfloat32(f32) + if p == float16.PrecisionExact { + // Roundtrip float32->float16->float32 test isn't needed. + f16 = float16.Fromfloat32(f32) + } else if p == float16.PrecisionUnknown { + // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. + f16 = float16.Fromfloat32(f32) + if f16.Float32() == f32 { + p = float16.PrecisionExact + } + } + if p == float16.PrecisionExact { + // Encode float16 + // Don't use encodeFloat16() because it cannot be inlined. + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil + } + } + + // Encode float32 + // Don't use encodeFloat32() because it cannot be inlined. 
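Editor's note: the ShortestFloat16 branch above hinges on an exact float32 -> float16 -> float32 round trip. A small check using the same x448/float16 package this file imports, with the two values RFC 8949 itself cites (5.5 and 5555.5):

```
package main

import (
	"fmt"

	"github.com/x448/float16"
)

func main() {
	f32 := float32(5.5)
	f16 := float16.Fromfloat32(f32)
	fmt.Println(f16.Float32() == f32) // true: 5.5 survives, so it encodes as 0xf94580

	g32 := float32(5555.5)
	g16 := float16.Fromfloat32(g32)
	fmt.Println(g16.Float32() == g32) // false: 5555.5 stays float32 (0xfa45ad9c00)
}
```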
+ const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + f64 := v.Float() + switch em.infConvert { + case InfConvertReject: + return &UnsupportedValueError{msg: "floating-point infinity"} + + case InfConvertFloat16: + if f64 > 0 { + e.Write(cborPositiveInfinity) + } else { + e.Write(cborNegativeInfinity) + } + return nil + } + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, f64) + } + return encodeFloat32(e, float32(f64)) +} + +func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error { + switch em.nanConvert { + case NaNConvert7e00: + e.Write(cborNaN) + return nil + + case NaNConvertNone: + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, v.Float()) + } + f32 := float32NaNFromReflectValue(v) + return encodeFloat32(e, f32) + + case NaNConvertReject: + return &UnsupportedValueError{msg: "floating-point NaN"} + + default: // NaNConvertPreserveSignal, NaNConvertQuiet + if v.Kind() == reflect.Float64 { + f64 := v.Float() + f64bits := math.Float64bits(f64) + if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 { + f64bits |= 1 << 51 // Set quiet bit = 1 + f64 = math.Float64frombits(f64bits) + } + // The lower 29 bits are dropped when converting from float64 to float32. + if f64bits&0x1fffffff != 0 { + // Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s. + return encodeFloat64(e, f64) + } + // Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits. + sign := uint32(f64bits>>32) & (1 << 31) + exp := uint32(0x7f800000) + coef := uint32((f64bits & 0xfffffffffffff) >> 29) + f32bits := sign | exp | coef + f32 := math.Float32frombits(f32bits) + // The lower 13 bits are dropped when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + // Encode NaN as float16 + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. + return encodeFloat16(e, f16) + } + + f32 := float32NaNFromReflectValue(v) + f32bits := math.Float32bits(f32) + if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 { + f32bits |= 1 << 22 // Set quiet bit = 1 + f32 = math.Float32frombits(f32bits) + } + // The lower 13 bits are dropped coef bits when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. 
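Editor's note: the two masks used above are easier to verify in isolation. A minimal sketch; the canonical quiet NaN 0x7ff8000000000000 is chosen because its payload is empty, so both narrowing steps drop only zero bits:

```
package main

import "fmt"

func main() {
	f64bits := uint64(0x7ff8000000000000) // quiet NaN, empty payload
	fmt.Println(f64bits&0x1fffffff == 0)  // true: float64 -> float32 loses nothing

	sign := uint32(f64bits>>32) & (1 << 31)
	f32bits := sign | 0x7f800000 | uint32((f64bits&0xfffffffffffff)>>29)
	fmt.Println(f32bits&0x1fff == 0) // true: float32 -> float16 loses nothing
}
```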
+ return encodeFloat16(e, f16) + } +} + +func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error { + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat32(e *bytes.Buffer, f32 float32) error { + const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat64(e *bytes.Buffer, f64 float64) error { + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64 + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil +} + +func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + vk := v.Kind() + if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 { + encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag) + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + slen := v.Len() + if slen == 0 { + return e.WriteByte(byte(cborTypeByteString)) + } + encodeHead(e, byte(cborTypeByteString), uint64(slen)) + if vk == reflect.Array { + for i := 0; i < slen; i++ { + e.WriteByte(byte(v.Index(i).Uint())) + } + return nil + } + e.Write(v.Bytes()) + return nil +} + +func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + s := v.String() + encodeHead(e, byte(em.stringMajorType), uint64(len(s))) + e.WriteString(s) + return nil +} + +type arrayEncodeFunc struct { + f encodeFunc +} + +func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 { + return encodeByteString(e, em, v) + } + if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + alen := v.Len() + if alen == 0 { + return e.WriteByte(byte(cborTypeArray)) + } + encodeHead(e, byte(cborTypeArray), uint64(alen)) + for i := 0; i < alen; i++ { + if err := ae.f(e, em, v.Index(i)); err != nil { + return err + } + } + return nil +} + +// encodeKeyValueFunc encodes key/value pairs in map (v). +// If kvs is provided (having the same length as v), length of encoded key and value are stored in kvs. +// kvs is used for canonical encoding of map. 
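Editor's note: before the sorter plumbing below, a quick end-to-end view of the canonical map ordering it implements. CoreDetEncOptions (defined earlier in this file) sorts pairs by the bytewise order of their encoded keys:

```
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	em, _ := cbor.CoreDetEncOptions().EncMode()
	// Key 1 encodes as 0x01 and key 10 as 0x0a, so 1 sorts first
	// regardless of Go's randomized map iteration order.
	b, _ := em.Marshal(map[int]string{10: "a", 1: "b"})
	fmt.Printf("%x\n", b) // a20161620a6161: {1: "b", 10: "a"}
}
```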
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error + +type mapEncodeFunc struct { + e encodeKeyValueFunc +} + +func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + mlen := v.Len() + if mlen == 0 { + return e.WriteByte(byte(cborTypeMap)) + } + + encodeHead(e, byte(cborTypeMap), uint64(mlen)) + if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { + return me.e(e, em, v, nil) + } + + kvsp := getKeyValues(v.Len()) // for sorting keys + defer putKeyValues(kvsp) + kvs := *kvsp + + kvBeginOffset := e.Len() + if err := me.e(e, em, v, kvs); err != nil { + return err + } + kvTotalLen := e.Len() - kvBeginOffset + + // Use the capacity at the tail of the encode buffer as a staging area to rearrange the + // encoded pairs into sorted order. + e.Grow(kvTotalLen) + tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+. + dst := e.Bytes()[kvBeginOffset:] + + if em.sort == SortBytewiseLexical { + sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst}) + } else { + sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst}) + } + + // This is where the encoded bytes are actually rearranged in the output buffer to reflect + // the desired order. + sortedOffset := 0 + for _, kv := range kvs { + copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset]) + sortedOffset += kv.nextOffset - kv.offset + } + copy(dst, tmp[:kvTotalLen]) + + return nil + +} + +// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative +// to the first byte of the first encoded pair. +type keyValue struct { + offset int + valueOffset int + nextOffset int +} + +type bytewiseKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *bytewiseKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *bytewiseKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *bytewiseKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +type lengthFirstKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *lengthFirstKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *lengthFirstKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { + return keyLengthDifference < 0 + } + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +var keyValuePool = sync.Pool{} + +func getKeyValues(length int) *[]keyValue { + v := keyValuePool.Get() + if v == nil { + y := make([]keyValue, length) + return &y + } + x := v.(*[]keyValue) + if cap(*x) >= length { + *x = (*x)[:length] + return x + } + // []keyValue from the pool does not have enough capacity. + // Return it back to the pool and create a new one. 
+ keyValuePool.Put(x) + y := make([]keyValue, length) + return &y +} + +func putKeyValues(x *[]keyValue) { + *x = (*x)[:0] + keyValuePool.Put(x) +} + +func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + flds := structType.fields + + encodeHead(e, byte(cborTypeArray), uint64(len(flds))) + for i := 0; i < len(flds); i++ { + f := flds[i] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Write CBOR nil for null pointer to embedded struct + e.Write(cborNil) + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + } + return nil +} + +func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + flds := structType.getFields(em) + + start := 0 + if em.sort == SortFastShuffle && len(flds) > 0 { + start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting. + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + // Encode head with struct field count. + // Head is rewritten later if actual encoded field count is different from struct field count. + encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) + + kvbegin := e.Len() + kvcount := 0 + for offset := 0; offset < len(flds); offset++ { + f := flds[(start+offset)%len(flds)] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + if f.omitEmpty { + empty, err := f.ief(em, fv) + if err != nil { + return err + } + if empty { + continue + } + } + + if !f.keyAsInt && em.fieldName == FieldNameToByteString { + e.Write(f.cborNameByteString) + } else { // int or text string + e.Write(f.cborName) + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + + kvcount++ + } + + if len(flds) == kvcount { + // Encoded element count in head is the same as actual element count. + return nil + } + + // Overwrite the bytes that were reserved for the head before encoding the map entries. + var actualHeadLen int + { + headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + } + + if actualHeadLen == encodedHeadLen { + // The bytes reserved for the encoded head were exactly the right size, so the + // encoded entries are already in their final positions. + return nil + } + + // We reserved more bytes than needed for the encoded head, based on the number of fields + // encoded. The encoded entries are offset to the right by the number of excess reserved + // bytes. Shift the entries left to remove the gap. 
+ excessReservedBytes := encodedHeadLen - actualHeadLen + dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvbegin:e.Len()] + copy(dst, src) + + // After shifting, the excess bytes are at the end of the output buffer and they are + // garbage. + e.Truncate(e.Len() - excessReservedBytes) + return nil +} + +func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() { + e.Write(cborNil) + return nil + } + return encode(e, em, v.Elem()) +} + +func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { + t := v.Interface().(time.Time) + if t.IsZero() { + e.Write(cborNil) // Even if tag is required, encode as CBOR null. + return nil + } + if em.timeTag == EncTagRequired { + tagNumber := 1 + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + tagNumber = 0 + } + encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) + } + switch em.time { + case TimeUnix: + secs := t.Unix() + return encodeInt(e, em, reflect.ValueOf(secs)) + + case TimeUnixMicro: + t = t.UTC().Round(time.Microsecond) + f := float64(t.UnixNano()) / 1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeUnixDynamic: + t = t.UTC().Round(time.Microsecond) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + if nsecs == 0 { + return encodeInt(e, em, reflect.ValueOf(secs)) + } + f := float64(secs) + float64(nsecs)/1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeRFC3339: + s := t.Format(time.RFC3339) + return encodeString(e, em, reflect.ValueOf(s)) + + default: // TimeRFC3339Nano + s := t.Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + } +} + +func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.bigIntConvert == BigIntConvertReject { + return &UnsupportedTypeError{Type: typeBigInt} + } + + vbi := v.Interface().(big.Int) + sign := vbi.Sign() + bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v + if sign < 0 { + // For negative number, convert to CBOR encoded number (-v-1). 
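+		// For example, -1 maps to 0 and -256 maps to 255.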
+ bi.Sub(bi, big.NewInt(1)) + } + + if em.bigIntConvert == BigIntConvertShortest { + if bi.IsUint64() { + if sign >= 0 { + // Encode as CBOR pos int (major type 0) + encodeHead(e, byte(cborTypePositiveInt), bi.Uint64()) + return nil + } + // Encode as CBOR neg int (major type 1) + encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64()) + return nil + } + } + + tagNum := 2 + if sign < 0 { + tagNum = 3 + } + // Write tag number + encodeHead(e, byte(cborTypeTag), uint64(tagNum)) + // Write bignum byte string + b := bi.Bytes() + encodeHead(e, byte(cborTypeByteString), uint64(len(b))) + e.Write(b) + return nil +} + +type binaryMarshalerEncoder struct { + alternateEncode encodeFunc + alternateIsEmpty isEmptyFunc +} + +func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateEncode(e, em, v) + } + + vt := v.Type() + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(vt) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return err + } + if b := em.encTagBytes(vt); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypeByteString), uint64(len(data))) + e.Write(data) + return nil +} + +func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateIsEmpty(em, v) + } + + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return false, err + } + return len(data) == 0, nil +} + +func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden && v.Type() == typeRawTag { + return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden") + } + m, ok := v.Interface().(Marshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(Marshaler) + } + data, err := m.MarshalCBOR() + if err != nil { + return err + } + + // Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3. + d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)} + err = d.wellformed(false, true) + if err != nil { + return &MarshalerError{typ: v.Type(), err: err} + } + + e.Write(data) + return nil +} + +func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden { + return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden") + } + + t := v.Interface().(Tag) + + if t.Number == 0 && t.Content == nil { + // Marshal uninitialized cbor.Tag + e.Write(cborNil) + return nil + } + + // Marshal tag number + encodeHead(e, byte(cborTypeTag), t.Number) + + vem := *em // shallow copy + + // For built-in tags, disable settings that may introduce tag validity errors when + // marshaling certain Content values. + switch t.Number { + case tagNumRFC3339Time: + vem.stringType = StringToTextString + vem.stringMajorType = cborTypeTextString + case tagNumUnsignedBignum, tagNumNegativeBignum: + vem.byteSliceLaterFormat = ByteSliceLaterFormatNone + vem.byteSliceLaterEncodingTag = 0 + } + + // Marshal tag content + return encode(e, &vem, reflect.ValueOf(t.Content)) +} + +// encodeHead writes CBOR head of specified type t and returns number of bytes written. 
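+// The head is the initial byte (major type in the high 3 bits, additional
+// information in the low 5 bits), followed by an argument of 0, 1, 2, 4, or
+// 8 bytes, so the returned length is always 1, 2, 3, 5, or 9.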
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int { + if n <= maxAdditionalInformationWithoutArgument { + const headSize = 1 + e.WriteByte(t | byte(n)) + return headSize + } + + if n <= math.MaxUint8 { + const headSize = 2 + scratch := [headSize]byte{ + t | byte(additionalInformationWith1ByteArgument), + byte(n), + } + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint16 { + const headSize = 3 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith2ByteArgument) + binary.BigEndian.PutUint16(scratch[1:], uint16(n)) + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint32 { + const headSize = 5 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith4ByteArgument) + binary.BigEndian.PutUint32(scratch[1:], uint32(n)) + e.Write(scratch[:]) + return headSize + } + + const headSize = 9 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith8ByteArgument) + binary.BigEndian.PutUint64(scratch[1:], n) + e.Write(scratch[:]) + return headSize +} + +var ( + typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() + typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + typeRawMessage = reflect.TypeOf(RawMessage(nil)) + typeByteString = reflect.TypeOf(ByteString("")) +) + +func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) { + k := t.Kind() + if k == reflect.Ptr { + return getEncodeIndirectValueFunc(t), isEmptyPtr + } + switch t { + case typeSimpleValue: + return encodeMarshalerType, isEmptyUint + + case typeTag: + return encodeTag, alwaysNotEmpty + + case typeTime: + return encodeTime, alwaysNotEmpty + + case typeBigInt: + return encodeBigInt, alwaysNotEmpty + + case typeRawMessage: + return encodeMarshalerType, isEmptySlice + + case typeByteString: + return encodeMarshalerType, isEmptyString + } + if reflect.PtrTo(t).Implements(typeMarshaler) { + return encodeMarshalerType, alwaysNotEmpty + } + if reflect.PtrTo(t).Implements(typeBinaryMarshaler) { + defer func() { + // capture encoding method used for modes that disable BinaryMarshaler + bme := binaryMarshalerEncoder{ + alternateEncode: ef, + alternateIsEmpty: ief, + } + ef = bme.encode + ief = bme.isEmpty + }() + } + switch k { + case reflect.Bool: + return encodeBool, isEmptyBool + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt, isEmptyInt + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint, isEmptyUint + + case reflect.Float32, reflect.Float64: + return encodeFloat, isEmptyFloat + + case reflect.String: + return encodeString, isEmptyString + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return encodeByteString, isEmptySlice + } + fallthrough + + case reflect.Array: + f, _ := getEncodeFunc(t.Elem()) + if f == nil { + return nil, nil + } + return arrayEncodeFunc{f: f}.encode, isEmptySlice + + case reflect.Map: + f := getEncodeMapFunc(t) + if f == nil { + return nil, nil + } + return f, isEmptyMap + + case reflect.Struct: + // Get struct's special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + if hasToArrayOption(tag) { + return encodeStructToArray, isEmptyStruct + } + } + } + return encodeStruct, isEmptyStruct + + case reflect.Interface: + return encodeIntf, isEmptyIntf + } + return nil, nil +} + +func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + f, _ := 
getEncodeFunc(t) + if f == nil { + return nil + } + return func(e *bytes.Buffer, em *encMode, v reflect.Value) error { + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + if v.Kind() == reflect.Ptr && v.IsNil() { + e.Write(cborNil) + return nil + } + return f(e, em, v) + } +} + +func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) { + return false, nil +} + +func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) { + return !v.Bool(), nil +} + +func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) { + return v.Int() == 0, nil +} + +func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) { + return v.Uint() == 0, nil +} + +func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) { + return v.Float() == 0.0, nil +} + +func isEmptyString(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return false, err + } + + if em.omitEmpty == OmitEmptyGoValue { + return false, nil + } + + if structType.toArray { + return len(structType.fields) == 0, nil + } + + if len(structType.fields) > len(structType.omitEmptyFieldsIdx) { + return false, nil + } + + for _, i := range structType.omitEmptyFieldsIdx { + f := structType.fields[i] + + // Get field value + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + empty, err := f.ief(em, fv) + if err != nil { + return false, err + } + if !empty { + return false, nil + } + } + return true, nil +} + +func cannotFitFloat32(f64 float64) bool { + f32 := float32(f64) + return float64(f32) != f64 +} + +// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit. +func float32NaNFromReflectValue(v reflect.Value) float32 { + // Keith Randall's workaround for issue https://github.com/golang/go/issues/36400 + p := reflect.New(v.Type()) + p.Elem().Set(v) + f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32) + return f32 +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/vendor/github.com/fxamacker/cbor/v2/encode_map.go new file mode 100644 index 000000000..8b4b4bbc5 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map.go @@ -0,0 +1,94 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build go1.20 + +package cbor + +import ( + "bytes" + "reflect" + "sync" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc + kpool, vpool sync.Pool +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + iterk := me.kpool.Get().(*reflect.Value) + defer func() { + iterk.SetZero() + me.kpool.Put(iterk) + }() + iterv := me.vpool.Get().(*reflect.Value) + defer func() { + iterv.SetZero() + me.vpool.Put(iterv) + }() + + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + if err := me.kf(e, em, *iterk); err != nil { + return err + } + if err := me.ef(e, em, *iterv); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + offset := e.Len() + if err := me.kf(e, em, *iterk); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, *iterv); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{ + kf: kf, + ef: ef, + kpool: sync.Pool{ + New: func() interface{} { + rk := reflect.New(t.Key()).Elem() + return &rk + }, + }, + vpool: sync.Pool{ + New: func() interface{} { + rv := reflect.New(t.Elem()).Elem() + return &rv + }, + }, + } + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go new file mode 100644 index 000000000..31c39336d --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go @@ -0,0 +1,60 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+
+//go:build !go1.20
+
+package cbor
+
+import (
+	"bytes"
+	"reflect"
+)
+
+type mapKeyValueEncodeFunc struct {
+	kf, ef encodeFunc
+}
+
+func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
+	if kvs == nil {
+		for i, iter := 0, v.MapRange(); iter.Next(); i++ {
+			if err := me.kf(e, em, iter.Key()); err != nil {
+				return err
+			}
+			if err := me.ef(e, em, iter.Value()); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	initial := e.Len()
+	for i, iter := 0, v.MapRange(); iter.Next(); i++ {
+		offset := e.Len()
+		if err := me.kf(e, em, iter.Key()); err != nil {
+			return err
+		}
+		valueOffset := e.Len()
+		if err := me.ef(e, em, iter.Value()); err != nil {
+			return err
+		}
+		kvs[i] = keyValue{
+			offset:      offset - initial,
+			valueOffset: valueOffset - initial,
+			nextOffset:  e.Len() - initial,
+		}
+	}
+
+	return nil
+}
+
+func getEncodeMapFunc(t reflect.Type) encodeFunc {
+	kf, _ := getEncodeFunc(t.Key())
+	ef, _ := getEncodeFunc(t.Elem())
+	if kf == nil || ef == nil {
+		return nil
+	}
+	mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef}
+	return mapEncodeFunc{
+		e: mkv.encodeKeyValues,
+	}.encode
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
new file mode 100644
index 000000000..de175cee4
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go
@@ -0,0 +1,69 @@
+package cbor
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+// SimpleValue represents a CBOR simple value.
+// A CBOR simple value is:
+// - an extension point like CBOR tag.
+// - a subset of CBOR major type 7 that isn't floating-point.
+// - "identified by a number between 0 and 255, but distinct from that number itself".
+// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key.
+//
+// CBOR simple values identified by 20..23 are: "false", "true", "null", and "undefined".
+// Other CBOR simple values are currently unassigned/reserved by IANA.
+type SimpleValue uint8
+
+var (
+	typeSimpleValue = reflect.TypeOf(SimpleValue(0))
+)
+
+// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7).
+func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
+	// RFC 8949 3.3. Floating-Point Numbers and Values with No Content says:
+	// "An encoder MUST NOT issue two-byte sequences that start with 0xf8
+	// (major type 7, additional information 24) and continue with a byte
+	// less than 0x20 (32 decimal). Such sequences are not well-formed.
+	// (This implies that an encoder cannot encode false, true, null, or
+	// undefined in two-byte sequences and that only the one-byte variants
+	// of these are well-formed; more generally speaking, each simple value
+	// only has a single representation variant)."
+
+	switch {
+	case sv <= maxSimpleValueInAdditionalInformation:
+		return []byte{byte(cborTypePrimitives) | byte(sv)}, nil
+
+	case sv >= minSimpleValueIn1ByteArgument:
+		return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil
+
+	default:
+		return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)}
+	}
+}
+
+// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
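+// For example, 0xf4 (false) decodes to SimpleValue(20), and the two-byte
+// sequence 0xf8 0x20 decodes to SimpleValue(32).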
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
+	if sv == nil {
+		return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
+	}
+
+	d := decoder{data: data, dm: defaultDecMode}
+
+	typ, ai, val := d.getHead()
+
+	if typ != cborTypePrimitives {
+		return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"}
+	}
+	if ai > additionalInformationWith1ByteArgument {
+		return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"}
+	}
+
+	// It is safe to cast val to uint8 here because
+	// - data is already verified to be well-formed CBOR simple value and
+	// - val is <= math.MaxUint8.
+	*sv = SimpleValue(val)
+	return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go
new file mode 100644
index 000000000..507ab6c18
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/stream.go
@@ -0,0 +1,277 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"reflect"
+)
+
+// Decoder reads and decodes CBOR values from io.Reader.
+type Decoder struct {
+	r         io.Reader
+	d         decoder
+	buf       []byte
+	off       int // next read offset in buf
+	bytesRead int
+}
+
+// NewDecoder returns a new decoder that reads and decodes from r using
+// the default decoding options.
+func NewDecoder(r io.Reader) *Decoder {
+	return defaultDecMode.NewDecoder(r)
+}
+
+// Decode reads the next CBOR value and decodes it into the value pointed to by v.
+func (dec *Decoder) Decode(v interface{}) error {
+	_, err := dec.readNext()
+	if err != nil {
+		// Return validation error or read error.
+		return err
+	}
+
+	dec.d.reset(dec.buf[dec.off:])
+	err = dec.d.value(v)
+
+	// Increment dec.off even if decoding err is not nil because
+	// dec.d.off points to the next CBOR data item if current
+	// CBOR data item is valid but failed to be decoded into v.
+	// This allows next CBOR data item to be decoded in next
+	// call to this function.
+	dec.off += dec.d.off
+	dec.bytesRead += dec.d.off
+
+	return err
+}
+
+// Skip skips to the next CBOR data item (if there is any),
+// otherwise it returns an error such as io.EOF, io.ErrUnexpectedEOF, etc.
+func (dec *Decoder) Skip() error {
+	n, err := dec.readNext()
+	if err != nil {
+		// Return validation error or read error.
+		return err
+	}
+
+	dec.off += n
+	dec.bytesRead += n
+	return nil
+}
+
+// NumBytesRead returns the number of bytes read.
+func (dec *Decoder) NumBytesRead() int {
+	return dec.bytesRead
+}
+
+// Buffered returns a reader for data remaining in Decoder's buffer.
+// The returned reader is valid until the next call to Decode or Skip.
+func (dec *Decoder) Buffered() io.Reader {
+	return bytes.NewReader(dec.buf[dec.off:])
+}
+
+// readNext() reads the next CBOR data item from Reader to buffer.
+// It returns the size of the next CBOR data item.
+// It also returns validation error or read error if any.
+func (dec *Decoder) readNext() (int, error) {
+	var readErr error
+	var validErr error
+
+	for {
+		// Process any unread data in dec.buf.
+		if dec.off < len(dec.buf) {
+			dec.d.reset(dec.buf[dec.off:])
+			off := dec.off // Save offset before data validation
+			validErr = dec.d.wellformed(true, false)
+			dec.off = off // Restore offset
+
+			if validErr == nil {
+				return dec.d.off, nil
+			}
+
+			if validErr != io.ErrUnexpectedEOF {
+				return 0, validErr
+			}
+
+			// Process last read error on io.ErrUnexpectedEOF.
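+			// The buffered item is truncated; whether that is fatal depends
+			// on what the previous read reported.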
+ if readErr != nil { + if readErr == io.EOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + return 0, readErr + } + } + + // More data is needed and there was no read error. + var n int + for n == 0 { + n, readErr = dec.read() + if n == 0 && readErr != nil { + // No more data can be read and read error is encountered. + // At this point, validErr is either nil or io.ErrUnexpectedEOF. + if readErr == io.EOF { + if validErr == io.ErrUnexpectedEOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + } + return 0, readErr + } + } + + // At this point, dec.buf contains new data from last read (n > 0). + } +} + +// read() reads data from Reader to buffer. +// It returns number of bytes read and any read error encountered. +// Postconditions: +// - dec.buf contains previously unread data and new data. +// - dec.off is 0. +func (dec *Decoder) read() (int, error) { + // Grow buf if needed. + const minRead = 512 + if cap(dec.buf)-len(dec.buf)+dec.off < minRead { + oldUnreadBuf := dec.buf[dec.off:] + dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead) + dec.overwriteBuf(oldUnreadBuf) + } + + // Copy unread data over read data and reset off to 0. + if dec.off > 0 { + dec.overwriteBuf(dec.buf[dec.off:]) + } + + // Read from reader and reslice buf. + n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)]) + dec.buf = dec.buf[0 : len(dec.buf)+n] + return n, err +} + +func (dec *Decoder) overwriteBuf(newBuf []byte) { + n := copy(dec.buf, newBuf) + dec.buf = dec.buf[:n] + dec.off = 0 +} + +// Encoder writes CBOR values to io.Writer. +type Encoder struct { + w io.Writer + em *encMode + indefTypes []cborType +} + +// NewEncoder returns a new encoder that writes to w using the default encoding options. +func NewEncoder(w io.Writer) *Encoder { + return defaultEncMode.NewEncoder(w) +} + +// Encode writes the CBOR encoding of v. +func (enc *Encoder) Encode(v interface{}) error { + if len(enc.indefTypes) > 0 && v != nil { + indefType := enc.indefTypes[len(enc.indefTypes)-1] + if indefType == cborTypeTextString { + k := reflect.TypeOf(v).Kind() + if k != reflect.String { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") + } + } else if indefType == cborTypeByteString { + t := reflect.TypeOf(v) + k := t.Kind() + if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string") + } + } + } + + buf := getEncodeBuffer() + + err := encode(buf, enc.em, reflect.ValueOf(v)) + if err == nil { + _, err = enc.w.Write(buf.Bytes()) + } + + putEncodeBuffer(buf) + return err +} + +// StartIndefiniteByteString starts byte string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteByteString() error { + return enc.startIndefinite(cborTypeByteString) +} + +// StartIndefiniteTextString starts text string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length text strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteTextString() error { + return enc.startIndefinite(cborTypeTextString) +} + +// StartIndefiniteArray starts array encoding of indefinite length. 
+// Subsequent calls of (*Encoder).Encode() encodes elements of the array
+// until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteArray() error {
+	return enc.startIndefinite(cborTypeArray)
+}
+
+// StartIndefiniteMap starts map encoding of indefinite length.
+// Subsequent calls of (*Encoder).Encode() encodes elements of the map
+// until EndIndefinite is called.
+func (enc *Encoder) StartIndefiniteMap() error {
+	return enc.startIndefinite(cborTypeMap)
+}
+
+// EndIndefinite closes the last opened indefinite length value.
+func (enc *Encoder) EndIndefinite() error {
+	if len(enc.indefTypes) == 0 {
+		return errors.New("cbor: cannot encode \"break\" code outside indefinite length values")
+	}
+	_, err := enc.w.Write([]byte{cborBreakFlag})
+	if err == nil {
+		enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1]
+	}
+	return err
+}
+
+var cborIndefHeader = map[cborType][]byte{
+	cborTypeByteString: {cborByteStringWithIndefiniteLengthHead},
+	cborTypeTextString: {cborTextStringWithIndefiniteLengthHead},
+	cborTypeArray:      {cborArrayWithIndefiniteLengthHead},
+	cborTypeMap:        {cborMapWithIndefiniteLengthHead},
+}
+
+func (enc *Encoder) startIndefinite(typ cborType) error {
+	if enc.em.indefLength == IndefLengthForbidden {
+		return &IndefiniteLengthError{typ}
+	}
+	_, err := enc.w.Write(cborIndefHeader[typ])
+	if err == nil {
+		enc.indefTypes = append(enc.indefTypes, typ)
+	}
+	return err
+}
+
+// RawMessage is a raw encoded CBOR value.
+type RawMessage []byte
+
+// MarshalCBOR returns m or CBOR nil if m is nil.
+func (m RawMessage) MarshalCBOR() ([]byte, error) {
+	if len(m) == 0 {
+		return cborNil, nil
+	}
+	return m, nil
+}
+
+// UnmarshalCBOR creates a copy of data and saves to *m.
+func (m *RawMessage) UnmarshalCBOR(data []byte) error {
+	if m == nil {
+		return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer")
+	}
+	*m = append((*m)[0:0], data...)
+	return nil
+}
diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go
new file mode 100644
index 000000000..81228acf0
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go
@@ -0,0 +1,260 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+	"reflect"
+	"sort"
+	"strings"
+)
+
+type field struct {
+	name               string
+	nameAsInt          int64 // used by the decoder to match field name with CBOR int
+	cborName           []byte
+	cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3
+	idx                []int
+	typ                reflect.Type
+	ef                 encodeFunc
+	ief                isEmptyFunc
+	typInfo            *typeInfo // used by the decoder to reuse type info
+	tagged             bool      // used to choose dominant field (at the same level tagged fields dominate untagged fields)
+	omitEmpty          bool      // used to skip empty field
+	keyAsInt           bool      // used to encode/decode field name as int
+}
+
+type fields []*field
+
+// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth.
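+// For example, field index [0] sorts before [0 1], which sorts before [1].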
+type indexFieldSorter struct { + fields fields +} + +func (x *indexFieldSorter) Len() int { + return len(x.fields) +} + +func (x *indexFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *indexFieldSorter) Less(i, j int) bool { + iIdx, jIdx := x.fields[i].idx, x.fields[j].idx + for k := 0; k < len(iIdx) && k < len(jIdx); k++ { + if iIdx[k] != jIdx[k] { + return iIdx[k] < jIdx[k] + } + } + return len(iIdx) <= len(jIdx) +} + +// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. +type nameLevelAndTagFieldSorter struct { + fields fields +} + +func (x *nameLevelAndTagFieldSorter) Len() int { + return len(x.fields) +} + +func (x *nameLevelAndTagFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { + fi, fj := x.fields[i], x.fields[j] + if fi.name != fj.name { + return fi.name < fj.name + } + if len(fi.idx) != len(fj.idx) { + return len(fi.idx) < len(fj.idx) + } + if fi.tagged != fj.tagged { + return fi.tagged + } + return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters. +} + +// getFields returns visible fields of struct type t following visibility rules for JSON encoding. +func getFields(t reflect.Type) (flds fields, structOptions string) { + // Get special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + structOptions = tag + } + } + + // nTypes contains next level anonymous fields' types and indexes + // (there can be multiple fields of the same type at the same level) + flds, nTypes := appendFields(t, nil, nil, nil) + + if len(nTypes) > 0 { + + var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes + vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels + + for len(nTypes) > 0 { + cTypes, nTypes = nTypes, nil + + for t, idx := range cTypes { + // If there are multiple anonymous fields of the same struct type at the same level, all are ignored. + if len(idx) > 1 { + continue + } + + // Anonymous field of the same type at deeper nested level is ignored. + if vTypes[t] { + continue + } + vTypes[t] = true + + flds, nTypes = appendFields(t, idx[0], flds, nTypes) + } + } + } + + sort.Sort(&nameLevelAndTagFieldSorter{flds}) + + // Keep visible fields. + j := 0 // index of next unique field + for i := 0; i < len(flds); { + name := flds[i].name + if i == len(flds)-1 || // last field + name != flds[i+1].name || // field i has unique field name + len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 + (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not + flds[j] = flds[i] + j++ + } + + // Skip fields with the same field name. + for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + } + } + if j != len(flds) { + flds = flds[:j] + } + + // Sort fields by field index + sort.Sort(&indexFieldSorter{flds}) + + return flds, structOptions +} + +// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes . 
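+// Anonymous struct fields are collected into nTypes so getFields can visit
+// them one nesting level at a time.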
+func appendFields( + t reflect.Type, + idx []int, + flds fields, + nTypes map[reflect.Type][][]int, +) ( + _flds fields, + _nTypes map[reflect.Type][][]int, +) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + if !isFieldExportable(f, ft.Kind()) { + continue + } + + tag := f.Tag.Get("cbor") + if tag == "" { + tag = f.Tag.Get("json") + } + if tag == "-" { + continue + } + + tagged := tag != "" + + // Parse field tag options + var tagFieldName string + var omitempty, keyasint bool + for j := 0; tag != ""; j++ { + var token string + idx := strings.IndexByte(tag, ',') + if idx == -1 { + token, tag = tag, "" + } else { + token, tag = tag[:idx], tag[idx+1:] + } + if j == 0 { + tagFieldName = token + } else { + switch token { + case "omitempty": + omitempty = true + case "keyasint": + keyasint = true + } + } + } + + fieldName := tagFieldName + if tagFieldName == "" { + fieldName = f.Name + } + + fIdx := make([]int, len(idx)+1) + copy(fIdx, idx) + fIdx[len(fIdx)-1] = i + + if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" { + flds = append(flds, &field{ + name: fieldName, + idx: fIdx, + typ: f.Type, + omitEmpty: omitempty, + keyAsInt: keyasint, + tagged: tagged}) + } else { + if nTypes == nil { + nTypes = make(map[reflect.Type][][]int) + } + nTypes[ft] = append(nTypes[ft], fIdx) + } + } + + return flds, nTypes +} + +// isFieldExportable returns true if f is an exportable (regular or anonymous) field or +// a nonexportable anonymous field of struct type. +// Nonexportable anonymous field of struct type can contain exportable fields. +func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam + exportable := f.PkgPath == "" + return exportable || (f.Anonymous && fk == reflect.Struct) +} + +type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error) + +// getFieldValue returns field value of struct v by index. When encountering null pointer +// to anonymous (embedded) struct field, f is called with the last traversed field value. +func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) { + fv = v + for i, n := range idx { + fv = fv.Field(n) + + if i < len(idx)-1 { + if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct { + if fv.IsNil() { + // Null pointer to embedded struct field + fv, err = f(fv) + if err != nil || !fv.IsValid() { + return fv, err + } + } + fv = fv.Elem() + } + } + } + return fv, nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/tag.go b/vendor/github.com/fxamacker/cbor/v2/tag.go new file mode 100644 index 000000000..5c4d2b7a4 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/tag.go @@ -0,0 +1,299 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and +// unmarshaling of tag content is subject to any encode and decode options that would apply to +// enclosed data item if it were to appear outside of a tag. +type Tag struct { + Number uint64 + Content interface{} +} + +// RawTag represents CBOR tag data, including tag number and raw tag content. +// RawTag implements Unmarshaler and Marshaler interfaces. +type RawTag struct { + Number uint64 + Content RawMessage +} + +// UnmarshalCBOR sets *t with tag number and raw tag content copied from data. 
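+// The tag content bytes are copied, so data may be modified after the call
+// returns.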
+func (t *RawTag) UnmarshalCBOR(data []byte) error { + if t == nil { + return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and undefined to cbor.RawTag is no-op. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Unmarshal tag number. + typ, _, num := d.getHead() + if typ != cborTypeTag { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()} + } + t.Number = num + + // Unmarshal tag content. + c := d.data[d.off:] + t.Content = make([]byte, len(c)) + copy(t.Content, c) + return nil +} + +// MarshalCBOR returns CBOR encoding of t. +func (t RawTag) MarshalCBOR() ([]byte, error) { + if t.Number == 0 && len(t.Content) == 0 { + // Marshal uninitialized cbor.RawTag + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil + } + + e := getEncodeBuffer() + + encodeHead(e, byte(cborTypeTag), t.Number) + + content := t.Content + if len(content) == 0 { + content = cborNil + } + + buf := make([]byte, len(e.Bytes())+len(content)) + n := copy(buf, e.Bytes()) + copy(buf[n:], content) + + putEncodeBuffer(e) + return buf, nil +} + +// DecTagMode specifies how decoder handles tag number. +type DecTagMode int + +const ( + // DecTagIgnored makes decoder ignore tag number (skips if present). + DecTagIgnored DecTagMode = iota + + // DecTagOptional makes decoder verify tag number if it's present. + DecTagOptional + + // DecTagRequired makes decoder verify tag number and tag number must be present. + DecTagRequired + + maxDecTagMode +) + +func (dtm DecTagMode) valid() bool { + return dtm >= 0 && dtm < maxDecTagMode +} + +// EncTagMode specifies how encoder handles tag number. +type EncTagMode int + +const ( + // EncTagNone makes encoder not encode tag number. + EncTagNone EncTagMode = iota + + // EncTagRequired makes encoder encode tag number. + EncTagRequired + + maxEncTagMode +) + +func (etm EncTagMode) valid() bool { + return etm >= 0 && etm < maxEncTagMode +} + +// TagOptions specifies how encoder and decoder handle tag number. +type TagOptions struct { + DecTag DecTagMode + EncTag EncTagMode +} + +// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode +// to provide CBOR tag support. +type TagSet interface { + // Add adds given tag number(s), content type, and tag options to TagSet. + Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error + + // Remove removes given tag content type from TagSet. 
+ Remove(contentType reflect.Type) + + tagProvider +} + +type tagProvider interface { + getTagItemFromType(t reflect.Type) *tagItem + getTypeFromTagNum(num []uint64) reflect.Type +} + +type tagItem struct { + num []uint64 + cborTagNum []byte + contentType reflect.Type + opts TagOptions +} + +func (t *tagItem) equalTagNum(num []uint64) bool { + // Fast path to compare 1 tag number + if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] { + return true + } + + if len(t.num) != len(num) { + return false + } + + for i := 0; i < len(t.num); i++ { + if t.num[i] != num[i] { + return false + } + } + + return true +} + +type ( + tagSet map[reflect.Type]*tagItem + + syncTagSet struct { + sync.RWMutex + t tagSet + } +) + +func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem { + return t[typ] +} + +func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type { + for typ, tag := range t { + if tag.equalTagNum(num) { + return typ + } + } + return nil +} + +// NewTagSet returns TagSet (safe for concurrency). +func NewTagSet() TagSet { + return &syncTagSet{t: make(map[reflect.Type]*tagItem)} +} + +// Add adds given tag number(s), content type, and tag options to TagSet. +func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error { + if contentType == nil { + return errors.New("cbor: cannot add nil content type to TagSet") + } + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + tag, err := newTagItem(opts, contentType, num, nestedNum...) + if err != nil { + return err + } + t.Lock() + defer t.Unlock() + for typ, ti := range t.t { + if typ == contentType { + return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet") + } + if ti.equalTagNum(tag.num) { + return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num) + } + } + t.t[contentType] = tag + return nil +} + +// Remove removes given tag content type from TagSet. 
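+// Pointer types are reduced to their element type before removal.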
+func (t *syncTagSet) Remove(contentType reflect.Type) { + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + t.Lock() + delete(t.t, contentType) + t.Unlock() +} + +func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem { + t.RLock() + ti := t.t[typ] + t.RUnlock() + return ti +} + +func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type { + t.RLock() + rt := t.t.getTypeFromTagNum(num) + t.RUnlock() + return rt +} + +func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) { + if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone { + return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet") + } + if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface { + return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String()) + } + if contentType == typeTime { + return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if contentType == typeBigInt { + return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically") + } + if contentType == typeTag { + return nil, errors.New("cbor: cannot add cbor.Tag to TagSet") + } + if contentType == typeRawTag { + return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet") + } + if num == 0 || num == 1 { + return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if num == 2 || num == 3 { + return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically") + } + if num == tagNumSelfDescribedCBOR { + return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically") + } + + te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType} + te.num = append(te.num, nestedNum...) + + // Cache encoded tag numbers + e := getEncodeBuffer() + for _, n := range te.num { + encodeHead(e, byte(cborTypeTag), n) + } + te.cborTagNum = make([]byte, e.Len()) + copy(te.cborTagNum, e.Bytes()) + putEncodeBuffer(e) + + return &te, nil +} + +var ( + typeTag = reflect.TypeOf(Tag{}) + typeRawTag = reflect.TypeOf(RawTag{}) +) + +// WrongTagError describes mismatch between CBOR tag and registered tag. +type WrongTagError struct { + RegisteredType reflect.Type + RegisteredTagNum []uint64 + TagNum []uint64 +} + +func (e *WrongTagError) Error() string { + return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum) +} diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go new file mode 100644 index 000000000..b40793b95 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -0,0 +1,394 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding/binary" + "errors" + "io" + "math" + "strconv" + + "github.com/x448/float16" +) + +// SyntaxError is a description of a CBOR syntax error. +type SyntaxError struct { + msg string +} + +func (e *SyntaxError) Error() string { return e.msg } + +// SemanticError is a description of a CBOR semantic error. 
+type SemanticError struct { + msg string +} + +func (e *SemanticError) Error() string { return e.msg } + +// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags. +type MaxNestedLevelError struct { + maxNestedLevels int +} + +func (e *MaxNestedLevelError) Error() string { + return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels) +} + +// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays. +type MaxArrayElementsError struct { + maxArrayElements int +} + +func (e *MaxArrayElementsError) Error() string { + return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array" +} + +// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps. +type MaxMapPairsError struct { + maxMapPairs int +} + +func (e *MaxMapPairsError) Error() string { + return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" +} + +// IndefiniteLengthError indicates found disallowed indefinite length items. +type IndefiniteLengthError struct { + t cborType +} + +func (e *IndefiniteLengthError) Error() string { + return "cbor: indefinite-length " + e.t.String() + " isn't allowed" +} + +// TagsMdError indicates found disallowed CBOR tags. +type TagsMdError struct { +} + +func (e *TagsMdError) Error() string { + return "cbor: CBOR tag isn't allowed" +} + +// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item. +type ExtraneousDataError struct { + numOfBytes int // number of bytes of extraneous data + index int // location of extraneous data +} + +func (e *ExtraneousDataError) Error() string { + return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index) +} + +// wellformed checks whether the CBOR data item is well-formed. +// allowExtraData indicates if extraneous data is allowed after the CBOR data item. +// - use allowExtraData = true when using Decoder.Decode() +// - use allowExtraData = false when using Unmarshal() +func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error { + if len(d.data) == d.off { + return io.EOF + } + _, err := d.wellformedInternal(0, checkBuiltinTags) + if err == nil { + if !allowExtraData && d.off != len(d.data) { + err = &ExtraneousDataError{len(d.data) - d.off, d.off} + } + } + return err +} + +// wellformedInternal checks data's well-formedness and returns max depth and error. 
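+// depth is the nesting level at the start of the data item; the returned
+// depth is the maximum nesting level reached while scanning it.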
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo + t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag() + if err != nil { + return 0, err + } + + switch t { + case cborTypeByteString, cborTypeTextString: + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) + } + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") + } + if len(d.data)-d.off < valInt { // valInt+off may overflow integer + return 0, io.ErrUnexpectedEOF + } + d.off += valInt + + case cborTypeArray, cborTypeMap: + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) + } + + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") + } + + if t == cborTypeArray { + if valInt > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if valInt > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + + count := 1 + if t == cborTypeMap { + count = 2 + } + maxDepth := depth + for j := 0; j < count; j++ { + for i := 0; i < valInt; i++ { + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt // Save max depth + } + } + } + depth = maxDepth + + case cborTypeTag: + if d.dm.tagsMd == TagsForbidden { + return 0, &TagsMdError{} + } + + tagNum := val + + // Scan nested tag numbers to avoid recursion. + for { + if len(d.data) == d.off { // Tag number must be followed by tag content. + return 0, io.ErrUnexpectedEOF + } + if checkBuiltinTags { + err = validBuiltinTag(tagNum, d.data[d.off]) + if err != nil { + return 0, err + } + } + if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) { + return 0, &UnacceptableDataItemError{ + CBORType: cborTypeTag.String(), + Message: "bignum", + } + } + if getType(d.data[d.off]) != cborTypeTag { + break + } + if _, _, tagNum, err = d.wellformedHead(); err != nil { + return 0, err + } + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + } + // Check tag content. + return d.wellformedInternal(depth, checkBuiltinTags) + } + + return depth, nil +} + +// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + // Peek ahead to get next type and indefinite length status. 
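+		// Each chunk must be a definite-length string of the same major type
+		// as the enclosing indefinite-length string.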
+ nt, ai := parseInitialByte(d.data[d.off]) + if t != nt { + return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} + } + if additionalInformation(ai).isIndefiniteLength() { + return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"} + } + if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + } + return depth, nil +} + +// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + maxDepth := depth + i := 0 + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt + } + i++ + if t == cborTypeArray { + if i > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if i%2 == 0 && i/2 > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + } + if t == cborTypeMap && i%2 == 1 { + return 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return maxDepth, nil +} + +func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, + err error, +) { + t, ai, val, err = d.wellformedHead() + if err != nil { + return + } + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) { + dataLen := len(d.data) - d.off + if dataLen == 0 { + return 0, 0, 0, io.ErrUnexpectedEOF + } + + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + dataLen-- + + if ai <= maxAdditionalInformationWithoutArgument { + return t, ai, val, nil + } + + if ai == additionalInformationWith1ByteArgument { + const argumentSize = 1 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(d.data[d.off]) + d.off++ + if t == cborTypePrimitives && val < 32 { + return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()} + } + return t, ai, val, nil + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + if t == cborTypePrimitives { + if err 
:= d.acceptableFloat(math.Float64frombits(val)); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if additionalInformation(ai).isIndefiniteLength() { + switch t { + case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag: + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} + case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite(). + return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return t, ai, val, nil + } + + // ai == 28, 29, 30 + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} +} + +func (d *decoder) acceptableFloat(f float64) error { + switch { + case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point NaN", + } + case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point infinity", + } + } + return nil +} diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index 8969526a6..7c7f0c69c 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index fb2f866f4..30568e768 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - parentValuesStr string - depth int - opts *Options - group string // for slog groups - groupDepth int + outputFormat outputFormat + prefix string + values []any + valuesStr string + depth int + opts *Options + groupName string // for slog groups + groups []groupDef } // outputFormat indicates which outputFormat to use. @@ -257,6 +256,13 @@ const ( outputJSON ) +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + // PseudoStruct is a list of key-value pairs that gets logged as a struct. type PseudoStruct []any @@ -264,76 +270,102 @@ type PseudoStruct []any func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. 
buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { - buf.WriteByte('{') // for the whole line + buf.WriteByte('{') // for the whole record } + // Render builtins vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape + f.flatten(buf, vals, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if f.parentValuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) } - buf.WriteString(f.parentValuesStr) - continuing = true - } + f.flatten(buf, vals, true) // escape user-provided keys - groupDepth := f.groupDepth - if f.group != "" { - if f.valuesStr != "" || len(args) != 0 { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } else { - // The group was empty - groupDepth-- + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) } - if f.valuesStr != "" { + if bodyStr != "" { if continuing { buf.WriteByte(f.comma()) } - buf.WriteString(f.valuesStr) - continuing = true + buf.WriteString(bodyStr) } - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole record } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - for i := 0; i < groupDepth; i++ { - buf.WriteByte('}') // for the groups + return buf.String() +} + +// renderGroup returns a string representation of the named group with rendered +// values and args. If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. +func (f Formatter) renderGroup(name string, values string, args string) string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + needClosingBrace := false + if name != "" && (values != "" || args != "") { + buf.WriteString(f.quoted(name, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') + needClosingBrace = true } - if f.outputFormat == outputJSON { - buf.WriteByte('}') // for the whole line + continuing := false + if values != "" { + buf.WriteString(values) + continuing = true + } + + if args != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(args) + } + + if needClosingBrace { + buf.WriteByte('}') } return buf.String() } -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. 
If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. +// flatten renders a list of key-value pairs into a buffer. If escapeKeys is +// true, the keys are assumed to have non-JSON-compatible characters in them +// and must be evaluated for escapes. // // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } v := kvList[i+1] - if i > 0 || continuing { + if i > 0 { if f.outputFormat == outputJSON { buf.WriteByte(f.comma()) } else { @@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any { // startGroup opens a new group scope (basically a sub-struct), which locks all // the current saved values and starts them anew. This is needed to satisfy // slog. -func (f *Formatter) startGroup(group string) { +func (f *Formatter) startGroup(name string) { // Unnamed groups are just inlined. - if group == "" { + if name == "" { return } - // Any saved values can no longer be changed. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - continuing := false - - if f.parentValuesStr != "" { - buf.WriteString(f.parentValuesStr) - continuing = true - } - - if f.group != "" && f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - } - - // NOTE: We don't close the scope here - that's done later, when a log line - // is actually rendered (because we have N scopes to close). - - f.parentValuesStr = buf.String() + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) // Start collecting new values. - f.group = group - f.groupDepth++ + f.groupName = name f.valuesStr = "" f.values = nil } @@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) { // Pre-render values, so we don't have to do it on each Info/Error call. 
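+	// The pre-rendered string is cached in f.valuesStr and reused as-is by
+	// every subsequent Info/Error call.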
buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys + f.flatten(buf, vals, true) // escape user-provided keys f.valuesStr = buf.String() } diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 000000000..e7f28ed6b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go index 03555184d..2b2e46310 100644 --- a/vendor/github.com/go-openapi/swag/initialism_index.go +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -16,9 +16,130 @@ package swag import ( "sort" + "strings" "sync" ) +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. 
+ commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int + for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + // indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. // Since go1.9, this may be implemented with sync.Map. 
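+
+// Example (illustrative, not part of the upstream API surface beyond
+// AddInitialisms itself): custom initialisms can be registered at program
+// startup, before any name mangling is done:
+//
+//	swag.AddInitialisms("NVML")
+//	_ = swag.ToGoName("nvml_device") // "NVMLDevice" once "NVML" is registered
+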
type indexOfInitialisms struct { @@ -63,3 +184,19 @@ func (m *indexOfInitialisms) sorted() (result []string) { sort.Sort(sort.Reverse(byInitialism(result))) return } + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9bb..8bb64ac32 100644 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string + kind lexemKind } +) - casualNameLexem struct { - original string - } +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } -func (l *casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go index a1825fb7d..274727a86 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased 
version + postSplitInitialismCheck bool + } + + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch +) + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. + + matchesPool struct { + *sync.Pool } - splitterOption func(*splitter) *splitter + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } ) -// split calls the splitter; splitter provides more control and post options +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. +// +// Use newSplitter for more control and options func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) - for _, lexem := range lexems { + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter + return s } // withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { +func withPostSplitInitialismCheck(s *splitter) { s.postSplitInitialismCheck = true +} + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) } - initialismMatches []*initialismMatch -) -func (s *splitter) toNameLexems(name string) []nameLexem { + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, 
keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). + newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue + } - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } } + + match.complete = true + match.end = currentRunePosition } - match.complete = true - match.end = currentRunePosition + *newMatches = append(*newMatches, match) } - - newMatches = append(newMatches, match) } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := 
s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) - } - - if lastAcceptedMatch.end+1 != len(nameRunes) { + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) 
+		s.appendBrokenDownCasualString(nameLexems, rest)
 	}
 
-	return nameLexems
-}
+	poolOfMatches.RedeemMatches(matches)
 
-func (s *splitter) initialismRuneEqual(a, b rune) bool {
-	return a == b
+	return nameLexems
 }
 
-func (s *splitter) breakInitialism(original string) nameLexem {
+func (s splitter) breakInitialism(original string) nameLexem {
 	return newInitialismNameLexem(original, original)
 }
 
-func (s *splitter) breakCasualString(str []rune) []nameLexem {
-	segments := make([]nameLexem, 0)
-	currentSegment := ""
+func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
+	currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can be reused
+	defer func() {
+		poolOfBuffers.RedeemBuffer(currentSegment)
+	}()
 
 	addCasualNameLexem := func(original string) {
-		segments = append(segments, newCasualNameLexem(original))
+		*segments = append(*segments, newCasualNameLexem(original))
 	}
 
 	addInitialismNameLexem := func(original, match string) {
-		segments = append(segments, newInitialismNameLexem(original, match))
+		*segments = append(*segments, newInitialismNameLexem(original, match))
 	}
 
-	addNameLexem := func(original string) {
-		if s.postSplitInitialismCheck {
-			for _, initialism := range s.initialisms {
-				if upper(initialism) == upper(original) {
-					addInitialismNameLexem(original, initialism)
+	var addNameLexem func(string)
+	if s.postSplitInitialismCheck {
+		addNameLexem = func(original string) {
+			for i := range s.initialisms {
+				if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
+					addInitialismNameLexem(original, s.initialisms[i])
 					return
 				}
 			}
-		}
-		addCasualNameLexem(original)
+			addCasualNameLexem(original)
+		}
+	} else {
+		addNameLexem = addCasualNameLexem
 	}
 
-	for _, rn := range string(str) {
-		if replace, found := nameReplaceTable[rn]; found {
-			if currentSegment != "" {
-				addNameLexem(currentSegment)
-				currentSegment = ""
+	for _, rn := range str {
+		if replace, found := nameReplaceTable(rn); found {
+			if currentSegment.Len() > 0 {
+				addNameLexem(currentSegment.String())
+				currentSegment.Reset()
 			}
 
 			if replace != "" {
@@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem {
 		}
 
 		if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
-			if currentSegment != "" {
-				addNameLexem(currentSegment)
-				currentSegment = ""
+			if currentSegment.Len() > 0 {
+				addNameLexem(currentSegment.String())
+				currentSegment.Reset()
 			}
 
 			continue
 		}
 
 		if unicode.IsUpper(rn) {
-			if currentSegment != "" {
-				addNameLexem(currentSegment)
+			if currentSegment.Len() > 0 {
+				addNameLexem(currentSegment.String())
 			}
-			currentSegment = ""
+			currentSegment.Reset()
 		}
 
-		currentSegment += string(rn)
+		currentSegment.WriteRune(rn)
+	}
+
+	if currentSegment.Len() > 0 {
+		addNameLexem(currentSegment.String())
 	}
+}
+
+// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
+// it ignores leading and trailing blank spaces in the compared
+// string.
+//
+// base is assumed to be composed of upper-cased runes, and already
+// trimmed.
+//
+// This code is heavily inspired by strings.EqualFold.
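+//
+// For example (illustrative):
+//
+//	isEqualFoldIgnoreSpace([]rune("HTTP"), " http ") // true: folds case, ignores outer blanks
+//	isEqualFoldIgnoreSpace([]rune("HTTP"), "ht tp")  // false: inner blanks still differ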
+func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } - if currentSegment != "" { - addNameLexem(currentSegment) + i += size } - return segments + return true } diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 000000000..c52d6bf71 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,22 @@ +package swag + +import "unsafe" + +type internalString struct { + Data unsafe.Pointer + Len int +} + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. +func hackStringBytes(str string) []byte { + p := (*internalString)(unsafe.Pointer(&str)).Data + return unsafe.Slice((*byte)(p), len(str)) +} + +/* + * go1.20 version (for when go mod moves to a go1.20 requirement): + +func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} +*/ diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 0413f7447..5051401c4 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -18,76 +18,25 @@ import ( "reflect" "strings" "unicode" + "unicode/utf8" ) -// commonInitialisms are common acronyms that are kept as whole uppercased words. -var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var isInitialism func(string) bool - // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // +// The prefix function is assumed to return a string that starts with an upper case letter. +// // e.g. 
to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in } - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - - // a test function - isInitialism = commonInitialisms.isInitialism + return GoNamePrefixFunc(name) + in } const ( @@ -156,22 +105,6 @@ func SplitByFormat(data, format string) []string { return result } -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - // Removes leading whitespaces func trim(str string) string { return strings.TrimSpace(str) @@ -188,15 +121,20 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) (camelized string) { +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + for pos, ru := range []rune(word) { if pos > 0 { - camelized += string(unicode.ToLower(ru)) + camelized.WriteRune(unicode.ToLower(ru)) } else { - camelized += string(unicode.ToUpper(ru)) + camelized.WriteRune(unicode.ToUpper(ru)) } } - return + return camelized.String() } // ToFileName lowercases and underscores a go type name @@ -224,26 +162,31 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(in)) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) - for _, w := range in { + for _, w := range *in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { out = append(out, trim(w.GetOriginal())) } } + poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(in)) - for _, w := range in { + out := make([]string, 0, len(*in)) + for _, w := range *in { original := 
trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) @@ -251,6 +194,8 @@ func ToHumanNameTitle(name string) string { out = append(out, original) } } + poolOfLexems.RedeemLexems(in) + return strings.Join(out, " ") } @@ -283,35 +228,70 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - lexems := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } - result := "" - for _, lexem := range lexems { + for _, lexem := range lexemes[1:] { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result += goName + result.WriteString(goName) } - if len(result) > 0 { - // Only prefix with X when the first character isn't an ascii letter - first := []rune(result)[0] - if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { - if GoNamePrefixFunc == nil { - return "X" + result - } - result = GoNamePrefixFunc(name) + result - } - first = []rune(result)[0] - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) - } - } - - return result + return result.String() } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -376,16 +356,6 @@ func IsZero(data interface{}) bool { } } -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() -} - // CommandLineOptionsGroup represents a group of user-defined command line options type CommandLineOptionsGroup struct { ShortDescription string diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md deleted file mode 100644 index 61d8ebffc..000000000 --- a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md +++ /dev/null @@ -1,364 +0,0 @@ -# Changelog - -## Release 3.2.0 (2020-12-14) - -### Added - -- #211: Added randInt function (thanks 
@kochurovro) -- #223: Added fromJson and mustFromJson functions (thanks @mholt) -- #242: Added a bcrypt function (thanks @robbiet480) -- #253: Added randBytes function (thanks @MikaelSmith) -- #254: Added dig function for dicts (thanks @nyarly) -- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) -- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) -- #268: Added and and all functions for testing conditions (thanks @phuslu) -- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf - (thanks @andrewmostello) -- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) -- #270: Extend certificate functions to handle non-RSA keys + add support for - ed25519 keys (thanks @misberner) - -### Changed - -- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer -- Using semver 3.1.1 and mergo 0.3.11 - -### Fixed - -- #249: Fix htmlDateInZone example (thanks @spawnia) - -NOTE: The dependency github.com/imdario/mergo reverted the breaking change in -0.3.9 via 0.3.10 release. - -## Release 3.1.0 (2020-04-16) - -NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 -that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. - -### Added - -- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) -- #224: Added duration filter (thanks @frebib) -- #205: Added `seq` function (thanks @thadc23) - -### Changed - -- #203: Unlambda functions with correct signature (thanks @muesli) -- #236: Updated the license formatting for GitHub display purposes -- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 - as it causes a breaking change for sprig. That issue is tracked at - https://github.com/imdario/mergo/issues/139 - -### Fixed - -- #229: Fix `seq` example in docs (thanks @kalmant) - -## Release 3.0.2 (2019-12-13) - -### Fixed - -- #220: Updating to semver v3.0.3 to fix issue with <= ranges -- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) - -## Release 3.0.1 (2019-12-08) - -### Fixed - -- #212: Updated semver fixing broken constraint checking with ^0.0 - -## Release 3.0.0 (2019-10-02) - -### Added - -- #187: Added durationRound function (thanks @yjp20) -- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) -- #193: Added toRawJson support (thanks @Dean-Coakley) -- #197: Added get support to dicts (thanks @Dean-Coakley) - -### Changed - -- #186: Moving dependency management to Go modules -- #186: Updated semver to v3. This has changes in the way ^ is handled -- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy -- #196: trunc now supports negative values (thanks @Dean-Coakley) - -## Release 2.22.0 (2019-10-02) - -### Added - -- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) -- #195: Added deepCopy function for use with dicts - -### Changed - -- Updated merge and mergeOverwrite documentation to explain copying and how to - use deepCopy with it - -## Release 2.21.0 (2019-09-18) - -### Added - -- #122: Added encryptAES/decryptAES functions (thanks @n0madic) -- #128: Added toDecimal support (thanks @Dean-Coakley) -- #169: Added list contcat (thanks @astorath) -- #174: Added deepEqual function (thanks @bonifaido) -- #170: Added url parse and join functions (thanks @astorath) - -### Changed - -- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify - -### Fixed - -- #172: Fix semver wildcard example (thanks @piepmatz) -- #175: Fix dateInZone doc example (thanks @s3than) - -## Release 2.20.0 (2019-06-18) - -### Added - -- #164: Adding function to get unix epoch for a time (@mattfarina) -- #166: Adding tests for date_in_zone (@mattfarina) - -### Changed - -- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) -- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) -- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) - -### Fixed - -## Release 2.19.0 (2019-03-02) - -IMPORTANT: This release reverts a change from 2.18.0 - -In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. - -We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. - -### Changed - -- Fix substr panic 35fb796 (Alexey igrychev) -- Remove extra period 1eb7729 (Matthew Lorimor) -- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) -- README edits/fixes/suggestions 08fe136 (Lauri Apple) - - -## Release 2.18.0 (2019-02-12) - -### Added - -- Added mergeOverwrite function -- cryptographic functions that use secure random (see fe1de12) - -### Changed - -- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) -- Handle has for nil list 9c10885 (Daniel Cohen) -- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) -- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) -- Replace outdated goutils imports 01893d2 (Matthew Lorimor) -- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) -- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) - -### Fixed - -- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) -- Fix substr var names and comments d581f80 (Dean Coakley) -- Fix substr documentation 2737203 (Dean Coakley) - -## Release 2.17.1 (2019-01-03) - -### Fixed - -The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
- -## Release 2.17.0 (2019-01-03) - -### Added - -- adds alder32sum function and test 6908fc2 (marshallford) -- Added kebabcase function ca331a1 (Ilyes512) - -### Changed - -- Update goutils to 1.1.0 4e1125d (Matt Butcher) - -### Fixed - -- Fix 'has' documentation e3f2a85 (dean-coakley) -- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) -- fixes spelling errors... not sure how that happened 4cf188a (marshallford) - -## Release 2.16.0 (2018-08-13) - -### Added - -- add splitn function fccb0b0 (Helgi Þorbjörnsson) -- Add slice func df28ca7 (gongdo) -- Generate serial number a3bdffd (Cody Coons) -- Extract values of dict with values function df39312 (Lawrence Jones) - -### Changed - -- Modify panic message for list.slice ae38335 (gongdo) -- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) -- Remove duplicated documentation 1d97af1 (Matthew Fisher) -- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) - -### Fixed - -- Fix file permissions c5f40b5 (gongdo) -- Fix example for buildCustomCert 7779e0d (Tin Lam) - -## Release 2.15.0 (2018-04-02) - -### Added - -- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) -- #66: Add ternary function (thanks @binoculars) -- #67: Allow keys function to take multiple dicts (thanks @binoculars) -- #89: Added sha1sum to crypto function (thanks @benkeil) -- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) -- #92: Add travis testing for go 1.10 -- #93: Adding appveyor config for windows testing - -### Changed - -- #90: Updating to more recent dependencies -- #73: replace satori/go.uuid with google/uuid (thanks @petterw) - -### Fixed - -- #76: Fixed documentation typos (thanks @Thiht) -- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older - -## Release 2.14.1 (2017-12-01) - -### Fixed - -- #60: Fix typo in function name documentation (thanks @neil-ca-moore) -- #61: Removing line with {{ due to blocking github pages genertion -- #64: Update the list functions to handle int, string, and other slices for compatibility - -## Release 2.14.0 (2017-10-06) - -This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
- -- `genCA` generates an SSL Certificate Authority -- `genSelfSignedCert` generates an SSL self-signed certificate -- `genSignedCert` generates an SSL certificate and key based on a given CA - -## Release 2.13.0 (2017-09-18) - -This release adds new functions, including: - -- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions -- `floor`, `ceil`, and `round` math functions -- `toDate` converts a string to a date -- `nindent` is just like `indent` but also prepends a new line -- `ago` returns the time from `time.Now` - -### Added - -- #40: Added basic regex functionality (thanks @alanquillin) -- #41: Added ceil floor and round functions (thanks @alanquillin) -- #48: Added toDate function (thanks @andreynering) -- #50: Added nindent function (thanks @binoculars) -- #46: Added ago function (thanks @slayer) - -### Changed - -- #51: Updated godocs to include new string functions (thanks @curtisallen) -- #49: Added ability to merge multiple dicts (thanks @binoculars) - -## Release 2.12.0 (2017-05-17) - -- `snakecase`, `camelcase`, and `shuffle` are three new string functions -- `fail` allows you to bail out of a template render when conditions are not met - -## Release 2.11.0 (2017-05-02) - -- Added `toJson` and `toPrettyJson` -- Added `merge` -- Refactored documentation - -## Release 2.10.0 (2017-03-15) - -- Added `semver` and `semverCompare` for Semantic Versions -- `list` replaces `tuple` -- Fixed issue with `join` -- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` - -## Release 2.9.0 (2017-02-23) - -- Added `splitList` to split a list -- Added crypto functions of `genPrivateKey` and `derivePassword` - -## Release 2.8.0 (2016-12-21) - -- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) -- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) - -## Release 2.7.0 (2016-12-01) - -- Added `sha256sum` to generate a hash of an input -- Added functions to convert a numeric or string to `int`, `int64`, `float64` - -## Release 2.6.0 (2016-10-03) - -- Added a `uuidv4` template function for generating UUIDs inside of a template. - -## Release 2.5.0 (2016-08-19) - -- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions -- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) -- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 - -## Release 2.4.0 (2016-08-16) - -- Adds two functions: `until` and `untilStep` - -## Release 2.3.0 (2016-06-21) - -- cat: Concatenate strings with whitespace separators. -- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" -- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" -- indent: Indent blocks of text in a way that is sensitive to "\n" characters. - -## Release 2.2.0 (2016-04-21) - -- Added a `genPrivateKey` function (Thanks @bacongobbler) - -## Release 2.1.0 (2016-03-30) - -- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. -- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
- -## Release 2.0.0 (2016-03-29) - -Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. - -- `min` complements `max` (formerly `biggest`) -- `empty` indicates that a value is the empty value for its type -- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` -- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` -- Date formatters have been added for HTML dates (as used in `date` input fields) -- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). - -## Release 1.2.0 (2016-02-01) - -- Added quote and squote -- Added b32enc and b32dec -- add now takes varargs -- biggest now takes varargs - -## Release 1.1.0 (2015-12-29) - -- Added #4: Added contains function. strings.Contains, but with the arguments - switched to simplify common pipelines. (thanks krancour) -- Added Travis-CI testing support - -## Release 1.0.0 (2015-12-23) - -- Initial release diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/README.md deleted file mode 100644 index 72579471f..000000000 --- a/vendor/github.com/go-task/slim-sprig/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) - -Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with -all functions that depend on external (non standard library) or crypto packages -removed. -The reason for this is to make this library more lightweight. Most of these -functions (specially crypto ones) are not needed on most apps, but costs a lot -in terms of binary size and compilation time. - -## Usage - -**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for -detailed instructions and code snippets for the >100 template functions available. - -**Go developers**: If you'd like to include Slim-Sprig as a library in your program, -our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig). - -For standard usage, read on. - -### Load the Slim-Sprig library - -To load the Slim-Sprig `FuncMap`: - -```go - -import ( - "html/template" - - "github.com/go-task/slim-sprig" -) - -// This example illustrates that the FuncMap *must* be set before the -// templates themselves are loaded. -tpl := template.Must( - template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") -) -``` - -### Calling the functions inside of templates - -By convention, all functions are lowercase. This seems to follow the Go -idiom for template functions (as opposed to template methods, which are -TitleCase). For example, this: - -``` -{{ "hello!" | upper | repeat 5 }} -``` - -produces this: - -``` -HELLO!HELLO!HELLO!HELLO!HELLO! -``` - -## Principles Driving Our Function Selection - -We followed these principles to decide which functions to add and how to implement them: - -- Use template functions to build layout. The following - types of operations are within the domain of template functions: - - Formatting - - Layout - - Simple type conversions - - Utilities that assist in handling common formatting and layout needs (e.g. 
arithmetic) -- Template functions should not return errors unless there is no way to print - a sensible value. For example, converting a string to an integer should not - produce an error if conversion fails. Instead, it should display a default - value. -- Simple math is necessary for grid layouts, pagers, and so on. Complex math - (anything other than arithmetic) should be done outside of templates. -- Template functions only deal with the data passed into them. They never retrieve - data from a source. -- Finally, do not override core Go template functions. diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.editorconfig rename to vendor/github.com/go-task/slim-sprig/v3/.editorconfig diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitattributes rename to vendor/github.com/go-task/slim-sprig/v3/.gitattributes diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitignore rename to vendor/github.com/go-task/slim-sprig/v3/.gitignore diff --git a/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md new file mode 100644 index 000000000..2ce45dd4e --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md @@ -0,0 +1,383 @@ +# Changelog + +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. 
+ +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. 
+ +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. + +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. 
Note, this removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages generation +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. + +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
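The 2.3.0 helpers above are ordinary template pipeline functions. A runnable sketch of the two documented pipelines, assuming a current Masterminds/sprig import path and its `TxtFuncMap` helper, both of which postdate this entry:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// The two pipelines quoted in the 2.3.0 notes above.
	const src = `{{ replace " " "-" "Me First" }}
{{ len "foo" | plural "one foo" "many foos" }}
`
	tpl := template.Must(template.New("demo").Funcs(sprig.TxtFuncMap()).Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// Prints:
	// Me-First
	// many foos
}
```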
+ +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. + +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a" "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt similarity index 100% rename from vendor/github.com/go-task/slim-sprig/LICENSE.txt rename to vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt diff --git a/vendor/github.com/go-task/slim-sprig/v3/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md new file mode 100644 index 000000000..b5ab56425 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/v3/README.md @@ -0,0 +1,73 @@ +# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3) + +Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with +all functions that depend on external (non standard library) or crypto packages +removed. +The reason for this is to make this library more lightweight. Most of these +functions (especially crypto ones) are not needed in most apps, but cost a lot +in terms of binary size and compilation time. + +## Usage + +**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for +detailed instructions and code snippets for the >100 template functions available. + +**Go developers**: If you'd like to include Slim-Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig). + +For standard usage, read on. + +### Load the Slim-Sprig library + +To load the Slim-Sprig `FuncMap`: + +```go + +import ( + "html/template" + + "github.com/go-task/slim-sprig" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase.
This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml similarity index 89% rename from vendor/github.com/go-task/slim-sprig/Taskfile.yml rename to vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml index cdcfd223b..8e6346bb1 100644 --- a/vendor/github.com/go-task/slim-sprig/Taskfile.yml +++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml @@ -1,6 +1,6 @@ # https://taskfile.dev -version: '2' +version: '3' tasks: default: diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/crypto.go rename to vendor/github.com/go-task/slim-sprig/v3/crypto.go diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/date.go rename to vendor/github.com/go-task/slim-sprig/v3/date.go diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/defaults.go rename to vendor/github.com/go-task/slim-sprig/v3/defaults.go diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/dict.go rename to vendor/github.com/go-task/slim-sprig/v3/dict.go diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/doc.go rename to vendor/github.com/go-task/slim-sprig/v3/doc.go diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/functions.go rename to vendor/github.com/go-task/slim-sprig/v3/functions.go diff --git a/vendor/github.com/go-task/slim-sprig/list.go b/vendor/github.com/go-task/slim-sprig/v3/list.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/list.go rename to vendor/github.com/go-task/slim-sprig/v3/list.go diff --git a/vendor/github.com/go-task/slim-sprig/network.go 
b/vendor/github.com/go-task/slim-sprig/v3/network.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/network.go rename to vendor/github.com/go-task/slim-sprig/v3/network.go diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/numeric.go rename to vendor/github.com/go-task/slim-sprig/v3/numeric.go diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/reflect.go rename to vendor/github.com/go-task/slim-sprig/v3/reflect.go diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/regex.go rename to vendor/github.com/go-task/slim-sprig/v3/regex.go diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/strings.go rename to vendor/github.com/go-task/slim-sprig/v3/strings.go diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/url.go rename to vendor/github.com/go-task/slim-sprig/v3/url.go diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 85f9f5736..fdff3fdb4 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -127,9 +127,10 @@ func Is(any *anypb.Any, m proto.Message) bool { // The allocated message is stored in the embedded proto.Message. // // Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) // // Deprecated: Use the any.UnmarshalNew method instead to unmarshal // the any message contents into a new instance of the underlying message. diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index ab7f03ae2..8ce9d3cf3 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -17,6 +17,7 @@ package profile import ( "errors" "sort" + "strings" ) func (p *Profile) decoder() []decoder { @@ -121,6 +122,7 @@ func (p *Profile) preEncode() { } p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + p.docURLX = addString(strings, p.DocURL) p.stringTable = make([]string, len(strings)) for s, i := range strings { @@ -155,6 +157,7 @@ func (p *Profile) encode(b *buffer) { encodeInt64Opt(b, 12, p.Period) encodeInt64s(b, 13, p.commentX) encodeInt64(b, 14, p.defaultSampleTypeX) + encodeInt64Opt(b, 15, p.docURLX) } var profileDecoder = []decoder{ @@ -183,12 +186,13 @@ var profileDecoder = []decoder{ // repeated Location location = 4 func(b *buffer, m message) error { x := new(Location) - x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer + x.Line = b.tmpLines[:0] // Use shared space temporarily pp := m.(*Profile) pp.Location = append(pp.Location, x) err := decodeMessage(b, x) - var tmp []Line - x.Line = append(tmp, x.Line...) 
// Shrink to allocated size + b.tmpLines = x.Line[:0] + // Copy to shrink size and detach from shared space. + x.Line = append([]Line(nil), x.Line...) return err }, // repeated Function function = 5 @@ -235,6 +239,8 @@ var profileDecoder = []decoder{ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, // int64 defaultSampleType = 14 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, + // string doc_link = 15; + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) }, } // postDecode takes the unexported fields populated by decode (with @@ -252,6 +258,14 @@ func (p *Profile) postDecode() error { } else { mappings[m.ID] = m } + + // If this a main linux kernel mapping with a relocation symbol suffix + // ("[kernel.kallsyms]_text"), extract said suffix. + // It is fairly hacky to handle at this level, but the alternatives appear even worse. + const prefix = "[kernel.kallsyms]" + if strings.HasPrefix(m.File, prefix) { + m.KernelRelocationSymbol = m.File[len(prefix):] + } } functions := make(map[uint64]*Function, len(p.Function)) @@ -298,41 +312,52 @@ func (p *Profile) postDecode() error { st.Unit, err = getString(p.stringTable, &st.unitX, err) } + // Pre-allocate space for all locations. + numLocations := 0 + for _, s := range p.Sample { + numLocations += len(s.locationIDX) + } + locBuffer := make([]*Location, numLocations) + for _, s := range p.Sample { - labels := make(map[string][]string, len(s.labelX)) - numLabels := make(map[string][]int64, len(s.labelX)) - numUnits := make(map[string][]string, len(s.labelX)) - for _, l := range s.labelX { - var key, value string - key, err = getString(p.stringTable, &l.keyX, err) - if l.strX != 0 { - value, err = getString(p.stringTable, &l.strX, err) - labels[key] = append(labels[key], value) - } else if l.numX != 0 || l.unitX != 0 { - numValues := numLabels[key] - units := numUnits[key] - if l.unitX != 0 { - var unit string - unit, err = getString(p.stringTable, &l.unitX, err) - units = padStringArray(units, len(numValues)) - numUnits[key] = append(units, unit) + if len(s.labelX) > 0 { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) } - numLabels[key] = append(numLabels[key], l.numX) } - } - if len(labels) > 0 { - s.Label = labels - } - if len(numLabels) > 0 { - s.NumLabel = numLabels - for key, units := range numUnits { - if len(units) > 0 { - numUnits[key] = padStringArray(units, len(numLabels[key])) + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } } + s.NumUnit = numUnits } - s.NumUnit = numUnits } - s.Location = make([]*Location, len(s.locationIDX)) + + s.Location = locBuffer[:len(s.locationIDX)] + locBuffer = 
locBuffer[len(s.locationIDX):] for i, lid := range s.locationIDX { if lid < uint64(len(locationIds)) { s.Location[i] = locationIds[lid] @@ -363,6 +388,7 @@ func (p *Profile) postDecode() error { p.commentX = nil p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.DocURL, err = getString(p.stringTable, &p.docURLX, err) p.stringTable = nil return err } @@ -509,6 +535,7 @@ func (p *Line) decoder() []decoder { func (p *Line) encode(b *buffer) { encodeUint64Opt(b, 1, p.functionIDX) encodeInt64Opt(b, 2, p.Line) + encodeInt64Opt(b, 3, p.Column) } var lineDecoder = []decoder{ @@ -517,6 +544,8 @@ var lineDecoder = []decoder{ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, // optional int64 line = 2 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, + // optional int64 column = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, } func (p *Function) decoder() []decoder { diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go index ea8e66c68..c794b9390 100644 --- a/vendor/github.com/google/pprof/profile/filter.go +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -22,6 +22,10 @@ import "regexp" // samples where at least one frame matches focus but none match ignore. // Returns true is the corresponding regexp matched at least one sample. func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + if focus == nil && ignore == nil && hide == nil && show == nil { + fm = true // Missing focus implies a match + return + } focusOrIgnore := make(map[uint64]bool) hidden := make(map[uint64]bool) for _, l := range p.Location { diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go index 91f45e53c..4580bab18 100644 --- a/vendor/github.com/google/pprof/profile/legacy_java_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } @@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) { } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go index 0c8f3bb5b..8d07fd6c2 100644 --- a/vendor/github.com/google/pprof/profile/legacy_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -295,11 +295,12 @@ func get64b(b []byte) (uint64, []byte) { // // The general format for profilez samples is a sequence of words in // binary format. The first words are a header with the following data: -// 1st word -- 0 -// 2nd word -- 3 -// 3rd word -- 0 if a c++ application, 1 if a java application. -// 4th word -- Sampling period (in microseconds). -// 5th word -- Padding. +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. 
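The five-word header spelled out above decodes in a few lines. A sketch assuming 64-bit little-endian words; the real parser, via helpers like `get64b` in the surrounding hunk, also probes 32-bit and big-endian layouts:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"time"
)

// readProfilezHeader validates the header words and extracts the sampling
// period; the 64-bit little-endian assumption is for illustration only.
func readProfilezHeader(b []byte) (period time.Duration, java bool, err error) {
	if len(b) < 5*8 {
		return 0, false, errors.New("truncated profilez header")
	}
	var w [5]uint64
	for i := range w {
		w[i] = binary.LittleEndian.Uint64(b[i*8:])
	}
	if w[0] != 0 || w[1] != 3 {
		return 0, false, errors.New("not a profilez CPU profile header")
	}
	// w[2] is 0 for a C++ profile and 1 for Java; w[3] is the sampling
	// period in microseconds; w[4] is padding.
	return time.Duration(w[3]) * time.Microsecond, w[2] == 1, nil
}

func main() {
	hdr := make([]byte, 5*8)
	binary.LittleEndian.PutUint64(hdr[8:], 3)      // 2nd word: 3
	binary.LittleEndian.PutUint64(hdr[24:], 10000) // 4th word: 10ms period
	period, java, err := readProfilezHeader(hdr)
	fmt.Println(period, java, err) // 10ms false <nil>
}
```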
func parseCPU(b []byte) (*Profile, error) { var parse func([]byte) (uint64, []byte) var n1, n2, n3, n4, n5 uint64 @@ -403,15 +404,18 @@ func cleanupDuplicateLocations(p *Profile) { // // profilez samples are a repeated sequence of stack frames of the // form: -// 1st word -- The number of times this stack was encountered. -// 2nd word -- The size of the stack (StackSize). -// 3rd word -- The first address on the stack. -// ... -// StackSize + 2 -- The last address on the stack +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// // The last stack trace is of the form: -// 1st word -- 0 -// 2nd word -- 1 -// 3rd word -- 0 +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 // // Addresses from stack traces may point to the next instruction after // each call. Optionally adjust by -1 to land somewhere on the actual @@ -861,7 +865,6 @@ func parseThread(b []byte) (*Profile, error) { // Recognize each thread and populate profile samples. for !isMemoryMapSentinel(line) { if strings.HasPrefix(line, "---- no stack trace for") { - line = "" break } if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 9978e7330..ba4d74640 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -15,6 +15,7 @@ package profile import ( + "encoding/binary" "fmt" "sort" "strconv" @@ -58,7 +59,7 @@ func Merge(srcs []*Profile) (*Profile, error) { for _, src := range srcs { // Clear the profile-specific hash tables - pm.locationsByID = make(map[uint64]*Location, len(src.Location)) + pm.locationsByID = makeLocationIDMap(len(src.Location)) pm.functionsByID = make(map[uint64]*Function, len(src.Function)) pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) @@ -136,7 +137,7 @@ type profileMerger struct { p *Profile // Memoization tables within a profile. - locationsByID map[uint64]*Location + locationsByID locationIDMap functionsByID map[uint64]*Function mappingsByID map[uint64]mapInfo @@ -153,6 +154,16 @@ type mapInfo struct { } func (pm *profileMerger) mapSample(src *Sample) *Sample { + // Check memoization table + k := pm.sampleKey(src) + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + + // Make new sample. s := &Sample{ Location: make([]*Location, len(src.Location)), Value: make([]int64, len(src.Value)), @@ -177,52 +188,98 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample { s.NumLabel[k] = vv s.NumUnit[k] = uu } - // Check memoization table. Must be done on the remapped location to - // account for the remapped mapping. Add current values to the - // existing sample. - k := s.key() - if ss, ok := pm.samples[k]; ok { - for i, v := range src.Value { - ss.Value[i] += v - } - return ss - } copy(s.Value, src.Value) pm.samples[k] = s pm.p.Sample = append(pm.p.Sample, s) return s } -// key generates sampleKey to be used as a key for maps. -func (sample *Sample) key() sampleKey { - ids := make([]string, len(sample.Location)) - for i, l := range sample.Location { - ids[i] = strconv.FormatUint(l.ID, 16) +func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { + // Accumulate contents into a string. 
+ var buf strings.Builder + buf.Grow(64) // Heuristic to avoid extra allocs + + // encode a number + putNumber := func(v uint64) { + var num [binary.MaxVarintLen64]byte + n := binary.PutUvarint(num[:], v) + buf.Write(num[:n]) + } + + // encode a string prefixed with its length. + putDelimitedString := func(s string) { + putNumber(uint64(len(s))) + buf.WriteString(s) + } + + for _, l := range sample.Location { + // Get the location in the merged profile, which may have a different ID. + if loc := pm.mapLocation(l); loc != nil { + putNumber(loc.ID) + } } + putNumber(0) // Delimiter - labels := make([]string, 0, len(sample.Label)) - for k, v := range sample.Label { - labels = append(labels, fmt.Sprintf("%q%q", k, v)) + for _, l := range sortedKeys1(sample.Label) { + putDelimitedString(l) + values := sample.Label[l] + putNumber(uint64(len(values))) + for _, v := range values { + putDelimitedString(v) + } } - sort.Strings(labels) - numlabels := make([]string, 0, len(sample.NumLabel)) - for k, v := range sample.NumLabel { - numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) + for _, l := range sortedKeys2(sample.NumLabel) { + putDelimitedString(l) + values := sample.NumLabel[l] + putNumber(uint64(len(values))) + for _, v := range values { + putNumber(uint64(v)) + } + units := sample.NumUnit[l] + putNumber(uint64(len(units))) + for _, v := range units { + putDelimitedString(v) + } } - sort.Strings(numlabels) - return sampleKey{ - strings.Join(ids, "|"), - strings.Join(labels, ""), - strings.Join(numlabels, ""), + return sampleKey(buf.String()) +} + +type sampleKey string + +// sortedKeys1 returns the sorted keys found in a string->[]string map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys2 and made into a generic function. +func sortedKeys1(m map[string][]string) []string { + if len(m) == 0 { + return nil } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys } -type sampleKey struct { - locations string - labels string - numlabels string +// sortedKeys2 returns the sorted keys found in a string->[]int64 map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys1 and made into a generic function. +func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys } func (pm *profileMerger) mapLocation(src *Location) *Location { @@ -230,7 +287,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { return nil } - if l, ok := pm.locationsByID[src.ID]; ok { + if l := pm.locationsByID.get(src.ID); l != nil { return l } @@ -249,10 +306,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { // account for the remapped mapping ID. 
k := l.key() if ll, ok := pm.locations[k]; ok { - pm.locationsByID[src.ID] = ll + pm.locationsByID.set(src.ID, ll) return ll } - pm.locationsByID[src.ID] = l + pm.locationsByID.set(src.ID, l) pm.locations[k] = l pm.p.Location = append(pm.p.Location, l) return l @@ -269,12 +326,13 @@ func (l *Location) key() locationKey { key.addr -= l.Mapping.Start key.mappingID = l.Mapping.ID } - lines := make([]string, len(l.Line)*2) + lines := make([]string, len(l.Line)*3) for i, line := range l.Line { if line.Function != nil { lines[i*2] = strconv.FormatUint(line.Function.ID, 16) } lines[i*2+1] = strconv.FormatInt(line.Line, 16) + lines[i*2+2] = strconv.FormatInt(line.Column, 16) } key.lines = strings.Join(lines, "|") return key @@ -303,16 +361,17 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { return mi } m := &Mapping{ - ID: uint64(len(pm.p.Mapping) + 1), - Start: src.Start, - Limit: src.Limit, - Offset: src.Offset, - File: src.File, - BuildID: src.BuildID, - HasFunctions: src.HasFunctions, - HasFilenames: src.HasFilenames, - HasLineNumbers: src.HasLineNumbers, - HasInlineFrames: src.HasInlineFrames, + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, } pm.p.Mapping = append(pm.p.Mapping, m) @@ -360,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line { ln := Line{ Function: pm.mapFunction(src.Function), Line: src.Line, + Column: src.Column, } return ln } @@ -416,6 +476,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { var timeNanos, durationNanos, period int64 var comments []string seenComments := map[string]bool{} + var docURL string var defaultSampleType string for _, s := range srcs { if timeNanos == 0 || s.TimeNanos < timeNanos { @@ -434,6 +495,9 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { if defaultSampleType == "" { defaultSampleType = s.DefaultSampleType } + if docURL == "" { + docURL = s.DocURL + } } p := &Profile{ @@ -449,6 +513,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { Comments: comments, DefaultSampleType: defaultSampleType, + DocURL: docURL, } copy(p.SampleType, srcs[0].SampleType) return p, nil @@ -479,3 +544,131 @@ func (p *Profile) compatible(pb *Profile) error { func equalValueType(st1, st2 *ValueType) bool { return st1.Type == st2.Type && st1.Unit == st2.Unit } + +// locationIDMap is like a map[uint64]*Location, but provides efficiency for +// ids that are densely numbered, which is often the case. +type locationIDMap struct { + dense []*Location // indexed by id for id < len(dense) + sparse map[uint64]*Location // indexed by id for id >= len(dense) +} + +func makeLocationIDMap(n int) locationIDMap { + return locationIDMap{ + dense: make([]*Location, n), + sparse: map[uint64]*Location{}, + } +} + +func (lm locationIDMap) get(id uint64) *Location { + if id < uint64(len(lm.dense)) { + return lm.dense[int(id)] + } + return lm.sparse[id] +} + +func (lm locationIDMap) set(id uint64, loc *Location) { + if id < uint64(len(lm.dense)) { + lm.dense[id] = loc + return + } + lm.sparse[id] = loc +} + +// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It +// keeps sample types that appear in all profiles only and drops/reorders the +// sample types as necessary. 
+// +// In the case of sample types order is not the same for given profiles the +// order is derived from the first profile. +// +// Profiles are modified in-place. +// +// It returns an error if the sample type's intersection is empty. +func CompatibilizeSampleTypes(ps []*Profile) error { + sTypes := commonSampleTypes(ps) + if len(sTypes) == 0 { + return fmt.Errorf("profiles have empty common sample type list") + } + for _, p := range ps { + if err := compatibilizeSampleTypes(p, sTypes); err != nil { + return err + } + } + return nil +} + +// commonSampleTypes returns sample types that appear in all profiles in the +// order how they ordered in the first profile. +func commonSampleTypes(ps []*Profile) []string { + if len(ps) == 0 { + return nil + } + sTypes := map[string]int{} + for _, p := range ps { + for _, st := range p.SampleType { + sTypes[st.Type]++ + } + } + var res []string + for _, st := range ps[0].SampleType { + if sTypes[st.Type] == len(ps) { + res = append(res, st.Type) + } + } + return res +} + +// compatibilizeSampleTypes drops sample types that are not present in sTypes +// list and reorder them if needed. +// +// It sets DefaultSampleType to sType[0] if it is not in sType list. +// +// It assumes that all sample types from the sTypes list are present in the +// given profile otherwise it returns an error. +func compatibilizeSampleTypes(p *Profile, sTypes []string) error { + if len(sTypes) == 0 { + return fmt.Errorf("sample type list is empty") + } + defaultSampleType := sTypes[0] + reMap, needToModify := make([]int, len(sTypes)), false + for i, st := range sTypes { + if st == p.DefaultSampleType { + defaultSampleType = p.DefaultSampleType + } + idx := searchValueType(p.SampleType, st) + if idx < 0 { + return fmt.Errorf("%q sample type is not found in profile", st) + } + reMap[i] = idx + if idx != i { + needToModify = true + } + } + if !needToModify && len(sTypes) == len(p.SampleType) { + return nil + } + p.DefaultSampleType = defaultSampleType + oldSampleTypes := p.SampleType + p.SampleType = make([]*ValueType, len(sTypes)) + for i, idx := range reMap { + p.SampleType[i] = oldSampleTypes[idx] + } + values := make([]int64, len(sTypes)) + for _, s := range p.Sample { + for i, idx := range reMap { + values[i] = s.Value[idx] + } + s.Value = s.Value[:len(values)] + copy(s.Value, values) + } + return nil +} + +func searchValueType(vts []*ValueType, s string) int { + for i, vt := range vts { + if vt.Type == s { + return i + } + } + return -1 +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 2590c8ddb..f47a24390 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -21,7 +21,6 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "math" "path/filepath" "regexp" @@ -40,6 +39,7 @@ type Profile struct { Location []*Location Function []*Function Comments []string + DocURL string DropFrames string KeepFrames string @@ -54,6 +54,7 @@ type Profile struct { encodeMu sync.Mutex commentX []int64 + docURLX int64 dropFramesX int64 keepFramesX int64 stringTable []string @@ -73,9 +74,23 @@ type ValueType struct { type Sample struct { Location []*Location Value []int64 - Label map[string][]string + // Label is a per-label-key map to values for string labels. + // + // In general, having multiple values for the given label key is strongly + // discouraged - see docs for the sample label field in profile.proto. 
The + // main reason this unlikely state is tracked here is to make the + // decoding->encoding roundtrip not lossy. But we expect that the value + // slices present in this map are always of length 1. + Label map[string][]string + // NumLabel is a per-label-key map to values for numeric labels. See a note + // above on handling multiple values for a label. NumLabel map[string][]int64 - NumUnit map[string][]string + // NumUnit is a per-label-key map to the unit names of corresponding numeric + // label values. The unit info may be missing even if the label is in + // NumLabel, see the docs in profile.proto for details. When the value is + // slice is present and not nil, its length must be equal to the length of + // the corresponding value slice in NumLabel. + NumUnit map[string][]string locationIDX []uint64 labelX []label @@ -106,6 +121,15 @@ type Mapping struct { fileX int64 buildIDX int64 + + // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. + // For linux kernel mappings generated by some tools, correct symbolization depends + // on knowing which of the two possible relocation symbols was used for `Start`. + // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). + // + // Note, this public field is not persisted in the proto. For the purposes of + // copying / merging / hashing profiles, it is considered subsumed by `File`. + KernelRelocationSymbol string } // Location corresponds to Profile.Location @@ -123,6 +147,7 @@ type Location struct { type Line struct { Function *Function Line int64 + Column int64 functionIDX uint64 } @@ -144,7 +169,7 @@ type Function struct { // may be a gzip-compressed encoded protobuf or one of many legacy // profile formats which may be unsupported in the future. func Parse(r io.Reader) (*Profile, error) { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return nil, err } @@ -159,7 +184,7 @@ func ParseData(data []byte) (*Profile, error) { if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err == nil { - data, err = ioutil.ReadAll(gz) + data, err = io.ReadAll(gz) } if err != nil { return nil, fmt.Errorf("decompressing profile: %v", err) @@ -414,7 +439,7 @@ func (p *Profile) CheckValid() error { // Aggregate merges the locations in the profile into equivalence // classes preserving the request attributes. It also updates the // samples to point to the merged locations. 
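The hunk that follows widens `Aggregate` with a `columnnumber` flag between `linenumber` and `address`. A hedged sketch of a caller updated for the six-argument form, mirroring the call sites changed in legacy_java_profile.go (the input path is assumed):

```go
package main

import (
	"fmt"
	"os"

	"github.com/google/pprof/profile"
)

func main() {
	f, err := os.Open("cpu.pprof") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	p, err := profile.Parse(f)
	if err != nil {
		panic(err)
	}

	// inlineFrame, function, filename, linenumber, columnnumber, address:
	// keep symbol information but strip column numbers and raw addresses
	// for a better merge, as the Java parsers in this diff do.
	if err := p.Aggregate(true, true, true, true, false, false); err != nil {
		panic(err)
	}
	fmt.Printf("aggregated %d samples\n", len(p.Sample))
}
```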
-func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { for _, m := range p.Mapping { m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasFunctions = m.HasFunctions && function @@ -436,7 +461,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address } // Aggregate locations - if !inlineFrame || !address || !linenumber { + if !inlineFrame || !address || !linenumber || !columnnumber { for _, l := range p.Location { if !inlineFrame && len(l.Line) > 1 { l.Line = l.Line[len(l.Line)-1:] @@ -444,6 +469,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address if !linenumber { for i := range l.Line { l.Line[i].Line = 0 + l.Line[i].Column = 0 + } + } + if !columnnumber { + for i := range l.Line { + l.Line[i].Column = 0 } } if !address { @@ -526,6 +557,9 @@ func (p *Profile) String() string { for _, c := range p.Comments { ss = append(ss, "Comment: "+c) } + if url := p.DocURL; url != "" { + ss = append(ss, fmt.Sprintf("Doc: %s", url)) + } if pt := p.PeriodType; pt != nil { ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) } @@ -605,10 +639,11 @@ func (l *Location) string() string { for li := range l.Line { lnStr := "??" if fn := l.Line[li].Function; fn != nil { - lnStr = fmt.Sprintf("%s %s:%d s=%d", + lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", fn.Name, fn.Filename, l.Line[li].Line, + l.Line[li].Column, fn.StartLine) if fn.Name != fn.SystemName { lnStr = lnStr + "(" + fn.SystemName + ")" @@ -707,6 +742,35 @@ func (s *Sample) HasLabel(key, value string) bool { return false } +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. +func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + // DiffBaseSample returns true if a sample belongs to the diff base and false // otherwise. func (s *Sample) DiffBaseSample() bool { @@ -785,10 +849,10 @@ func (p *Profile) HasFileLines() bool { // Unsymbolizable returns true if a mapping points to a binary for which // locations can't be symbolized in principle, at least now. Examples are -// "[vdso]", [vsyscall]" and some others, see the code. +// "[vdso]", "[vsyscall]" and some others, see the code. 
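The new `SetNumLabel`/`RemoveNumLabel` helpers above attach and strip per-sample numeric labels, with `unit` aligned index-for-index with `value`. A small library-style sketch; the package name, label key, and unit are invented for illustration:

```go
package profiletags

import "github.com/google/pprof/profile"

// tagRequestSize labels every sample with one numeric value and its unit,
// runs fn on the labelled profile, then removes the label again.
func tagRequestSize(p *profile.Profile, n int64, fn func(*profile.Profile)) {
	p.SetNumLabel("request_bytes", []int64{n}, []string{"bytes"})
	fn(p)
	p.RemoveNumLabel("request_bytes")
}
```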
func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" } // Copy makes a fully independent copy of a profile. diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go index 539ad3ab3..a15696ba1 100644 --- a/vendor/github.com/google/pprof/profile/proto.go +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -39,11 +39,12 @@ import ( ) type buffer struct { - field int // field tag - typ int // proto wire type code for field - u64 uint64 - data []byte - tmp [16]byte + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". } type decoder func(*buffer, message) error @@ -286,7 +287,6 @@ func decodeInt64s(b *buffer, x *[]int64) error { if b.typ == 2 { // Packed encoding data := b.data - tmp := make([]int64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -294,9 +294,8 @@ func decodeInt64s(b *buffer, x *[]int64) error { if u, data, err = decodeVarint(data); err != nil { return err } - tmp = append(tmp, int64(u)) + *x = append(*x, int64(u)) } - *x = append(*x, tmp...) return nil } var i int64 @@ -319,7 +318,6 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if b.typ == 2 { data := b.data // Packed encoding - tmp := make([]uint64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -327,9 +325,8 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if u, data, err = decodeVarint(data); err != nil { return err } - tmp = append(tmp, u) + *x = append(*x, u) } - *x = append(*x, tmp...) return nil } var u uint64 diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go index 02d21a818..b2f9fd546 100644 --- a/vendor/github.com/google/pprof/profile/prune.go +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -62,15 +62,31 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { prune := make(map[uint64]bool) pruneBeneath := make(map[uint64]bool) + // simplifyFunc can be expensive, so cache results. + // Note that the same function name can be encountered many times due + // different lines and addresses in the same function. 
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + for _, loc := range p.Location { var i int for i = len(loc.Line) - 1; i >= 0; i-- { if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { - funcName := simplifyFunc(fn.Name) - if dropRx.MatchString(funcName) { - if keepRx == nil || !keepRx.MatchString(funcName) { - break - } + if pruneFromHere(fn.Name) { + break } } } diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md index 750c3c7eb..e809c79ab 100644 --- a/vendor/github.com/huandu/xstrings/README.md +++ b/vendor/github.com/huandu/xstrings/README.md @@ -39,8 +39,8 @@ _Keep this table sorted by Function in ascending order._ | [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | | [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | | [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | -| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | | [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | | [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | | [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | @@ -50,14 +50,15 @@ _Keep this table sorted by Function in ascending order._ | [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | | [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | | [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | -| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| 
[Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | | [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | | [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | | [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | | [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | | [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | | [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | +| [ToPascalCase](https://godoc.org/github.com/huandu/xstrings#ToPascalCase) | - | [#1](https://github.com/huandu/xstrings/issues/1) | | [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | | [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | | [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go index cba0d0725..5d8cfee47 100644 --- a/vendor/github.com/huandu/xstrings/convert.go +++ b/vendor/github.com/huandu/xstrings/convert.go @@ -13,17 +13,37 @@ import ( // // Some samples. // +// "some_words" => "someWords" +// "http_server" => "httpServer" +// "no_https" => "noHttps" +// "_complex__case_" => "_complex_Case_" +// "some words" => "someWords" +// "GOLANG_IS_GREAT" => "golangIsGreat" +func ToCamelCase(str string) string { + return toCamelCase(str, false) +} + +// ToPascalCase is to convert words separated by space, underscore and hyphen to pascal case. +// +// Some samples. 
+// // "some_words" => "SomeWords" // "http_server" => "HttpServer" // "no_https" => "NoHttps" // "_complex__case_" => "_Complex_Case_" // "some words" => "SomeWords" -func ToCamelCase(str string) string { +// "GOLANG_IS_GREAT" => "GolangIsGreat" +func ToPascalCase(str string) string { + return toCamelCase(str, true) +} + +func toCamelCase(str string, isBig bool) string { if len(str) == 0 { return "" } buf := &stringBuilder{} + var isFirstRuneUpper bool var r0, r1 rune var size int @@ -33,7 +53,14 @@ func ToCamelCase(str string) string { str = str[size:] if !isConnector(r0) { - r0 = unicode.ToUpper(r0) + isFirstRuneUpper = unicode.IsUpper(r0) + + if isBig { + r0 = unicode.ToUpper(r0) + } else { + r0 = unicode.ToLower(r0) + } + break } @@ -60,12 +87,25 @@ func ToCamelCase(str string) string { } if isConnector(r1) { + isFirstRuneUpper = unicode.IsUpper(r0) r0 = unicode.ToUpper(r0) } else { + if isFirstRuneUpper { + if unicode.IsUpper(r0) { + r0 = unicode.ToLower(r0) + } else { + isFirstRuneUpper = false + } + } + buf.WriteRune(r1) } } + if isFirstRuneUpper && !isBig { + r0 = unicode.ToLower(r0) + } + buf.WriteRune(r0) return buf.String() } diff --git a/vendor/github.com/jmoiron/sqlx/.travis.yml b/vendor/github.com/jmoiron/sqlx/.travis.yml deleted file mode 100644 index 1cfa28cb3..000000000 --- a/vendor/github.com/jmoiron/sqlx/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -# vim: ft=yaml sw=2 ts=2 - -language: go - -# enable database services -services: - - mysql - - postgresql - -# create test database -before_install: - - mysql -e 'CREATE DATABASE IF NOT EXISTS sqlxtest;' - - psql -c 'create database sqlxtest;' -U postgres - - go get github.com/mattn/goveralls - - export SQLX_MYSQL_DSN="travis:@/sqlxtest?parseTime=true" - - export SQLX_POSTGRES_DSN="postgres://postgres:@localhost/sqlxtest?sslmode=disable" - - export SQLX_SQLITE_DSN="$HOME/sqlxtest.db" - -# go versions to test -go: - - "1.15.x" - - "1.16.x" - -# run tests w/ coverage -script: - - travis_retry $GOPATH/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/jmoiron/sqlx/Makefile b/vendor/github.com/jmoiron/sqlx/Makefile new file mode 100644 index 000000000..448b9ddd9 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/Makefile @@ -0,0 +1,30 @@ +.ONESHELL: +SHELL = /bin/sh +.SHELLFLAGS = -ec + +BASE_PACKAGE := github.com/jmoiron/sqlx + +tooling: + go install honnef.co/go/tools/cmd/staticcheck@v0.4.7 + go install golang.org/x/vuln/cmd/govulncheck@v1.0.4 + go install golang.org/x/tools/cmd/goimports@v0.20.0 + +has-changes: + git diff --exit-code --quiet HEAD -- + +lint: + go vet ./... + staticcheck -checks=all ./... + +fmt: + go list -f '{{.Dir}}' ./... | xargs -I {} goimports -local $(BASE_PACKAGE) -w {} + +vuln-check: + govulncheck ./... + +test-race: + go test -v -race -count=1 ./... + +update-dependencies: + go get -u -t -v ./... 
+ go mod tidy diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md index 0d7159290..5bfd231a1 100644 --- a/vendor/github.com/jmoiron/sqlx/README.md +++ b/vendor/github.com/jmoiron/sqlx/README.md @@ -1,6 +1,6 @@ # sqlx -[![Build Status](https://travis-ci.org/jmoiron/sqlx.svg?branch=master)](https://travis-ci.org/jmoiron/sqlx) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE) +[![CircleCI](https://dl.circleci.com/status-badge/img/gh/jmoiron/sqlx/tree/master.svg?style=shield)](https://dl.circleci.com/status-badge/redirect/gh/jmoiron/sqlx/tree/master) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE) sqlx is a library which provides a set of extensions on go's standard `database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`, diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go index e2b4e60b2..b80104175 100644 --- a/vendor/github.com/jmoiron/sqlx/doc.go +++ b/vendor/github.com/jmoiron/sqlx/doc.go @@ -8,5 +8,4 @@ // Additions include scanning into structs, named query support, rebinding // queries for different drivers, convenient shorthands for common error handling // and more. -// package sqlx diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go index 728aa04d0..6ac447771 100644 --- a/vendor/github.com/jmoiron/sqlx/named.go +++ b/vendor/github.com/jmoiron/sqlx/named.go @@ -174,7 +174,7 @@ func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{ arglist := make([]interface{}, 0, len(names)) // grab the indirected value of arg - v := reflect.ValueOf(arg) + var v reflect.Value for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; { v = v.Elem() } diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go index 07ad2165d..9ad23f4ed 100644 --- a/vendor/github.com/jmoiron/sqlx/named_context.go +++ b/vendor/github.com/jmoiron/sqlx/named_context.go @@ -1,3 +1,4 @@ +//go:build go1.8 // +build go1.8 package sqlx diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go index 0b1099428..8ec6a1382 100644 --- a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go +++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go @@ -3,7 +3,6 @@ // allows for Go-compatible named attribute access, including accessing embedded // struct attributes and the ability to use functions and struct tags to // customize field names. 
-// package reflectx import ( diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go index f7b287683..8259a4feb 100644 --- a/vendor/github.com/jmoiron/sqlx/sqlx.go +++ b/vendor/github.com/jmoiron/sqlx/sqlx.go @@ -5,7 +5,6 @@ import ( "database/sql/driver" "errors" "fmt" - "io/ioutil" "path/filepath" "reflect" @@ -51,9 +50,9 @@ func mapper() *reflectx.Mapper { // isScannable takes the reflect.Type and the actual dest value and returns // whether or not it's Scannable. Something is scannable if: -// * it is not a struct -// * it implements sql.Scanner -// * it has no exported fields +// - it is not a struct +// - it implements sql.Scanner +// - it has no exported fields func isScannable(t reflect.Type) bool { if reflect.PtrTo(t).Implements(_scannerInterface) { return true @@ -160,6 +159,8 @@ func mapperFor(i interface{}) *reflectx.Mapper { } var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem() + +//lint:ignore U1000 ignoring this for now var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem() // Row is a reimplementation of sql.Row in order to gain access to the underlying @@ -248,6 +249,8 @@ type DB struct { // NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The // driverName of the original database is required for named query support. +// +//lint:ignore ST1003 changing this would break the package interface. func NewDb(db *sql.DB, driverName string) *DB { return &DB{DB: db, driverName: driverName, Mapper: mapper()} } @@ -884,9 +887,9 @@ func structOnlyError(t reflect.Type) error { // then each row must only have one column which can scan into that type. This // allows you to do something like: // -// rows, _ := db.Query("select id from people;") -// var ids []int -// scanAll(rows, &ids, false) +// rows, _ := db.Query("select id from people;") +// var ids []int +// scanAll(rows, &ids, false) // // and ids will be a list of the id results. I realize that this is a desirable // interface to expose to users, but for now it will only be exposed via changes @@ -935,9 +938,9 @@ func scanAll(rows rowsi, dest interface{}, structOnly bool) error { var values []interface{} var m *reflectx.Mapper - switch rows.(type) { + switch rows := rows.(type) { case *Rows: - m = rows.(*Rows).Mapper + m = rows.Mapper default: m = mapper() } diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go index 7aa4dd01e..32621d56d 100644 --- a/vendor/github.com/jmoiron/sqlx/sqlx_context.go +++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go @@ -1,3 +1,4 @@ +//go:build go1.8 // +build go1.8 package sqlx diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 4c28dff46..4528059ca 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -1,9 +1,8 @@ -# This is an example goreleaser.yaml file with some sane defaults. 
-# Make sure to check the documentation at http://goreleaser.com +version: 2 + before: hooks: - ./gen.sh - - go install mvdan.cc/garble@v0.10.1 builds: - @@ -32,7 +31,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble - id: "s2d" binary: s2d @@ -59,7 +57,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble - id: "s2sx" binary: s2sx @@ -87,7 +84,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble archives: - @@ -103,7 +99,7 @@ archives: checksum: name_template: 'checksums.txt' snapshot: - name_template: "{{ .Tag }}-next" + version_template: "{{ .Tag }}-next" changelog: sort: asc filters: diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 7e83f583c..de264c85a 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,51 @@ This package provides various compression algorithms. # changelog +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by 
@Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + * Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 @@ -31,6 +76,10 @@ This package provides various compression algorithms. * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ See changes to v1.16.x + * July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 @@ -53,7 +102,7 @@ This package provides various compression algorithms. * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -69,6 +118,7 @@ This package provides various compression algorithms. * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 +
See changes to v1.15.x @@ -107,7 +157,7 @@ This package provides various compression algorithms. * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -310,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. * Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -489,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. @@ -536,6 +586,8 @@ the stateless compress described below. For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). +To disable all assembly add `-tags=noasm`. This works across all packages. + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. @@ -554,7 +606,7 @@ For direct deflate use, NewStatelessWriter and StatelessDeflate are available. S A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: -``` +```go // replace 'ioutil.Discard' with your output. gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) if err != nil { diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f7e..0c7dd4ffe 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. // It is up to the caller to verify integrity of the returned data. 
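The stateless-compression snippet in the README hunk above is only a fragment. A complete version might look like the following; it assumes the `github.com/klauspost/compress/gzip` package and the `StatelessCompression` level documented there, and swaps the deprecated `ioutil.Discard` for `io.Discard`:

```go
package main

import (
	"bufio"
	"io"
	"os"

	"github.com/klauspost/compress/gzip"
)

func main() {
	// Stateless compression keeps no state between Write calls, so each
	// Write is compressed independently. Wrapping the gzip writer in a
	// bufio.Writer batches small writes into 4KB blocks, which keeps the
	// compression ratio reasonable.
	gzw, err := gzip.NewWriterLevel(io.Discard, gzip.StatelessCompression)
	if err != nil {
		panic(err)
	}
	defer gzw.Close()

	w := bufio.NewWriterSize(gzw, 4096)
	defer w.Flush() // deferred after gzw.Close, so it runs first (LIFO)

	if _, err := io.Copy(w, os.Stdin); err != nil {
		panic(err)
	}
}
```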
-// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b25..0f56b02d7 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. @@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 2aa6a95a0..2754bac6f 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int { i := 0 // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because + // length emitted down below is a little lower (at 60 = 64 - 4), because // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod index 2263853fc..5a4412f90 100644 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -1,4 +1,4 @@ module github.com/klauspost/compress -go 1.16 +go 1.19 diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9f17ce601..9c28840c3 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if debugDecoder { printf("Compression modes: 0b%b", compMode) } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { @@ -595,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 2cfe925ad..32a7f401d 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -427,6 +427,16 @@ func (b *blockEnc) 
encodeLits(lits []byte, raw bool) error { return nil } +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + // fuzzFseEncoder can be used to fuzz the FSE encoder. func fuzzFseEncoder(data []byte) int { if len(data) > maxSequences || len(data) < 2 { @@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.sequences) == 0 { return b.encodeLits(b.literals, rawAllLits) } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + // We want some difference to at least account for the headers. saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index f6a240970..6a5a2988b 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -95,42 +95,54 @@ type Header struct { // If there isn't enough input, io.ErrUnexpectedEOF is returned. // The FirstBlock.OK will indicate if enough information was available to decode the first block header. func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { *h = Header{} if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 b, in := in[:4], in[4:] if string(b) != frameMagic { if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch + return nil, ErrMagicMismatch } if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 h.Skippable = true h.SkippableID = int(b[0] & 0xf) h.SkippableSize = binary.LittleEndian.Uint32(in) - return nil + return in[4:], nil } // Read Window_Descriptor // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } fhd, in := in[0], in[1:] h.HeaderSize++ h.SingleSegment = fhd&(1<<5) != 0 h.HasCheckSum = fhd&(1<<2) != 0 if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") + return nil, errors.New("reserved bit set on frame header") } if !h.SingleSegment { if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } var wd byte wd, in = in[0], in[1:] @@ -148,7 +160,7 @@ func (h *Header) Decode(in []byte) error { size = 4 } if len(in) < int(size) { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:size], in[size:] h.HeaderSize += int(size) @@ -178,7 +190,7 @@ func (h *Header) Decode(in []byte) error { if fcsSize > 0 { h.HasFCS = true if len(in) < fcsSize { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) @@ -199,7 +211,7 @@ func (h *Header) Decode(in []byte) error { // Frame Header done, we will not fail from now on. if len(in) < 3 { - return nil + return in, nil } tmp := in[:3] bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) @@ -209,7 +221,7 @@ func (h *Header) Decode(in []byte) error { cSize := int(bh >> 3) switch blockType { case blockTypeReserved: - return nil + return in, nil case blockTypeRLE: h.FirstBlock.Compressed = true h.FirstBlock.DecompressedSize = cSize @@ -225,5 +237,25 @@ func (h *Header) Decode(in []byte) error { } h.FirstBlock.OK = true - return nil + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f04aaa21e..bbca17234 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -82,7 +82,7 @@ var ( // can run multiple concurrent stateless decodes. It is even possible to // use stateless decodes while a stream is being decoded. 
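A possible usage sketch for the `DecodeAndStrip`/`AppendTo` pair introduced above, based on my reading of the doc comments in this hunk (the exact byte counts depend on encoder options):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil) // nil writer: EncodeAll-only use
	frame := enc.EncodeAll([]byte("hello zstd header"), nil)

	// DecodeAndStrip behaves like Decode but also returns the input
	// with the frame header stripped, so len(frame) should equal
	// h.HeaderSize + len(remain).
	var h zstd.Header
	remain, err := h.DecodeAndStrip(frame)
	if err != nil {
		panic(err)
	}
	fmt.Println("frame header bytes:", h.HeaderSize, "remaining:", len(remain))

	// AppendTo re-encodes the parsed header; per the doc comment, no
	// validation is performed on the header values.
	rebuilt, err := h.AppendTo(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("re-encoded header:", len(rebuilt), "bytes")
}
```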
// -// The Reset function can be used to initiate a new stream, which is will considerably +// The Reset function can be used to initiate a new stream, which will considerably // reduce the allocations normally caused by NewReader. func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { initPredefined() diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 8d5567fe6..b7b83164b 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { enc.Encode(&block, b) addValues(&remain, block.literals) litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } seqs += len(block.sequences) block.genCodes() addHist(&ll, block.coders.llEnc.Histogram()) @@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if offset == 0 { continue } + if int(offset) >= len(o.History) { + continue + } if offset > 3 { newOffsets[offset-3]++ } else { @@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if seqs/nUsed < 512 { // Use 512 as minimum. nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } } copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { hist := dst.Histogram() @@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { fakeLength += v hist[i] = uint32(v) } + + // Ensure we aren't trying to represent RLE. + if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + dst.HistogramFinished(maxSym, maxCount) dst.reUsed = false dst.useRLE = false diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index c81a15357..4613724e9 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { break } + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] @@ -201,14 +213,6 @@ encodeLoop: if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { return } - if debugAsserts { - if offset >= s { - panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) - } - if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { - panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } // Try to quick reject if we already have a long match. if m.length > 16 { left := len(src) - int(m.s+m.length) @@ -227,8 +231,10 @@ encodeLoop: } } l := 4 + e.matchlen(s+4, offset+4, src) - if true { + if m.rep <= 0 { // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. 
tMin := s - e.maxMatchOff if tMin < 0 { tMin = 0 @@ -239,7 +245,14 @@ encodeLoop: l++ } } - + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } cand := match{offset: offset, s: s, length: l, rep: rep} cand.estBits(bitsPerByte) if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { @@ -336,24 +349,31 @@ encodeLoop: } if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) } } // We have a match, we can store the forward value + s = best.s if best.rep > 0 { var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - if debugAsserts && s < nextEmit { - panic("s < nextEmit") - } addLiterals(&seq, best.s) // Repeat. If bit 4 is set, this is a non-lit repeat. seq.offset = uint32(best.rep & 3) if debugSequences { - println("repeat sequence", seq, "next s:", s) + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) } blk.sequences = append(blk.sequences, seq) @@ -396,7 +416,6 @@ encodeLoop: // A 4-byte match has been found. Update recent offsets. // We'll later see if more than 4 bytes. - s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 20d25b0e0..84a79fde7 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { e.cur = e.maxMatchOff break } - + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] @@ -168,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
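The `// Check RLE first` hunks added to `enc_best.go` above (and to `enc_better.go` below) rely on a self-overlap trick: matching `src[1:]` against `src` covers the whole block exactly when every byte equals its predecessor. A standalone sketch, with a simple scalar stand-in for the package's `matchLen` helper:

```go
package main

import (
	"bytes"
	"fmt"
)

// isRLE reports whether src consists of a single repeated byte, using the
// encoders' self-overlap trick: comparing src against itself shifted by
// one position matches everywhere iff all bytes are equal.
func isRLE(src []byte) bool {
	if len(src) < 2 {
		return len(src) == 1
	}
	return matchLen(src[1:], src) == len(src)-1
}

// matchLen is a scalar stand-in for the assembly/generic helper in the diff.
func matchLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	fmt.Println(isRLE(bytes.Repeat([]byte{0xAA}, 64))) // true
	fmt.Println(isRLE([]byte("not rle")))              // false
}
```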
@@ -199,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -230,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -259,11 +270,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -697,9 +708,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -727,12 +738,12 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -761,9 +772,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -790,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f7..d36be7bd8 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. 
var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0f..8f8223cd3 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -6,6 +6,7 @@ package zstd import ( "crypto/rand" + "errors" "fmt" "io" "math" @@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) { // and write CRC if requested. func (e *Encoder) Write(p []byte) (n int, err error) { s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } for len(p) > 0 { if len(p)+len(s.filling) < e.o.blockSize { if e.o.crc { @@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error { s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) s.wg.Add(1) + if final { + s.eofWritten = true + } go func(src []byte) { if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) @@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error { blk := enc.Block() enc.Encode(blk, src) blk.last = final - if final { - s.eofWritten = true - } // Wait for pending writes. s.wWg.Wait() if s.writeErr != nil { @@ -401,12 +405,20 @@ func (e *Encoder) Flush() error { if len(s.filling) > 0 { err := e.nextBlock(false) if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } } s.wg.Wait() s.wWg.Wait() if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return s.err } return s.writeErr @@ -422,6 +434,9 @@ func (e *Encoder) Close() error { } err := e.nextBlock(true) if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } if s.frameContentSize > 0 { @@ -459,6 +474,11 @@ func (e *Encoder) Close() error { } _, s.err = s.w.Write(frame) } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + return s.err } @@ -469,6 +489,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. 
- // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() + // Use single segments when above minimum window and below window size. single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index faaf81921..20671dcb9 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -94,7 +94,7 @@ func WithEncoderConcurrency(n int) EOption { // The value must be a power of two between MinWindowSize and MaxWindowSize. // A larger value will enable better compression but allocate more memory and, // for above-default values, take considerably longer. -// The default value is determined by the compression level. +// The default value is determined by the compression level and max 8MB. func WithWindowSize(n int) EOption { return func(o *encoderOptions) error { switch { @@ -232,9 +232,9 @@ func WithEncoderLevel(l EncoderLevel) EOption { case SpeedDefault: o.windowSize = 8 << 20 case SpeedBetterCompression: - o.windowSize = 16 << 20 + o.windowSize = 8 << 20 case SpeedBestCompression: - o.windowSize = 32 << 20 + o.windowSize = 8 << 20 } } if !o.customALEntropy { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f7e..e47af66e7 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error { } return err } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go index 2f5d5ed45..667ca0679 100644 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -76,7 +76,7 @@ func (f frameHeader) appendTo(dst []byte) []byte { if f.SingleSegment { dst = append(dst, uint8(f.ContentSize)) } - // Unless SingleSegment is set, framessizes < 256 are nto stored. + // Unless SingleSegment is set, framessizes < 256 are not stored. 
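Taken together, the `encoder.go` hunks above and the `zstd.go` hunk further below change the close semantics: `Write` after `Close` now fails with the new `ErrEncoderClosed`, while `Flush` and a repeated `Close` on an already-closed encoder are tolerated. A small sketch of the behavior as I read these hunks (not an upstream test):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("stream me")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// After Close, the patched Write fails with ErrEncoderClosed, while
	// Flush on an already-closed encoder is ignored and returns nil.
	_, err = enc.Write([]byte("too late"))
	fmt.Println(errors.Is(err, zstd.ErrEncoderClosed)) // true
	fmt.Println(enc.Flush())                           // <nil>
}
```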
case 1: f.ContentSize -= 256 dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 332e51fe4..8adfebb02 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -20,10 +20,9 @@ func (s *fseDecoder) buildDtable() error { if v == -1 { s.dt[highThreshold].setAddBits(uint8(i)) highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) + v = 1 } + symbolNext[i] = uint16(v) } } @@ -35,10 +34,12 @@ func (s *fseDecoder) buildDtable() error { for ss, v := range s.norm[:s.symbolLen] { for i := 0; i < int(v); i++ { s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { + for { // lowprob area position = (position + step) & tableMask + if position <= highThreshold { + break + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 17901e080..ae7d4d329 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -162,12 +162,12 @@ finalize: MOVD h, ret+24(FP) RET -// func writeBlocks(d *Digest, b []byte) int +// func writeBlocks(s *Digest, b []byte) int TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. - MOVD d+0(FP), digest + MOVD s+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s index 9a7655c0f..0782b86e3 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd828..c59f17e07 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code 
%d", errCode) } if ctx.litRemain < 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 974b99725..f5591fa1e 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -157,8 +157,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -177,8 +176,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -197,8 +195,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -459,8 +456,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -479,8 +475,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -499,8 +494,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -772,11 +766,10 @@ sequenceDecs_decode_bmi2_fill_2_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -784,11 +777,10 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -796,10 +788,9 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1032,11 +1023,10 @@ sequenceDecs_decode_56_bmi2_fill_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -1044,11 +1034,10 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -1056,10 +1045,9 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1826,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) 
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -1967,8 +1955,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -1987,8 +1974,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2007,8 +1993,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2391,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2514,11 +2499,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -2526,11 +2510,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -2538,10 +2521,9 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -2914,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3055,8 +3037,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3075,8 +3056,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3095,8 +3075,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3581,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -3704,11 +3683,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -3716,11 +3694,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // 
Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -3728,10 +3705,9 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 4be7cc736..066bef2a4 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -88,6 +88,10 @@ var ( // Close has been called. ErrDecoderClosed = errors.New("decoder used after Close") + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. + ErrEncoderClosed = errors.New("encoder used after Close") + // ErrDecoderNilInput is returned when a nil Reader was provided // and an operation other than Reset/DecodeAll/Close was attempted. ErrDecoderNilInput = errors.New("nil input provided as reader") diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE deleted file mode 100644 index 5d8cb5b72..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. 
Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore deleted file mode 100644 index e16fb946b..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile deleted file mode 100644 index 81be21437..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... - go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go deleted file mode 100644 index 7c08e564f..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "google.golang.org/protobuf/proto" -) - -// TODO: Give error package name prefix in next minor release. -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // TODO: Consider allowing the caller to specify a decode buffer in the - // next major version. - - // TODO: Consider using error wrapping to annotate error state in pass- - // through cases in the next minor version. - - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. 
Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil); but if it does, it should - // be treated as no-op according to the Reader contract. - continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go deleted file mode 100644 index c318385cb..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go deleted file mode 100644 index e58dd9d29..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "google.golang.org/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. -func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - // TODO: Consider allowing the caller to specify an encode buffer in the - // next major version. 
- - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE deleted file mode 100644 index a3866a291..000000000 --- a/vendor/github.com/mitchellh/hashstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md deleted file mode 100644 index feb0c2496..000000000 --- a/vendor/github.com/mitchellh/hashstructure/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure) - -hashstructure is a Go library for creating a unique hash value -for arbitrary values in Go. - -This can be used to key values in a hash (for use in a map, set, etc.) -that are complex. The most common use case is comparing two values without -sending data across the network, caching values locally (de-dup), and so on. - -## Features - - * Hash any arbitrary Go value, including complex types. - - * Tag a struct field to ignore it and not affect the hash value. - - * Tag a slice type struct field to treat it as a set where ordering - doesn't affect the hash code but the field itself is still taken into - account to create the hash value. - - * Optionally, specify a custom hash function to optimize for speed, collision - avoidance for your data set, etc. - - * Optionally, hash the output of `.String()` on structs that implement fmt.Stringer, - allowing effective hashing of time.Time - - * Optionally, override the hashing process by implementing `Hashable`. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/hashstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). 
- -A quick code example is shown below: - -```go -type ComplexStruct struct { - Name string - Age uint - Metadata map[string]interface{} -} - -v := ComplexStruct{ - Name: "mitchellh", - Age: 64, - Metadata: map[string]interface{}{ - "car": true, - "location": "California", - "siblings": []string{"Bob", "John"}, - }, -} - -hash, err := hashstructure.Hash(v, nil) -if err != nil { - panic(err) -} - -fmt.Printf("%d", hash) -// Output: -// 2307517237273902113 -``` diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go deleted file mode 100644 index 89dd4d3ea..000000000 --- a/vendor/github.com/mitchellh/hashstructure/hashstructure.go +++ /dev/null @@ -1,422 +0,0 @@ -package hashstructure - -import ( - "encoding/binary" - "fmt" - "hash" - "hash/fnv" - "reflect" - "time" -) - -// ErrNotStringer is returned when there's an error with hash:"string" -type ErrNotStringer struct { - Field string -} - -// Error implements error for ErrNotStringer -func (ens *ErrNotStringer) Error() string { - return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) -} - -// HashOptions are options that are available for hashing. -type HashOptions struct { - // Hasher is the hash function to use. If this isn't set, it will - // default to FNV. - Hasher hash.Hash64 - - // TagName is the struct tag to look at when hashing the structure. - // By default this is "hash". - TagName string - - // ZeroNil is flag determining if nil pointer should be treated equal - // to a zero value of pointed type. By default this is false. - ZeroNil bool - - // IgnoreZeroValue is determining if zero value fields should be - // ignored for hash calculation. - IgnoreZeroValue bool - - // SlicesAsSets assumes that a `set` tag is always present for slices. - // Default is false (in which case the tag is used instead) - SlicesAsSets bool - - // UseStringer will attempt to use fmt.Stringer aways. If the struct - // doesn't implement fmt.Stringer, it'll fall back to trying usual tricks. - // If this is true, and the "string" tag is also set, the tag takes - // precedense (meaning that if the type doesn't implement fmt.Stringer, we - // panic) - UseStringer bool -} - -// Hash returns the hash value of an arbitrary value. -// -// If opts is nil, then default options will be used. See HashOptions -// for the default values. The same *HashOptions value cannot be used -// concurrently. None of the values within a *HashOptions struct are -// safe to read/write while hashing is being done. -// -// Notes on the value: -// -// * Unexported fields on structs are ignored and do not affect the -// hash value. -// -// * Adding an exported field to a struct with the zero value will change -// the hash value. -// -// For structs, the hashing can be controlled using tags. For example: -// -// struct { -// Name string -// UUID string `hash:"ignore"` -// } -// -// The available tag values are: -// -// * "ignore" or "-" - The field will be ignored and not affect the hash code. -// -// * "set" - The field will be treated as a set, where ordering doesn't -// affect the hash code. This only works for slices. 
-// -// * "string" - The field will be hashed as a string, only works when the -// field implements fmt.Stringer -// -func Hash(v interface{}, opts *HashOptions) (uint64, error) { - // Create default options - if opts == nil { - opts = &HashOptions{} - } - if opts.Hasher == nil { - opts.Hasher = fnv.New64() - } - if opts.TagName == "" { - opts.TagName = "hash" - } - - // Reset the hash - opts.Hasher.Reset() - - // Create our walker and walk the structure - w := &walker{ - h: opts.Hasher, - tag: opts.TagName, - zeronil: opts.ZeroNil, - ignorezerovalue: opts.IgnoreZeroValue, - sets: opts.SlicesAsSets, - stringer: opts.UseStringer, - } - return w.visit(reflect.ValueOf(v), nil) -} - -type walker struct { - h hash.Hash64 - tag string - zeronil bool - ignorezerovalue bool - sets bool - stringer bool -} - -type visitOpts struct { - // Flags are a bitmask of flags to affect behavior of this visit - Flags visitFlag - - // Information about the struct containing this field - Struct interface{} - StructField string -} - -var timeType = reflect.TypeOf(time.Time{}) - -func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { - t := reflect.TypeOf(0) - - // Loop since these can be wrapped in multiple layers of pointers - // and interfaces. - for { - // If we have an interface, dereference it. We have to do this up - // here because it might be a nil in there and the check below must - // catch that. - if v.Kind() == reflect.Interface { - v = v.Elem() - continue - } - - if v.Kind() == reflect.Ptr { - if w.zeronil { - t = v.Type().Elem() - } - v = reflect.Indirect(v) - continue - } - - break - } - - // If it is nil, treat it like a zero. - if !v.IsValid() { - v = reflect.Zero(t) - } - - // Binary writing can use raw ints, we have to convert to - // a sized-int, we'll choose the largest... - switch v.Kind() { - case reflect.Int: - v = reflect.ValueOf(int64(v.Int())) - case reflect.Uint: - v = reflect.ValueOf(uint64(v.Uint())) - case reflect.Bool: - var tmp int8 - if v.Bool() { - tmp = 1 - } - v = reflect.ValueOf(tmp) - } - - k := v.Kind() - - // We can shortcut numeric values by directly binary writing them - if k >= reflect.Int && k <= reflect.Complex64 { - // A direct hash calculation - w.h.Reset() - err := binary.Write(w.h, binary.LittleEndian, v.Interface()) - return w.h.Sum64(), err - } - - switch v.Type() { - case timeType: - w.h.Reset() - b, err := v.Interface().(time.Time).MarshalBinary() - if err != nil { - return 0, err - } - - err = binary.Write(w.h, binary.LittleEndian, b) - return w.h.Sum64(), err - } - - switch k { - case reflect.Array: - var h uint64 - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - h = hashUpdateOrdered(w.h, h, current) - } - - return h, nil - - case reflect.Map: - var includeMap IncludableMap - if opts != nil && opts.Struct != nil { - if v, ok := opts.Struct.(IncludableMap); ok { - includeMap = v - } - } - - // Build the hash for the map. We do this by XOR-ing all the key - // and value hashes. This makes it deterministic despite ordering. 
- var h uint64 - for _, k := range v.MapKeys() { - v := v.MapIndex(k) - if includeMap != nil { - incl, err := includeMap.HashIncludeMap( - opts.StructField, k.Interface(), v.Interface()) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - kh, err := w.visit(k, nil) - if err != nil { - return 0, err - } - vh, err := w.visit(v, nil) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - - return h, nil - - case reflect.Struct: - parent := v.Interface() - var include Includable - if impl, ok := parent.(Includable); ok { - include = impl - } - - if impl, ok := parent.(Hashable); ok { - return impl.Hash() - } - - // If we can address this value, check if the pointer value - // implements our interfaces and use that if so. - if v.CanAddr() { - vptr := v.Addr() - parentptr := vptr.Interface() - if impl, ok := parentptr.(Includable); ok { - include = impl - } - - if impl, ok := parentptr.(Hashable); ok { - return impl.Hash() - } - } - - t := v.Type() - h, err := w.visit(reflect.ValueOf(t.Name()), nil) - if err != nil { - return 0, err - } - - l := v.NumField() - for i := 0; i < l; i++ { - if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { - - var f visitFlag - fieldType := t.Field(i) - if fieldType.PkgPath != "" { - // Unexported - continue - } - - tag := fieldType.Tag.Get(w.tag) - if tag == "ignore" || tag == "-" { - // Ignore this field - continue - } - - if w.ignorezerovalue { - zeroVal := reflect.Zero(reflect.TypeOf(innerV.Interface())).Interface() - if innerV.Interface() == zeroVal { - continue - } - } - - // if string is set, use the string value - if tag == "string" || w.stringer { - if impl, ok := innerV.Interface().(fmt.Stringer); ok { - innerV = reflect.ValueOf(impl.String()) - } else if tag == "string" { - // We only show this error if the tag explicitly - // requests a stringer. - return 0, &ErrNotStringer{ - Field: v.Type().Field(i).Name, - } - } - } - - // Check if we implement includable and check it - if include != nil { - incl, err := include.HashInclude(fieldType.Name, innerV) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - switch tag { - case "set": - f |= visitFlagSet - } - - kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) - if err != nil { - return 0, err - } - - vh, err := w.visit(innerV, &visitOpts{ - Flags: f, - Struct: parent, - StructField: fieldType.Name, - }) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - } - - return h, nil - - case reflect.Slice: - // We have two behaviors here. If it isn't a set, then we just - // visit all the elements. If it is a set, then we do a deterministic - // hash code. 
- var h uint64 - var set bool - if opts != nil { - set = (opts.Flags & visitFlagSet) != 0 - } - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - if set || w.sets { - h = hashUpdateUnordered(h, current) - } else { - h = hashUpdateOrdered(w.h, h, current) - } - } - - return h, nil - - case reflect.String: - // Directly hash - w.h.Reset() - _, err := w.h.Write([]byte(v.String())) - return w.h.Sum64(), err - - default: - return 0, fmt.Errorf("unknown kind to hash: %s", k) - } - -} - -func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { - // For ordered updates, use a real hash function - h.Reset() - - // We just panic if the binary writes fail because we are writing - // an int64 which should never be fail-able. - e1 := binary.Write(h, binary.LittleEndian, a) - e2 := binary.Write(h, binary.LittleEndian, b) - if e1 != nil { - panic(e1) - } - if e2 != nil { - panic(e2) - } - - return h.Sum64() -} - -func hashUpdateUnordered(a, b uint64) uint64 { - return a ^ b -} - -// visitFlag is used as a bitmask for affecting visit behavior -type visitFlag uint - -const ( - visitFlagInvalid visitFlag = iota - visitFlagSet = iota << 1 -) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go deleted file mode 100644 index 702d35415..000000000 --- a/vendor/github.com/mitchellh/hashstructure/include.go +++ /dev/null @@ -1,22 +0,0 @@ -package hashstructure - -// Includable is an interface that can optionally be implemented by -// a struct. It will be called for each field in the struct to check whether -// it should be included in the hash. -type Includable interface { - HashInclude(field string, v interface{}) (bool, error) -} - -// IncludableMap is an interface that can optionally be implemented by -// a struct. It will be called when a map-type field is found to ask the -// struct if the map item should be included in the hash. -type IncludableMap interface { - HashIncludeMap(field string, k, v interface{}) (bool, error) -} - -// Hashable is an interface that can optionally be implemented by a struct -// to override the hash value. This value will override the hash value for -// the entire struct. Entries in the struct will not be hashed. -type Hashable interface { - Hash() (uint64, error) -} diff --git a/vendor/github.com/mittwald/go-helm-client/.golangci.yml b/vendor/github.com/mittwald/go-helm-client/.golangci.yml new file mode 100644 index 000000000..7ee91ef40 --- /dev/null +++ b/vendor/github.com/mittwald/go-helm-client/.golangci.yml @@ -0,0 +1,20 @@ +run: + timeout: 10m + +linters: + enable: + - contextcheck + - gocritic + - gofmt + - goimports + - gosec + - gosimple + - govet + - ineffassign + - misspell + - staticcheck + - unconvert + +linters-settings: + goimports: + local-prefixes: github.com/mittwald/go-helm-client diff --git a/vendor/github.com/mittwald/go-helm-client/Makefile b/vendor/github.com/mittwald/go-helm-client/Makefile index 0df8b051e..9f830ed8b 100644 --- a/vendor/github.com/mittwald/go-helm-client/Makefile +++ b/vendor/github.com/mittwald/go-helm-client/Makefile @@ -19,22 +19,18 @@ help: ## Display this help. @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) -.PHONY: fmt -fmt: ## Run go fmt against code. - go fmt ./... - -.PHONY: vet -vet: ## Run go vet against code. 
- go vet ./... +.PHONY: lint +lint: ## Run golangci-lint against code. + golangci-lint run ./... ##@ Build .PHONY: build -build: generate fmt vet ## Build binary. +build: generate lint ## Build binary. go build -o bin/client . .PHONY: test -test: generate fmt vet ## Run tests. +test: generate lint ## Run tests. go test ./... -coverprofile cover.out .PHONY: generate @@ -55,7 +51,7 @@ CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen ## Tool Versions MOCKGEN_VERSION ?= v0.4.0 -CONTROLLER_TOOLS_VERSION ?= v0.13.0 +CONTROLLER_TOOLS_VERSION ?= v0.16.1 .PHONY: mockgen mockgen: $(MOCKGEN) ## Download mockgen locally if necessary. diff --git a/vendor/github.com/mittwald/go-helm-client/client.go b/vendor/github.com/mittwald/go-helm-client/client.go index 3a23113de..a76ab28a7 100644 --- a/vendor/github.com/mittwald/go-helm-client/client.go +++ b/vendor/github.com/mittwald/go-helm-client/client.go @@ -8,10 +8,9 @@ import ( "log" "os" "reflect" + "slices" "strings" - "golang.org/x/exp/slices" - "k8s.io/apimachinery/pkg/api/errors" "github.com/spf13/pflag" @@ -794,7 +793,7 @@ func (c *HelmClient) RunChartTests(releaseName string) (bool, error) { } // Check that there are no test failures - return checkReleaseForTestFailure(rel) == false, nil + return !checkReleaseForTestFailure(rel), nil } // chartExists checks whether a chart is already installed @@ -926,8 +925,10 @@ func mergeInstallOptions(chartSpec *ChartSpec, installOptions *action.Install) { installOptions.Atomic = chartSpec.Atomic installOptions.SkipCRDs = chartSpec.SkipCRDs installOptions.DryRun = chartSpec.DryRun + installOptions.DryRunOption = chartSpec.DryRunOption installOptions.SubNotes = chartSpec.SubNotes installOptions.WaitForJobs = chartSpec.WaitForJobs + installOptions.Labels = chartSpec.Labels } // mergeUpgradeOptions merges values of the provided chart to helm upgrade options used by the client. @@ -941,13 +942,16 @@ func mergeUpgradeOptions(chartSpec *ChartSpec, upgradeOptions *action.Upgrade) { upgradeOptions.Force = chartSpec.Force upgradeOptions.ResetValues = chartSpec.ResetValues upgradeOptions.ReuseValues = chartSpec.ReuseValues + upgradeOptions.ResetThenReuseValues = chartSpec.ResetThenReuseValues upgradeOptions.Recreate = chartSpec.Recreate upgradeOptions.MaxHistory = chartSpec.MaxHistory upgradeOptions.Atomic = chartSpec.Atomic upgradeOptions.CleanupOnFail = chartSpec.CleanupOnFail upgradeOptions.DryRun = chartSpec.DryRun + upgradeOptions.DryRunOption = chartSpec.DryRunOption upgradeOptions.SubNotes = chartSpec.SubNotes upgradeOptions.WaitForJobs = chartSpec.WaitForJobs + upgradeOptions.Labels = chartSpec.Labels } // mergeUninstallReleaseOptions merges values of the provided chart to helm uninstall options used by the client. 
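The client.go hunk above swaps `golang.org/x/exp/slices` for the standard-library `slices` package, which has shipped with the same generic API since Go 1.21, so existing call sites keep working. A minimal sketch of the kind of membership check this package provides (the release names and the lookup itself are illustrative; the hunk does not show the actual call sites):

```go
package main

import (
	"fmt"
	"slices" // standard library since Go 1.21; formerly golang.org/x/exp/slices
)

func main() {
	// Hypothetical list of deployed releases; a chartExists-style helper
	// can answer membership queries with slices.Contains.
	releases := []string{"gpu-operator", "network-operator"}
	fmt.Println(slices.Contains(releases, "gpu-operator"))           // true
	fmt.Println(slices.Contains(releases, "node-feature-discovery")) // false
}
```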
diff --git a/vendor/github.com/mittwald/go-helm-client/spec.go b/vendor/github.com/mittwald/go-helm-client/spec.go index ffb9ee719..acbd438c3 100644 --- a/vendor/github.com/mittwald/go-helm-client/spec.go +++ b/vendor/github.com/mittwald/go-helm-client/spec.go @@ -1,7 +1,8 @@ package helmclient import ( - "github.com/pkg/errors" + "fmt" + "helm.sh/helm/v3/pkg/getter" "sigs.k8s.io/yaml" @@ -15,12 +16,12 @@ func (spec *ChartSpec) GetValuesMap(p getter.Providers) (map[string]interface{}, err := yaml.Unmarshal([]byte(spec.ValuesYaml), &valuesYaml) if err != nil { - return nil, errors.Wrap(err, "Failed to Parse ValuesYaml") + return nil, fmt.Errorf("failed to parse ValuesYaml: %w", err) } valuesOptions, err := spec.ValuesOptions.MergeValues(p) if err != nil { - return nil, errors.Wrap(err, "Failed to Parse ValuesOptions") + return nil, fmt.Errorf("failed to parse ValuesOptions: %w", err) } return values.MergeMaps(valuesYaml, valuesOptions), nil diff --git a/vendor/github.com/mittwald/go-helm-client/types.go b/vendor/github.com/mittwald/go-helm-client/types.go index e78f75615..2256b004d 100644 --- a/vendor/github.com/mittwald/go-helm-client/types.go +++ b/vendor/github.com/mittwald/go-helm-client/types.go @@ -176,6 +176,9 @@ type ChartSpec struct { // ReuseValues indicates whether to reuse the values.yaml file during installation. // +optional ReuseValues bool `json:"reuseValues,omitempty"` + // ResetThenReuseValues will reset the values to the chart's built-ins then merge with user's last supplied values. + // +optional + ResetThenReuseValues bool // Recreate indicates whether to recreate the release if it already exists. // +optional Recreate bool `json:"recreate,omitempty"` @@ -188,10 +191,15 @@ type ChartSpec struct { // DryRun indicates whether to perform a dry run. // +optional DryRun bool `json:"dryRun,omitempty"` + // DryRunOption controls whether the operation is prepared, but not executed with options on whether or not to interact with the remote cluster. 
+	DryRunOption string `json:"dryRunOption,omitempty"`
 	// Description specifies a custom description for the uninstalled release
 	// +optional
 	Description string `json:"description,omitempty"`
 	// KeepHistory indicates whether to retain or purge the release history during uninstall
 	// +optional
 	KeepHistory bool `json:"keepHistory,omitempty"`
+	// Labels specifies a set of labels to be applied to the release
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
 }
diff --git a/vendor/github.com/mittwald/go-helm-client/values/options.go b/vendor/github.com/mittwald/go-helm-client/values/options.go
index 0fcc96099..285b75cc4 100644
--- a/vendor/github.com/mittwald/go-helm-client/values/options.go
+++ b/vendor/github.com/mittwald/go-helm-client/values/options.go
@@ -23,12 +23,12 @@ Changes:
 package values
 
 import (
+	"fmt"
 	"io"
 	"net/url"
 	"os"
 	"strings"
 
-	"github.com/pkg/errors"
 	"sigs.k8s.io/yaml"
 
 	"helm.sh/helm/v3/pkg/getter"
@@ -65,7 +65,7 @@ func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, error) {
 		}
 
 		if err := yaml.Unmarshal(bytes, &currentMap); err != nil {
-			return nil, errors.Wrapf(err, "failed to parse %s", filePath)
+			return nil, fmt.Errorf("failed to parse %s: %w", filePath, err)
 		}
 		// Merge with the previous map
 		base = MergeMaps(base, currentMap)
@@ -74,21 +74,21 @@ func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, error) {
 	// User specified a value via --set-json
 	for _, value := range opts.JSONValues {
 		if err := strvals.ParseJSON(value, base); err != nil {
-			return nil, errors.Errorf("failed parsing --set-json data %s", value)
+			return nil, fmt.Errorf("failed parsing --set-json data %s: %w", value, err)
 		}
 	}
 
 	// User specified a value via --set
 	for _, value := range opts.Values {
 		if err := strvals.ParseInto(value, base); err != nil {
-			return nil, errors.Wrap(err, "failed parsing --set data")
+			return nil, fmt.Errorf("failed parsing --set data: %w", err)
 		}
 	}
 
 	// User specified a value via --set-string
 	for _, value := range opts.StringValues {
 		if err := strvals.ParseIntoString(value, base); err != nil {
-			return nil, errors.Wrap(err, "failed parsing --set-string data")
+			return nil, fmt.Errorf("failed parsing --set-string data: %w", err)
 		}
 	}
 
@@ -102,7 +102,7 @@ func (opts *Options) MergeValues(p getter.Providers) (map[string]interface{}, error) {
 			return string(bytes), err
 		}
 		if err := strvals.ParseIntoFile(value, base, reader); err != nil {
-			return nil, errors.Wrap(err, "failed parsing --set-file data")
+			return nil, fmt.Errorf("failed parsing --set-file data: %w", err)
 		}
 	}
diff --git a/vendor/github.com/mittwald/go-helm-client/zz_generated.deepcopy.go b/vendor/github.com/mittwald/go-helm-client/zz_generated.deepcopy.go
index 313e29ac6..ed77902fe 100644
--- a/vendor/github.com/mittwald/go-helm-client/zz_generated.deepcopy.go
+++ b/vendor/github.com/mittwald/go-helm-client/zz_generated.deepcopy.go
@@ -10,6 +10,13 @@ import ()
 func (in *ChartSpec) DeepCopyInto(out *ChartSpec) {
 	*out = *in
 	in.ValuesOptions.DeepCopyInto(&out.ValuesOptions)
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChartSpec.
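The spec.go and values/options.go hunks above replace `github.com/pkg/errors` wrapping with `fmt.Errorf` and the `%w` verb, which the standard library has supported since Go 1.13. A short sketch, using an illustrative sentinel error rather than anything from the real code path, showing that the wrapped cause stays matchable:

```go
package main

import (
	"errors"
	"fmt"
)

// errBadYaml stands in for whatever the yaml decoder would return;
// the name is hypothetical.
var errBadYaml = errors.New("yaml: unmarshal failed")

func parseValues() error {
	// Same pattern as the hunks above: annotate the error and wrap it.
	return fmt.Errorf("failed to parse ValuesYaml: %w", errBadYaml)
}

func main() {
	err := parseValues()
	fmt.Println(err)                        // failed to parse ValuesYaml: yaml: unmarshal failed
	fmt.Println(errors.Is(err, errBadYaml)) // true: %w preserves the error chain
}
```

One difference worth noting: `errors.Wrap` returns nil when given a nil error, while `fmt.Errorf` always returns a non-nil error, so this migration is only safe where the error has already been checked, as it has in each hunk above.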
diff --git a/vendor/github.com/moby/spdystream/connection.go b/vendor/github.com/moby/spdystream/connection.go index d906bb05c..d649eccc8 100644 --- a/vendor/github.com/moby/spdystream/connection.go +++ b/vendor/github.com/moby/spdystream/connection.go @@ -208,9 +208,10 @@ type Connection struct { nextStreamId spdy.StreamId receivedStreamId spdy.StreamId - pingIdLock sync.Mutex - pingId uint32 - pingChans map[uint32]chan error + // pingLock protects pingChans and pingId + pingLock sync.Mutex + pingId uint32 + pingChans map[uint32]chan error shutdownLock sync.Mutex shutdownChan chan error @@ -274,16 +275,20 @@ func NewConnection(conn net.Conn, server bool) (*Connection, error) { // returns the response time func (s *Connection) Ping() (time.Duration, error) { pid := s.pingId - s.pingIdLock.Lock() + s.pingLock.Lock() if s.pingId > 0x7ffffffe { s.pingId = s.pingId - 0x7ffffffe } else { s.pingId = s.pingId + 2 } - s.pingIdLock.Unlock() pingChan := make(chan error) s.pingChans[pid] = pingChan - defer delete(s.pingChans, pid) + s.pingLock.Unlock() + defer func() { + s.pingLock.Lock() + delete(s.pingChans, pid) + s.pingLock.Unlock() + }() frame := &spdy.PingFrame{Id: pid} startTime := time.Now() @@ -612,10 +617,14 @@ func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { } func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error { - if s.pingId&0x01 != frame.Id&0x01 { + s.pingLock.Lock() + pingId := s.pingId + pingChan, pingOk := s.pingChans[frame.Id] + s.pingLock.Unlock() + + if pingId&0x01 != frame.Id&0x01 { return s.framer.WriteFrame(frame) } - pingChan, pingOk := s.pingChans[frame.Id] if pingOk { close(pingChan) } @@ -731,16 +740,14 @@ func (s *Connection) shutdown(closeTimeout time.Duration) { if err != nil { duration := 10 * time.Minute - time.AfterFunc(duration, func() { - select { - case err, ok := <-s.shutdownChan: - if ok { - debugMessage("Unhandled close error after %s: %s", duration, err) - } - default: - } - }) - s.shutdownChan <- err + timer := time.NewTimer(duration) + defer timer.Stop() + select { + case s.shutdownChan <- err: + // error was handled + case <-timer.C: + debugMessage("Unhandled close error after %s: %s", duration, err) + } } close(s.shutdownChan) } diff --git a/vendor/github.com/moby/spdystream/stream.go b/vendor/github.com/moby/spdystream/stream.go index 404e3c02d..171c1e9e3 100644 --- a/vendor/github.com/moby/spdystream/stream.go +++ b/vendor/github.com/moby/spdystream/stream.go @@ -305,6 +305,8 @@ func (s *Stream) Identifier() uint32 { // IsFinished returns whether the stream has finished // sending data func (s *Stream) IsFinished() bool { + s.finishLock.Lock() + defer s.finishLock.Unlock() return s.finished } diff --git a/vendor/github.com/morikuni/aec/LICENSE b/vendor/github.com/morikuni/aec/LICENSE deleted file mode 100644 index 1c2640164..000000000 --- a/vendor/github.com/morikuni/aec/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Taihei Morikuni - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the 
Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/morikuni/aec/README.md b/vendor/github.com/morikuni/aec/README.md deleted file mode 100644 index 3cbc4343e..000000000 --- a/vendor/github.com/morikuni/aec/README.md +++ /dev/null @@ -1,178 +0,0 @@ -# aec - -[![GoDoc](https://godoc.org/github.com/morikuni/aec?status.svg)](https://godoc.org/github.com/morikuni/aec) - -Go wrapper for ANSI escape code. - -## Install - -```bash -go get github.com/morikuni/aec -``` - -## Features - -ANSI escape codes depend on terminal environment. -Some of these features may not work. -Check supported Font-Style/Font-Color features with [checkansi](./checkansi). - -[Wikipedia](https://en.wikipedia.org/wiki/ANSI_escape_code) for more detail. - -### Cursor - -- `Up(n)` -- `Down(n)` -- `Right(n)` -- `Left(n)` -- `NextLine(n)` -- `PreviousLine(n)` -- `Column(col)` -- `Position(row, col)` -- `Save` -- `Restore` -- `Hide` -- `Show` -- `Report` - -### Erase - -- `EraseDisplay(mode)` -- `EraseLine(mode)` - -### Scroll - -- `ScrollUp(n)` -- `ScrollDown(n)` - -### Font Style - -- `Bold` -- `Faint` -- `Italic` -- `Underline` -- `BlinkSlow` -- `BlinkRapid` -- `Inverse` -- `Conceal` -- `CrossOut` -- `Frame` -- `Encircle` -- `Overline` - -### Font Color - -Foreground color. - -- `DefaultF` -- `BlackF` -- `RedF` -- `GreenF` -- `YellowF` -- `BlueF` -- `MagentaF` -- `CyanF` -- `WhiteF` -- `LightBlackF` -- `LightRedF` -- `LightGreenF` -- `LightYellowF` -- `LightBlueF` -- `LightMagentaF` -- `LightCyanF` -- `LightWhiteF` -- `Color3BitF(color)` -- `Color8BitF(color)` -- `FullColorF(r, g, b)` - -Background color. - -- `DefaultB` -- `BlackB` -- `RedB` -- `GreenB` -- `YellowB` -- `BlueB` -- `MagentaB` -- `CyanB` -- `WhiteB` -- `LightBlackB` -- `LightRedB` -- `LightGreenB` -- `LightYellowB` -- `LightBlueB` -- `LightMagentaB` -- `LightCyanB` -- `LightWhiteB` -- `Color3BitB(color)` -- `Color8BitB(color)` -- `FullColorB(r, g, b)` - -### Color Converter - -24bit RGB color to ANSI color. - -- `NewRGB3Bit(r, g, b)` -- `NewRGB8Bit(r, g, b)` - -### Builder - -To mix these features. - -```go -custom := aec.EmptyBuilder.Right(2).RGB8BitF(128, 255, 64).RedB().ANSI -custom.Apply("Hello World") -``` - -## Usage - -1. Create ANSI by `aec.XXX().With(aec.YYY())` or `aec.EmptyBuilder.XXX().YYY().ANSI` -2. Print ANSI by `fmt.Print(ansi, "some string", aec.Reset)` or `fmt.Print(ansi.Apply("some string"))` - -`aec.Reset` should be added when using font style or font color features. - -## Example - -Simple progressbar. 
- -![sample](./sample.gif) - -```go -package main - -import ( - "fmt" - "strings" - "time" - - "github.com/morikuni/aec" -) - -func main() { - const n = 20 - builder := aec.EmptyBuilder - - up2 := aec.Up(2) - col := aec.Column(n + 2) - bar := aec.Color8BitF(aec.NewRGB8Bit(64, 255, 64)) - label := builder.LightRedF().Underline().With(col).Right(1).ANSI - - // for up2 - fmt.Println() - fmt.Println() - - for i := 0; i <= n; i++ { - fmt.Print(up2) - fmt.Println(label.Apply(fmt.Sprint(i, "/", n))) - fmt.Print("[") - fmt.Print(bar.Apply(strings.Repeat("=", i))) - fmt.Println(col.Apply("]")) - time.Sleep(100 * time.Millisecond) - } -} -``` - -## License - -[MIT](./LICENSE) - - diff --git a/vendor/github.com/morikuni/aec/aec.go b/vendor/github.com/morikuni/aec/aec.go deleted file mode 100644 index 566be6eb1..000000000 --- a/vendor/github.com/morikuni/aec/aec.go +++ /dev/null @@ -1,137 +0,0 @@ -package aec - -import "fmt" - -// EraseMode is listed in a variable EraseModes. -type EraseMode uint - -var ( - // EraseModes is a list of EraseMode. - EraseModes struct { - // All erase all. - All EraseMode - - // Head erase to head. - Head EraseMode - - // Tail erase to tail. - Tail EraseMode - } - - // Save saves the cursor position. - Save ANSI - - // Restore restores the cursor position. - Restore ANSI - - // Hide hides the cursor. - Hide ANSI - - // Show shows the cursor. - Show ANSI - - // Report reports the cursor position. - Report ANSI -) - -// Up moves up the cursor. -func Up(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dA", n)) -} - -// Down moves down the cursor. -func Down(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dB", n)) -} - -// Right moves right the cursor. -func Right(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dC", n)) -} - -// Left moves left the cursor. -func Left(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dD", n)) -} - -// NextLine moves down the cursor to head of a line. -func NextLine(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dE", n)) -} - -// PreviousLine moves up the cursor to head of a line. -func PreviousLine(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dF", n)) -} - -// Column set the cursor position to a given column. -func Column(col uint) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dG", col)) -} - -// Position set the cursor position to a given absolute position. -func Position(row, col uint) ANSI { - return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col)) -} - -// EraseDisplay erases display by given EraseMode. -func EraseDisplay(m EraseMode) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dJ", m)) -} - -// EraseLine erases lines by given EraseMode. -func EraseLine(m EraseMode) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dK", m)) -} - -// ScrollUp scrolls up the page. -func ScrollUp(n int) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dS", n)) -} - -// ScrollDown scrolls down the page. 
-func ScrollDown(n int) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dT", n)) -} - -func init() { - EraseModes = struct { - All EraseMode - Head EraseMode - Tail EraseMode - }{ - Tail: 0, - Head: 1, - All: 2, - } - - Save = newAnsi(esc + "s") - Restore = newAnsi(esc + "u") - Hide = newAnsi(esc + "?25l") - Show = newAnsi(esc + "?25h") - Report = newAnsi(esc + "6n") -} diff --git a/vendor/github.com/morikuni/aec/ansi.go b/vendor/github.com/morikuni/aec/ansi.go deleted file mode 100644 index e60722e6e..000000000 --- a/vendor/github.com/morikuni/aec/ansi.go +++ /dev/null @@ -1,59 +0,0 @@ -package aec - -import ( - "fmt" - "strings" -) - -const esc = "\x1b[" - -// Reset resets SGR effect. -const Reset string = "\x1b[0m" - -var empty = newAnsi("") - -// ANSI represents ANSI escape code. -type ANSI interface { - fmt.Stringer - - // With adapts given ANSIs. - With(...ANSI) ANSI - - // Apply wraps given string in ANSI. - Apply(string) string -} - -type ansiImpl string - -func newAnsi(s string) *ansiImpl { - r := ansiImpl(s) - return &r -} - -func (a *ansiImpl) With(ansi ...ANSI) ANSI { - return concat(append([]ANSI{a}, ansi...)) -} - -func (a *ansiImpl) Apply(s string) string { - return a.String() + s + Reset -} - -func (a *ansiImpl) String() string { - return string(*a) -} - -// Apply wraps given string in ANSIs. -func Apply(s string, ansi ...ANSI) string { - if len(ansi) == 0 { - return s - } - return concat(ansi).Apply(s) -} - -func concat(ansi []ANSI) ANSI { - strs := make([]string, 0, len(ansi)) - for _, p := range ansi { - strs = append(strs, p.String()) - } - return newAnsi(strings.Join(strs, "")) -} diff --git a/vendor/github.com/morikuni/aec/builder.go b/vendor/github.com/morikuni/aec/builder.go deleted file mode 100644 index 13bd002d4..000000000 --- a/vendor/github.com/morikuni/aec/builder.go +++ /dev/null @@ -1,388 +0,0 @@ -package aec - -// Builder is a lightweight syntax to construct customized ANSI. -type Builder struct { - ANSI ANSI -} - -// EmptyBuilder is an initialized Builder. -var EmptyBuilder *Builder - -// NewBuilder creates a Builder from existing ANSI. -func NewBuilder(a ...ANSI) *Builder { - return &Builder{concat(a)} -} - -// With is a syntax for With. -func (builder *Builder) With(a ...ANSI) *Builder { - return NewBuilder(builder.ANSI.With(a...)) -} - -// Up is a syntax for Up. -func (builder *Builder) Up(n uint) *Builder { - return builder.With(Up(n)) -} - -// Down is a syntax for Down. -func (builder *Builder) Down(n uint) *Builder { - return builder.With(Down(n)) -} - -// Right is a syntax for Right. -func (builder *Builder) Right(n uint) *Builder { - return builder.With(Right(n)) -} - -// Left is a syntax for Left. -func (builder *Builder) Left(n uint) *Builder { - return builder.With(Left(n)) -} - -// NextLine is a syntax for NextLine. -func (builder *Builder) NextLine(n uint) *Builder { - return builder.With(NextLine(n)) -} - -// PreviousLine is a syntax for PreviousLine. -func (builder *Builder) PreviousLine(n uint) *Builder { - return builder.With(PreviousLine(n)) -} - -// Column is a syntax for Column. -func (builder *Builder) Column(col uint) *Builder { - return builder.With(Column(col)) -} - -// Position is a syntax for Position. -func (builder *Builder) Position(row, col uint) *Builder { - return builder.With(Position(row, col)) -} - -// EraseDisplay is a syntax for EraseDisplay. -func (builder *Builder) EraseDisplay(m EraseMode) *Builder { - return builder.With(EraseDisplay(m)) -} - -// EraseLine is a syntax for EraseLine. 
-func (builder *Builder) EraseLine(m EraseMode) *Builder { - return builder.With(EraseLine(m)) -} - -// ScrollUp is a syntax for ScrollUp. -func (builder *Builder) ScrollUp(n int) *Builder { - return builder.With(ScrollUp(n)) -} - -// ScrollDown is a syntax for ScrollDown. -func (builder *Builder) ScrollDown(n int) *Builder { - return builder.With(ScrollDown(n)) -} - -// Save is a syntax for Save. -func (builder *Builder) Save() *Builder { - return builder.With(Save) -} - -// Restore is a syntax for Restore. -func (builder *Builder) Restore() *Builder { - return builder.With(Restore) -} - -// Hide is a syntax for Hide. -func (builder *Builder) Hide() *Builder { - return builder.With(Hide) -} - -// Show is a syntax for Show. -func (builder *Builder) Show() *Builder { - return builder.With(Show) -} - -// Report is a syntax for Report. -func (builder *Builder) Report() *Builder { - return builder.With(Report) -} - -// Bold is a syntax for Bold. -func (builder *Builder) Bold() *Builder { - return builder.With(Bold) -} - -// Faint is a syntax for Faint. -func (builder *Builder) Faint() *Builder { - return builder.With(Faint) -} - -// Italic is a syntax for Italic. -func (builder *Builder) Italic() *Builder { - return builder.With(Italic) -} - -// Underline is a syntax for Underline. -func (builder *Builder) Underline() *Builder { - return builder.With(Underline) -} - -// BlinkSlow is a syntax for BlinkSlow. -func (builder *Builder) BlinkSlow() *Builder { - return builder.With(BlinkSlow) -} - -// BlinkRapid is a syntax for BlinkRapid. -func (builder *Builder) BlinkRapid() *Builder { - return builder.With(BlinkRapid) -} - -// Inverse is a syntax for Inverse. -func (builder *Builder) Inverse() *Builder { - return builder.With(Inverse) -} - -// Conceal is a syntax for Conceal. -func (builder *Builder) Conceal() *Builder { - return builder.With(Conceal) -} - -// CrossOut is a syntax for CrossOut. -func (builder *Builder) CrossOut() *Builder { - return builder.With(CrossOut) -} - -// BlackF is a syntax for BlackF. -func (builder *Builder) BlackF() *Builder { - return builder.With(BlackF) -} - -// RedF is a syntax for RedF. -func (builder *Builder) RedF() *Builder { - return builder.With(RedF) -} - -// GreenF is a syntax for GreenF. -func (builder *Builder) GreenF() *Builder { - return builder.With(GreenF) -} - -// YellowF is a syntax for YellowF. -func (builder *Builder) YellowF() *Builder { - return builder.With(YellowF) -} - -// BlueF is a syntax for BlueF. -func (builder *Builder) BlueF() *Builder { - return builder.With(BlueF) -} - -// MagentaF is a syntax for MagentaF. -func (builder *Builder) MagentaF() *Builder { - return builder.With(MagentaF) -} - -// CyanF is a syntax for CyanF. -func (builder *Builder) CyanF() *Builder { - return builder.With(CyanF) -} - -// WhiteF is a syntax for WhiteF. -func (builder *Builder) WhiteF() *Builder { - return builder.With(WhiteF) -} - -// DefaultF is a syntax for DefaultF. -func (builder *Builder) DefaultF() *Builder { - return builder.With(DefaultF) -} - -// BlackB is a syntax for BlackB. -func (builder *Builder) BlackB() *Builder { - return builder.With(BlackB) -} - -// RedB is a syntax for RedB. -func (builder *Builder) RedB() *Builder { - return builder.With(RedB) -} - -// GreenB is a syntax for GreenB. -func (builder *Builder) GreenB() *Builder { - return builder.With(GreenB) -} - -// YellowB is a syntax for YellowB. -func (builder *Builder) YellowB() *Builder { - return builder.With(YellowB) -} - -// BlueB is a syntax for BlueB. 
-func (builder *Builder) BlueB() *Builder { - return builder.With(BlueB) -} - -// MagentaB is a syntax for MagentaB. -func (builder *Builder) MagentaB() *Builder { - return builder.With(MagentaB) -} - -// CyanB is a syntax for CyanB. -func (builder *Builder) CyanB() *Builder { - return builder.With(CyanB) -} - -// WhiteB is a syntax for WhiteB. -func (builder *Builder) WhiteB() *Builder { - return builder.With(WhiteB) -} - -// DefaultB is a syntax for DefaultB. -func (builder *Builder) DefaultB() *Builder { - return builder.With(DefaultB) -} - -// Frame is a syntax for Frame. -func (builder *Builder) Frame() *Builder { - return builder.With(Frame) -} - -// Encircle is a syntax for Encircle. -func (builder *Builder) Encircle() *Builder { - return builder.With(Encircle) -} - -// Overline is a syntax for Overline. -func (builder *Builder) Overline() *Builder { - return builder.With(Overline) -} - -// LightBlackF is a syntax for LightBlueF. -func (builder *Builder) LightBlackF() *Builder { - return builder.With(LightBlackF) -} - -// LightRedF is a syntax for LightRedF. -func (builder *Builder) LightRedF() *Builder { - return builder.With(LightRedF) -} - -// LightGreenF is a syntax for LightGreenF. -func (builder *Builder) LightGreenF() *Builder { - return builder.With(LightGreenF) -} - -// LightYellowF is a syntax for LightYellowF. -func (builder *Builder) LightYellowF() *Builder { - return builder.With(LightYellowF) -} - -// LightBlueF is a syntax for LightBlueF. -func (builder *Builder) LightBlueF() *Builder { - return builder.With(LightBlueF) -} - -// LightMagentaF is a syntax for LightMagentaF. -func (builder *Builder) LightMagentaF() *Builder { - return builder.With(LightMagentaF) -} - -// LightCyanF is a syntax for LightCyanF. -func (builder *Builder) LightCyanF() *Builder { - return builder.With(LightCyanF) -} - -// LightWhiteF is a syntax for LightWhiteF. -func (builder *Builder) LightWhiteF() *Builder { - return builder.With(LightWhiteF) -} - -// LightBlackB is a syntax for LightBlackB. -func (builder *Builder) LightBlackB() *Builder { - return builder.With(LightBlackB) -} - -// LightRedB is a syntax for LightRedB. -func (builder *Builder) LightRedB() *Builder { - return builder.With(LightRedB) -} - -// LightGreenB is a syntax for LightGreenB. -func (builder *Builder) LightGreenB() *Builder { - return builder.With(LightGreenB) -} - -// LightYellowB is a syntax for LightYellowB. -func (builder *Builder) LightYellowB() *Builder { - return builder.With(LightYellowB) -} - -// LightBlueB is a syntax for LightBlueB. -func (builder *Builder) LightBlueB() *Builder { - return builder.With(LightBlueB) -} - -// LightMagentaB is a syntax for LightMagentaB. -func (builder *Builder) LightMagentaB() *Builder { - return builder.With(LightMagentaB) -} - -// LightCyanB is a syntax for LightCyanB. -func (builder *Builder) LightCyanB() *Builder { - return builder.With(LightCyanB) -} - -// LightWhiteB is a syntax for LightWhiteB. -func (builder *Builder) LightWhiteB() *Builder { - return builder.With(LightWhiteB) -} - -// Color3BitF is a syntax for Color3BitF. -func (builder *Builder) Color3BitF(c RGB3Bit) *Builder { - return builder.With(Color3BitF(c)) -} - -// Color3BitB is a syntax for Color3BitB. -func (builder *Builder) Color3BitB(c RGB3Bit) *Builder { - return builder.With(Color3BitB(c)) -} - -// Color8BitF is a syntax for Color8BitF. -func (builder *Builder) Color8BitF(c RGB8Bit) *Builder { - return builder.With(Color8BitF(c)) -} - -// Color8BitB is a syntax for Color8BitB. 
-func (builder *Builder) Color8BitB(c RGB8Bit) *Builder { - return builder.With(Color8BitB(c)) -} - -// FullColorF is a syntax for FullColorF. -func (builder *Builder) FullColorF(r, g, b uint8) *Builder { - return builder.With(FullColorF(r, g, b)) -} - -// FullColorB is a syntax for FullColorB. -func (builder *Builder) FullColorB(r, g, b uint8) *Builder { - return builder.With(FullColorB(r, g, b)) -} - -// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit. -func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder { - return builder.Color3BitF(NewRGB3Bit(r, g, b)) -} - -// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit. -func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder { - return builder.Color3BitB(NewRGB3Bit(r, g, b)) -} - -// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit. -func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder { - return builder.Color8BitF(NewRGB8Bit(r, g, b)) -} - -// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit. -func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder { - return builder.Color8BitB(NewRGB8Bit(r, g, b)) -} - -func init() { - EmptyBuilder = &Builder{empty} -} diff --git a/vendor/github.com/morikuni/aec/sample.gif b/vendor/github.com/morikuni/aec/sample.gif deleted file mode 100644 index c6c613bb7..000000000 Binary files a/vendor/github.com/morikuni/aec/sample.gif and /dev/null differ diff --git a/vendor/github.com/morikuni/aec/sgr.go b/vendor/github.com/morikuni/aec/sgr.go deleted file mode 100644 index 0ba3464e6..000000000 --- a/vendor/github.com/morikuni/aec/sgr.go +++ /dev/null @@ -1,202 +0,0 @@ -package aec - -import ( - "fmt" -) - -// RGB3Bit is a 3bit RGB color. -type RGB3Bit uint8 - -// RGB8Bit is a 8bit RGB color. -type RGB8Bit uint8 - -func newSGR(n uint) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dm", n)) -} - -// NewRGB3Bit create a RGB3Bit from given RGB. -func NewRGB3Bit(r, g, b uint8) RGB3Bit { - return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4)) -} - -// NewRGB8Bit create a RGB8Bit from given RGB. -func NewRGB8Bit(r, g, b uint8) RGB8Bit { - return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43) -} - -// Color3BitF set the foreground color of text. -func Color3BitF(c RGB3Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dm", c+30)) -} - -// Color3BitB set the background color of text. -func Color3BitB(c RGB3Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dm", c+40)) -} - -// Color8BitF set the foreground color of text. -func Color8BitF(c RGB8Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c)) -} - -// Color8BitB set the background color of text. -func Color8BitB(c RGB8Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c)) -} - -// FullColorF set the foreground color of text. -func FullColorF(r, g, b uint8) ANSI { - return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b)) -} - -// FullColorB set the foreground color of text. -func FullColorB(r, g, b uint8) ANSI { - return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b)) -} - -// Style -var ( - // Bold set the text style to bold or increased intensity. - Bold ANSI - - // Faint set the text style to faint. - Faint ANSI - - // Italic set the text style to italic. - Italic ANSI - - // Underline set the text style to underline. - Underline ANSI - - // BlinkSlow set the text style to slow blink. - BlinkSlow ANSI - - // BlinkRapid set the text style to rapid blink. - BlinkRapid ANSI - - // Inverse swap the foreground color and background color. - Inverse ANSI - - // Conceal set the text style to conceal. 
- Conceal ANSI - - // CrossOut set the text style to crossed out. - CrossOut ANSI - - // Frame set the text style to framed. - Frame ANSI - - // Encircle set the text style to encircled. - Encircle ANSI - - // Overline set the text style to overlined. - Overline ANSI -) - -// Foreground color of text. -var ( - // DefaultF is the default color of foreground. - DefaultF ANSI - - // Normal color - BlackF ANSI - RedF ANSI - GreenF ANSI - YellowF ANSI - BlueF ANSI - MagentaF ANSI - CyanF ANSI - WhiteF ANSI - - // Light color - LightBlackF ANSI - LightRedF ANSI - LightGreenF ANSI - LightYellowF ANSI - LightBlueF ANSI - LightMagentaF ANSI - LightCyanF ANSI - LightWhiteF ANSI -) - -// Background color of text. -var ( - // DefaultB is the default color of background. - DefaultB ANSI - - // Normal color - BlackB ANSI - RedB ANSI - GreenB ANSI - YellowB ANSI - BlueB ANSI - MagentaB ANSI - CyanB ANSI - WhiteB ANSI - - // Light color - LightBlackB ANSI - LightRedB ANSI - LightGreenB ANSI - LightYellowB ANSI - LightBlueB ANSI - LightMagentaB ANSI - LightCyanB ANSI - LightWhiteB ANSI -) - -func init() { - Bold = newSGR(1) - Faint = newSGR(2) - Italic = newSGR(3) - Underline = newSGR(4) - BlinkSlow = newSGR(5) - BlinkRapid = newSGR(6) - Inverse = newSGR(7) - Conceal = newSGR(8) - CrossOut = newSGR(9) - - BlackF = newSGR(30) - RedF = newSGR(31) - GreenF = newSGR(32) - YellowF = newSGR(33) - BlueF = newSGR(34) - MagentaF = newSGR(35) - CyanF = newSGR(36) - WhiteF = newSGR(37) - - DefaultF = newSGR(39) - - BlackB = newSGR(40) - RedB = newSGR(41) - GreenB = newSGR(42) - YellowB = newSGR(43) - BlueB = newSGR(44) - MagentaB = newSGR(45) - CyanB = newSGR(46) - WhiteB = newSGR(47) - - DefaultB = newSGR(49) - - Frame = newSGR(51) - Encircle = newSGR(52) - Overline = newSGR(53) - - LightBlackF = newSGR(90) - LightRedF = newSGR(91) - LightGreenF = newSGR(92) - LightYellowF = newSGR(93) - LightBlueF = newSGR(94) - LightMagentaF = newSGR(95) - LightCyanF = newSGR(96) - LightWhiteF = newSGR(97) - - LightBlackB = newSGR(100) - LightRedB = newSGR(101) - LightGreenB = newSGR(102) - LightYellowB = newSGR(103) - LightBlueB = newSGR(104) - LightMagentaB = newSGR(105) - LightCyanB = newSGR(106) - LightWhiteB = newSGR(107) -} diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index fbe515639..fd6e070c1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,158 @@ +## 2.22.0 + +### Features +- Add label to serial nodes [0fcaa08] + +This allows serial tests to be filtered using the `label-filter` + +### Maintenance +Various doc fixes + +## 2.21.0 + + + ### Features + - add support for GINKGO_TIME_FORMAT [a69eb39] + - add GINKGO_NO_COLOR to disable colors via environment variables [bcab9c8] + + ### Fixes + - increase threshold in timeline matcher [e548367] + - Fix the document by replacing `SpecsThatWillBeRun` with `SpecsThatWillRun` + [c2c4d3c] + + ### Maintenance + - bump various dependencies [7e65a00] + +## 2.20.2 + +Require Go 1.22+ + +### Maintenance +- bump go to v1.22 [a671816] + +## 2.20.1 + +### Fixes +- make BeSpecEvent duration matcher more forgiving [d6f9640] + +## 2.20.0 + +### Features +- Add buildvcs flag [be5ab95] + +### Maintenance +- Add update-deps to makefile [d303d14] +- bump all dependencies [7a50221] + +## 2.19.1 + +### Fixes +- update supported platforms for race conditions [63c8c30] +- [build] Allow custom name for binaries. 
[ff41e27] + +### Maintenance +- bump gomega [76f4e0c] +- Bump rexml from 3.2.6 to 3.2.8 in /docs (#1417) [b69c00d] +- Bump golang.org/x/sys from 0.20.0 to 0.21.0 (#1425) [f097741] + +## 2.19.0 + +### Features + +[Label Sets](https://onsi.github.io/ginkgo/#label-sets) allow for more expressive and flexible label filtering. + +## 2.18.0 + +### Features +- Add --slience-skips and --force-newlines [f010b65] +- fail when no tests were run and --fail-on-empty was set [d80eebe] + +### Fixes +- Fix table entry context edge case [42013d6] + +### Maintenance +- Bump golang.org/x/tools from 0.20.0 to 0.21.0 (#1406) [fcf1fd7] +- Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 (#1399) [8bb14fd] +- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#1407) [04bfad7] + +## 2.17.3 + +### Fixes +`ginkgo watch` now ignores hidden files [bde6e00] + +## 2.17.2 + +### Fixes +- fix: close files [32259c8] +- fix github output log level for skipped specs [780e7a3] + +### Maintenance +- Bump github.com/google/pprof [d91fe4e] +- Bump github.com/go-task/slim-sprig to v3 [8cb662e] +- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1391) [3134422] +- Bump github-pages from 230 to 231 in /docs (#1384) [eca81b4] +- Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#1383) [760def8] +- Bump golang.org/x/net from 0.23.0 to 0.24.0 (#1381) [4ce33f4] +- Fix test for gomega version bump [f2fcd97] +- Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#1390) [fd622d2] +- Bump golang.org/x/tools from 0.17.0 to 0.19.0 (#1368) [5474a26] +- Bump github-pages from 229 to 230 in /docs (#1359) [e6d1170] +- Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 (#1374) [7f447b2] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#1380) [f15239a] + +## 2.17.1 + +### Fixes +- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d] + +## 2.17.0 + +### Features + +- add `--github-output` for nicer output in github actions [e8a2056] + +### Maintenance + +- fix typo in core_dsl.go [977bc6f] +- Fix typo in docs [e297e7b] + +## 2.16.0 + +### Features +- add SpecContext to reporting nodes + +### Fixes +- merge coverages instead of combining them (#1329) (#1340) [23f0cc5] +- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7] + +### Maintenance +- docs/index.md: Typo [2cebe8d] +- fix docs [06de431] +- chore: test with Go 1.22 (#1352) [898cba9] +- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120] +- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed] +- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69] +- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d] +- Fix docs for handling failures in goroutines (#1339) [4471b2e] + +## 2.15.0 + +### Features + +- JUnit reports now interpret Label(owner:X) and set owner to X. 
[8f3bd70] +- include cancellation reason when cancelling spec context [96e915c] + +### Fixes + +- emit output of failed go tool cover invocation so users can try to debug things for themselves [c245d09] +- fix outline when using nodot in ginkgo v2 [dca77c8] +- Document areas where GinkgoT() behaves differently from testing.T [dbaf18f] +- bugfix(docs): use Unsetenv instead of Clearenv (#1337) [6f67a14] + +### Maintenance + +- Bump to go 1.20 [4fcd0b3] + ## 2.14.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md index 1da92fe7e..80de566a5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md +++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md @@ -6,8 +6,10 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp - Ensure adequate test coverage: - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder). - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. -- Make sure all the tests succeed via `ginkgo -r -p` -- Vet your changes via `go vet ./...` -- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. +- Run `make` or: + - Install ginkgo locally via `go install ./...` + - Make sure all the tests succeed via `ginkgo -r -p` + - Vet your changes via `go vet ./...` +- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle && bundle exec jekyll serve` in the `docs` directory to preview your changes. -Thanks for supporting Ginkgo! \ No newline at end of file +Thanks for supporting Ginkgo! diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile new file mode 100644 index 000000000..06dff97cd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/Makefile @@ -0,0 +1,16 @@ +# default task since it's first +.PHONY: all +all: vet test + +.PHONY: test +test: + go run github.com/onsi/ginkgo/v2/ginkgo -r -p -randomize-all -keep-going + +.PHONY: vet +vet: + go vet ./... + +.PHONY: update-deps +update-deps: + go get -u ./... + go mod tidy \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index 2d7a70ecc..a3e8237e9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -292,7 +292,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) @@ -345,6 +345,15 @@ func extractSuiteConfiguration(args []interface{}) Labels { return suiteLabels } +func getwd() (string, error) { + if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") { + // Getwd calls os.Getenv("PWD"), which breaks test caching if the cache + // is shared between two different directories with the same test code. + return os.Getwd() + } + return "", nil +} + /* PreviewSpecs walks the testing tree and produces a report without actually invoking the specs. See http://onsi.github.io/ginkgo/#previewing-specs for more information. 
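As a quick illustration of the API documented above, here is a minimal sketch of previewing a suite; the suite name and test function are illustrative, and the `Report`/`SpecReport` fields used are the ones this diff works with:

```go
package books_test

import (
	"fmt"
	"testing"

	. "github.com/onsi/ginkgo/v2"
)

// PreviewSpecs walks the tree and reports every spec without running any.
func TestBooksPreview(t *testing.T) {
	report := PreviewSpecs("Books Suite")
	for _, spec := range report.SpecReports {
		fmt.Println(spec.State, spec.FullText())
	}
}
```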
@@ -369,7 +378,7 @@ func PreviewSpecs(description string, args ...any) Report { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) @@ -783,8 +792,8 @@ DeferCleanup can be passed: For example: BeforeEach(func() { - DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) - os.SetEnv("FOO", "BAR") + DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO")) + os.Setenv("FOO", "BAR") }) will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 743555dde..4d5749114 100644 --- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -82,6 +82,10 @@ func New(colorMode ColorMode) Formatter { return fmt.Sprintf("\x1b[38;5;%dm", colorCode) } + if _, noColor := os.LookupEnv("GINKGO_NO_COLOR"); noColor { + colorMode = ColorModeNone + } + f := Formatter{ ColorMode: colorMode, colors: map[string]string{ diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index 5db5d1a7b..fd1726084 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -2,6 +2,8 @@ package build import ( "fmt" + "os" + "path" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" @@ -53,7 +55,18 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go if suite.State.Is(internal.TestSuiteStateFailedToCompile) { fmt.Println(suite.CompilationError.Error()) } else { - fmt.Printf("Compiled %s.test\n", suite.PackageName) + if len(goFlagsConfig.O) == 0 { + goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test") + } else { + stat, err := os.Stat(goFlagsConfig.O) + if err != nil { + panic(err) + } + if stat.IsDir() { + goFlagsConfig.O += "/" + suite.PackageName + ".test" + } + } + fmt.Printf("Compiled %s\n", goFlagsConfig.O) } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go index 73aff0b7a..b2dc59be6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -7,7 +7,7 @@ import ( "os" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index be01dec97..cf3b7cb6d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -10,7 +10,7 @@ import ( "strings" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" @@ -174,6 +174,7 @@ func 
moduleName(modRoot string) string { if err != nil { return "" } + defer modFile.Close() mod := make([]byte, 128) _, err = modFile.Read(mod) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go index 86da7340d..48827cc5e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -25,6 +25,18 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite return suite } + if len(goFlagsConfig.O) > 0 { + userDefinedPath, err := filepath.Abs(goFlagsConfig.O) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error()) + return suite + } + path = userDefinedPath + } + + goFlagsConfig.O = path + ginkgoInvocationPath, _ := os.Getwd() ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) packagePath := suite.AbsPath() @@ -34,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) return suite } - args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath) if err != nil { suite.State = TestSuiteStateFailedToCompile suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go new file mode 100644 index 000000000..3c5079ff4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -0,0 +1,129 @@ +// Copyright (c) 2015, Wade Simmons +// All rights reserved. + +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: + +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
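The build-command change above lets `-o` name either a file or a directory. A reduced sketch of that resolution rule follows; the helper name and demo paths are illustrative:

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// resolveOutput mirrors the rule above: no -o yields "<suitePath>/<pkg>.test";
// a -o naming an existing directory gets "<pkg>.test" appended; anything else
// is used verbatim as the binary path.
func resolveOutput(o, suitePath, pkg string) string {
	if o == "" {
		return path.Join(suitePath, pkg+".test")
	}
	if stat, err := os.Stat(o); err == nil && stat.IsDir() {
		return path.Join(o, pkg+".test")
	}
	return o
}

func main() {
	fmt.Println(resolveOutput("", "./books", "books"))     // ./books/books.test
	fmt.Println(resolveOutput(os.TempDir(), ".", "books")) // e.g. /tmp/books.test
	fmt.Println(resolveOutput("bin/custom", ".", "books")) // bin/custom
}
```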
+ +// Package gocovmerge takes the results from multiple `go test -coverprofile` +// runs and merges them into one profile + +// this file was originally taken from the gocovmerge project +// see also: https://go.shabbyrobe.org/gocovmerge +package internal + +import ( + "fmt" + "io" + "sort" + + "golang.org/x/tools/cover" +) + +func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile { + i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName }) + if i < len(profiles) && profiles[i].FileName == p.FileName { + MergeCoverProfiles(profiles[i], p) + } else { + profiles = append(profiles, nil) + copy(profiles[i+1:], profiles[i:]) + profiles[i] = p + } + return profiles +} + +func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error { + if len(profiles) == 0 { + return nil + } + if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil { + return err + } + for _, p := range profiles { + for _, b := range p.Blocks { + if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil { + return err + } + } + } + return nil +} + +func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error { + if into.Mode != merge.Mode { + return fmt.Errorf("cannot merge profiles with different modes") + } + // Since the blocks are sorted, we can keep track of where the last block + // was inserted and only look at the blocks after that as targets for merge + startIndex := 0 + for _, b := range merge.Blocks { + var err error + startIndex, err = mergeProfileBlock(into, b, startIndex) + if err != nil { + return err + } + } + return nil +} + +func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) { + sortFunc := func(i int) bool { + pi := p.Blocks[i+startIndex] + return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol) + } + + i := 0 + if sortFunc(i) != true { + i = sort.Search(len(p.Blocks)-startIndex, sortFunc) + } + + i += startIndex + if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol { + if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol { + return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb) + } + switch p.Mode { + case "set": + p.Blocks[i].Count |= pb.Count + case "count", "atomic": + p.Blocks[i].Count += pb.Count + default: + return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode) + } + + } else { + if i > 0 { + pa := p.Blocks[i-1] + if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) { + return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb) + } + } + if i < len(p.Blocks)-1 { + pa := p.Blocks[i+1] + if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) { + return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb) + } + } + p.Blocks = append(p.Blocks, cover.ProfileBlock{}) + copy(p.Blocks[i+1:], p.Blocks[i:]) + p.Blocks[i] = pb + } + + return i + 1, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index bd3c6d028..8e16d2bb0 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -1,7 
+1,6 @@ package internal import ( - "bytes" "fmt" "os" "os/exec" @@ -12,6 +11,7 @@ import ( "github.com/google/pprof/profile" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/cover" ) func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { @@ -144,38 +144,27 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC return messages, nil } -//loads each profile, combines them, deletes them, stores them in destination +// loads each profile, merges them, deletes them, stores them in destination func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { - combined := &bytes.Buffer{} - modeRegex := regexp.MustCompile(`^mode: .*\n`) - for i, profile := range profiles { - contents, err := os.ReadFile(profile) + var merged []*cover.Profile + for _, file := range profiles { + parsedProfiles, err := cover.ParseProfiles(file) if err != nil { - return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + return err } - os.Remove(profile) - - // remove the cover mode line from every file - // except the first one - if i > 0 { - contents = modeRegex.ReplaceAll(contents, []byte{}) - } - - _, err = combined.Write(contents) - - // Add a newline to the end of every file if missing. - if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { - _, err = combined.Write([]byte("\n")) - } - - if err != nil { - return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + os.Remove(file) + for _, p := range parsedProfiles { + merged = AddCoverProfile(merged, p) } } - - err := os.WriteFile(destination, combined.Bytes(), 0666) + dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + defer dst.Close() + err = DumpCoverProfiles(merged, dst) if err != nil { - return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + return err } return nil } @@ -184,7 +173,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) { cmd := exec.Command("go", "tool", "cover", "-func", profile) output, err := cmd.CombinedOutput() if err != nil { - return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output)) } re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) matches := re.FindStringSubmatch(string(output)) @@ -208,6 +197,7 @@ func MergeProfiles(profilePaths []string, destination string) error { return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) } prof, err := profile.Parse(proFile) + _ = proFile.Close() if err != nil { return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go index 958daccbf..5d8d00bb1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -1,10 +1,11 @@ package outline import ( - "github.com/onsi/ginkgo/v2/types" "go/ast" "go/token" "strconv" + + "github.com/onsi/ginkgo/v2/types" ) const ( diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go index 67ec5ab75..f0a6b5d26 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go +++ 
b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -28,14 +28,7 @@ func packageNameForImport(f *ast.File, path string) *string { } name := spec.Name.String() if name == "" { - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } + name = "ginkgo" } if name == "." { name = "" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go index 17d052bdc..0e6ae1f29 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strings" "time" ) @@ -79,6 +80,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } + if isHiddenFile(info) { + continue + } + if goTestRegExp.MatchString(info.Name()) { testHash += p.hashForFileInfo(info) if info.ModTime().After(testModifiedTime) { @@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti return } +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") +} + func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 639541a16..02c6739e5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -15,6 +15,11 @@ GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's GinkgoT() takes an optional offset argument that can be used to get the correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately +GinkgoT() attempts to mimic the behavior of `testing.T` with the exception of the following: + +- Error/Errorf: failures in Ginkgo always immediately stop execution and there is no mechanism to log a failure without aborting the test. As such Error/Errorf are equivalent to Fatal/Fatalf. +- Parallel() is a no-op as Ginkgo's multi-process parallelism model is substantially different from go test's in-process model. + You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries */ func GinkgoT(optionalOffset ...int) FullGinkgoTInterface { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 16f0dc227..0686f7410 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -5,9 +5,8 @@ import ( "fmt" "reflect" "sort" - "time" - "sync" + "time" "github.com/onsi/ginkgo/v2/types" ) @@ -16,8 +15,8 @@ var _global_node_id_counter = uint(0) var _global_id_mutex = &sync.Mutex{} func UniqueNodeID() uint { - //There's a reace in the internal integration tests if we don't make - //accessing _global_node_id_counter safe across goroutines. + // There's a reace in the internal integration tests if we don't make + // accessing _global_node_id_counter safe across goroutines. 
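Stepping back to the gocovmerge logic above: when two profiles contain the same block, the counts combine differently per cover mode. A tiny sketch of that rule (the function name is illustrative):

```go
package main

import "fmt"

// mergeCount mirrors the switch in mergeProfileBlock: "set" mode records
// whether a statement ran at all (boolean OR), while "count" and "atomic"
// accumulate how many times it ran.
func mergeCount(mode string, a, b int) (int, error) {
	switch mode {
	case "set":
		return a | b, nil
	case "count", "atomic":
		return a + b, nil
	default:
		return 0, fmt.Errorf("unsupported covermode %q", mode)
	}
}

func main() {
	n, _ := mergeCount("set", 1, 1)
	fmt.Println(n) // 1
	n, _ = mergeCount("count", 3, 2)
	fmt.Println(n) // 5
}
```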
_global_id_mutex.Lock() defer _global_id_mutex.Unlock() _global_node_id_counter += 1 @@ -44,8 +43,8 @@ type Node struct { SynchronizedAfterSuiteProc1Body func(SpecContext) SynchronizedAfterSuiteProc1BodyHasContext bool - ReportEachBody func(types.SpecReport) - ReportSuiteBody func(types.Report) + ReportEachBody func(SpecContext, types.SpecReport) + ReportSuiteBody func(SpecContext, types.Report) MarkedFocus bool MarkedPending bool @@ -209,7 +208,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy args = unrollInterfaceSlice(args) remainingArgs := []interface{}{} - //First get the CodeLocation up-to-date + // First get the CodeLocation up-to-date for _, arg := range args { switch v := arg.(type) { case Offset: @@ -225,11 +224,11 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy trackedFunctionError := false args = remainingArgs remainingArgs = []interface{}{} - //now process the rest of the args + // now process the rest of the args for _, arg := range args { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(float64(0)): - break //ignore deprecated timeouts + break // ignore deprecated timeouts case t == reflect.TypeOf(Focus): node.MarkedFocus = bool(arg.(focusType)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { @@ -242,6 +241,9 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } case t == reflect.TypeOf(Serial): node.MarkedSerial = bool(arg.(serialType)) + if !labelsSeen["Serial"] { + node.Labels = append(node.Labels, "Serial") + } if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial")) } @@ -325,7 +327,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy node.Body = func(SpecContext) { body() } } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { if node.ReportEachBody == nil { - node.ReportEachBody = arg.(func(types.SpecReport)) + if fn, ok := arg.(func(types.SpecReport)); ok { + node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) } + } else { + node.ReportEachBody = arg.(func(SpecContext, types.SpecReport)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -333,7 +340,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { if node.ReportSuiteBody == nil { - node.ReportSuiteBody = arg.(func(types.Report)) + if fn, ok := arg.(func(types.Report)); ok { + node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) } + } else { + node.ReportSuiteBody = arg.(func(SpecContext, types.Report)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -395,7 +407,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } - //validations + // validations if node.MarkedPending && node.MarkedFocus { appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go index 2515b84a1..2d2ea2fc3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go +++ 
b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -17,7 +17,7 @@ type specContext struct { context.Context *ProgressReporterManager - cancel context.CancelFunc + cancel context.CancelCauseFunc suite *Suite } @@ -30,7 +30,7 @@ Note that while SpecContext is used to enforce deadlines by Ginkgo it is not con This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation. */ func NewSpecContext(suite *Suite) *specContext { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancelCause(context.Background()) sc := &specContext{ cancel: cancel, suite: suite, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 6746152ec..a3c9e6bf1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -489,10 +489,15 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx])) } - if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending { + if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set") suite.report.SuiteSucceeded = false } + + if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set") + suite.report.SuiteSucceeded = false + } } if ranBeforeSuite { @@ -594,8 +599,8 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() report := suite.currentSpecReport - nodes[i].Body = func(SpecContext) { - nodes[i].ReportEachBody(report) + nodes[i].Body = func(ctx SpecContext) { + nodes[i].ReportEachBody(ctx, report) } state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i])) @@ -762,7 +767,7 @@ func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { report = report.Add(aggregatedReport) } - node.Body = func(SpecContext) { node.ReportSuiteBody(report) } + node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) } suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") suite.currentSpecReport.EndTime = time.Now() @@ -840,7 +845,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ timeoutInPlay = "node" } if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { - //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't + // we're out of time already. 
let's wait for a NodeTimeout if we have it, or GracePeriod if we don't if node.NodeTimeout > 0 { deadline = now.Add(node.NodeTimeout) timeoutInPlay = "node" @@ -858,7 +863,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } sc := NewSpecContext(suite) - defer sc.cancel() + defer sc.cancel(fmt.Errorf("spec has finished")) suite.selectiveLock.Lock() suite.currentSpecContext = sc @@ -918,9 +923,9 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ if outcomeFromRun != types.SpecStatePassed { additionalFailure := types.AdditionalFailure{ State: outcomeFromRun, - Failure: failure, //we make a copy - this will include all the configuration set up above... + Failure: failure, // we make a copy - this will include all the configuration set up above... } - //...and then we update the failure with the details from failureFromRun + // ...and then we update the failure with the details from failureFromRun additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation additionalFailure.Failure.ProgressReport = types.ProgressReport{} if outcome == types.SpecStateTimedout { @@ -958,8 +963,8 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ // tell the spec to stop. it's important we generate the progress report first to make sure we capture where // the spec is actually stuck - sc.cancel() - //and now we wait for the grace period + sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay)) + // and now we wait for the grace period gracePeriodChannel = time.After(gracePeriod) case <-interruptStatus.Channel: interruptStatus = suite.interruptHandler.Status() @@ -985,7 +990,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } progressReport = progressReport.WithoutOtherGoroutines() - sc.cancel() + sc.cancel(fmt.Errorf(interruptStatus.Message())) if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { if interruptStatus.ShouldIncludeProgressReport() { diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 56b7be758..480730486 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -182,10 +182,31 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) { r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) } +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel + //should we completely omit this spec? 
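The switch from `context.WithCancel` to `context.WithCancelCause` above is what lets the suite attach a reason ("spec has finished", "node timeout occurred", ...) to each cancellation. A minimal standalone sketch of that stdlib pattern (Go 1.20+); the error text is illustrative:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	// Ginkgo-style cancellation: attach a human-readable reason.
	cancel(errors.New("node timeout occurred"))

	fmt.Println(ctx.Err())          // context.Canceled
	fmt.Println(context.Cause(ctx)) // node timeout occurred
}
```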
+ if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips { + return + } + header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { header = fmt.Sprintf("[%s]", report.LeafNodeType) @@ -262,9 +283,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { } } - // If we have no content to show, jsut emit the header and return + // If we have no content to show, just emit the header and return if !reportHasContent { r.emit(r.f(highlightColor + header + "{{/}}")) + if r.conf.ForceNewlines { + r.emit("\n") + } return } @@ -283,26 +307,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Stdout/Stderr Output if showSeparateStdSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + r.wrapTextBlock("Captured StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) } if showSeparateVisibilityAlwaysReportsSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) - for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { - r.emitReportEntry(1, entry) - } - r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + r.wrapTextBlock("Report Entries", func() { + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) } if showTimeline { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) - r.emitTimeline(1, report, timeline) - r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) } // Emit Failure Message @@ -405,7 +426,15 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if r.conf.GithubOutput { + level := "error" + if state.Is(types.SpecStateSkipped) { + level = "notice" + } + r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 816042208..562e0f62b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -15,6 +15,7 @@ import ( "fmt" "os" "path" + "regexp" "strings" "github.com/onsi/ginkgo/v2/config" @@ -104,6 
+105,8 @@ type JUnitProperty struct { Value string `xml:"value,attr"` } +var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`) + type JUnitTestCase struct { // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" Name string `xml:"name,attr"` @@ -113,6 +116,8 @@ type JUnitTestCase struct { Status string `xml:"status,attr"` // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime Time float64 `xml:"time,attr"` + // Owner is the owner the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes. + Owner string `xml:"owner,attr,omitempty"` //Skipped is populated with a message if the test was skipped or pending Skipped *JUnitSkipped `xml:"skipped,omitempty"` //Error is populated if the test panicked or was interrupted @@ -172,6 +177,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, @@ -195,6 +201,12 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit if len(labels) > 0 && !config.OmitSpecLabels { name = name + " [" + strings.Join(labels, ", ") + "]" } + owner := "" + for _, label := range labels { + if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 { + owner = matches[1] + } + } name = strings.TrimSpace(name) test := JUnitTestCase{ @@ -202,6 +214,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), + Owner: owner, } if !spec.State.Is(config.OmitTimelinesForSpecState) { test.SystemErr = systemErrForUnstructuredReporters(spec) @@ -312,6 +325,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) continue } err = xml.NewDecoder(f).Decode(&report) + _ = f.Close() if err != nil { messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) continue diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index f33786a2d..aa1a35176 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -74,12 +74,21 @@ func AddReportEntry(name string, args ...interface{}) { /* ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that -receives a SpecReport. They are called before the spec starts. +receives a SpecReport or both SpecContext and Report for interruptible behavior. They are called before the spec starts. + +Example: + + ReportBeforeEach(func(report SpecReport) { // process report }) + ReportBeforeEach(func(ctx SpecContext, report SpecReport) { + // process report + }), NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure. 
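Related to the JUnit `Owner` attribute introduced above: the owner-label extraction can be sketched standalone. This is a minimal illustration (the `ownerFor` helper and sample labels are illustrative) of why the last matching label wins:

```go
package main

import (
	"fmt"
	"regexp"
)

var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`)

// ownerFor mirrors the loop above: the last matching label wins, so a spec's
// own owner:X label overrides one inherited from a container node.
func ownerFor(labels []string) string {
	owner := ""
	for _, label := range labels {
		if m := ownerRE.FindStringSubmatch(label); len(m) == 2 {
			owner = m[1]
		}
	}
	return owner
}

func main() {
	fmt.Println(ownerFor([]string{"owner:team-a", "slow", "owner:team-b"})) // team-b
}
```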
You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { +func ReportBeforeEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -87,13 +96,23 @@ func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { } /* -ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that -receives a SpecReport. They are called after the spec has completed and receive the final report for the spec. +ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. +ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and Report for interruptible behavior. +They are called after the spec has completed and receive the final report for the spec. + +Example: + + ReportAfterEach(func(report SpecReport) { // process report }) + ReportAfterEach(func(ctx SpecContext, report SpecReport) { + // process report + }), NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure. You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { +func ReportAfterEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -101,7 +120,15 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { } /* -ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report. +ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function +that can either receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportBeforeSuite(func(r Report) { // process report }) + ReportBeforeSuite(func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite. ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) @@ -112,18 +139,28 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeSuite(body func(Report), args ...interface{}) bool { +func ReportBeforeSuite(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) 
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) } /* -ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report. +ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes execute at the suite's conclusion, +and accept a function that can either receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report }) + ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite. -ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) +ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across all parallel nodes @@ -134,8 +171,10 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool { +func ReportAfterSuite(text string, body any, args ...interface{}) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index a3aef821b..9074a57ac 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -95,7 +95,7 @@ For example: }) It("should return the expected message", func() { - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) Expect(err).NotTo(HaveOccurred()) Expect(string(body)).To(Equal(message)) }) @@ -269,11 +269,15 @@ func generateTable(description string, isSubtree bool, args ...interface{}) { internalNodeArgs = append(internalNodeArgs, entry.decorations...) hasContext := false - if internalBodyType.NumIn() > 0. 
{ + if internalBodyType.NumIn() > 0 { if internalBodyType.In(0).Implements(specContextType) { hasContext = true - } else if internalBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + } else if internalBodyType.In(0).Implements(contextType) { hasContext = true + if len(entry.parameters) > 0 && reflect.TypeOf(entry.parameters[0]) != nil && reflect.TypeOf(entry.parameters[0]).Implements(contextType) { + // we allow you to pass in a non-nil context + hasContext = false + } } } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index c88fc85a7..8c0dfab8c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -25,6 +25,7 @@ type SuiteConfig struct { SkipFiles []string LabelFilter string FailOnPending bool + FailOnEmpty bool FailFast bool FlakeAttempts int MustPassRepeatedly int @@ -89,6 +90,9 @@ type ReporterConfig struct { VeryVerbose bool FullTrace bool ShowNodeEvents bool + GithubOutput bool + SilenceSkips bool + ForceNewlines bool JSONReport string JUnitReport string @@ -198,6 +202,7 @@ type GoFlagsConfig struct { A bool ASMFlags string BuildMode string + BuildVCS bool Compiler string GCCGoFlags string GCFlags string @@ -215,6 +220,7 @@ type GoFlagsConfig struct { ToolExec string Work bool X bool + O string } func NewDefaultGoFlagsConfig() GoFlagsConfig { @@ -264,7 +270,7 @@ var FlagSections = GinkgoFlagSections{ // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", - Usage: "The seed used to randomize the spec suite."}, + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, @@ -274,6 +280,8 @@ var SuiteConfigFlags = GinkgoFlags{ Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure", + Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."}, {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, @@ -320,7 +328,7 @@ var ParallelConfigFlags = GinkgoFlags{ // ReporterConfigFlags provides flags for the Ginkgo test process, and CLI var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", - Usage: "If set, suppress color output in default reporter."}, + Usage: "If set, suppress color output in default reporter. 
You can also set the environment variable GINKGO_NO_COLOR=TRUE"}, {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", Usage: "If set, emits more output including GinkgoWriter contents."}, {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", @@ -331,6 +339,12 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, + {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output", + Usage: "If set, default reporter will not print out skipped tests."}, + {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output", + Usage: "If set, default reporter will ensure a newline appears after each test."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, @@ -499,7 +513,7 @@ var GinkgoCLIWatchFlags = GinkgoFlags{ // GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI var GoBuildFlags = GinkgoFlags{ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", - Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."}, {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", @@ -515,6 +529,8 @@ var GoBuildFlags = GinkgoFlags{ Usage: "arguments to pass on each go tool asm invocation."}, {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", Usage: "build mode to use. See 'go help buildmode' for more."}, + {KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build", + Usage: "adds version control information."}, {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", @@ -549,6 +565,8 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, + {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", + Usage: "output binary path (including name)."}, } // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. 
These are passed to the compiled test binary by the ginkgo CLI @@ -602,7 +620,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { @@ -625,7 +643,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") } - args := []string{"test", "-c", "-o", destination, packageToBuild} + args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, map[string]interface{}{ diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index 9186ae873..de69f3022 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -24,7 +24,8 @@ type GinkgoFlag struct { DeprecatedDocLink string DeprecatedVersion string - ExportAs string + ExportAs string + AlwaysExport bool } type GinkgoFlags []GinkgoFlag @@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error { return nil } -//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
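The `AlwaysExport` field added above exists so that explicitly-zero flag values still propagate, most importantly `--seed=0`, which every parallel process must agree on. A reduced sketch of the export rule for one type (the helper name is illustrative):

```go
package main

import "fmt"

// exportInt sketches the rule below: zero-valued flags are normally omitted
// from the generated argument list, but AlwaysExport flags (such as --seed)
// are emitted even at zero so all parallel processes see the same value.
func exportInt(name string, v int64, alwaysExport bool) []string {
	if v != 0 || alwaysExport {
		return []string{fmt.Sprintf("--%s=%d", name, v)}
	}
	return nil
}

func main() {
	fmt.Println(exportInt("seed", 0, true))            // [--seed=0]
	fmt.Println(exportInt("flake-attempts", 0, false)) // []
}
```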
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { result := []string{} for _, flag := range flags { @@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) iface := value.Interface() switch value.Type() { case reflect.TypeOf(string("")): - if iface.(string) != "" { + if iface.(string) != "" || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } case reflect.TypeOf(int64(0)): - if iface.(int64) != 0 { + if iface.(int64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(float64(0)): - if iface.(float64) != 0 { + if iface.(float64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%f", name, iface)) } case reflect.TypeOf(int(0)): - if iface.(int) != 0 { + if iface.(int) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(bool(true)): @@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) result = append(result, fmt.Sprintf("--%s", name)) } case reflect.TypeOf(time.Duration(0)): - if iface.(time.Duration) != time.Duration(0) { + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index b0d3b651e..7fdc8aa23 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter { return func(labels []string) bool { return a(labels) || b(labels) } } +func labelSetFor(key string, labels []string) map[string]bool { + key = strings.ToLower(strings.TrimSpace(key)) + out := map[string]bool{} + for _, label := range labels { + components := strings.SplitN(label, ":", 2) + if len(components) < 2 { + continue + } + if key == strings.ToLower(strings.TrimSpace(components[0])) { + out[strings.ToLower(strings.TrimSpace(components[1]))] = true + } + } + + return out +} + +func isEmptyLabelSetAction(key string) LabelFilter { + return func(labels []string) bool { + return len(labelSetFor(key, labels)) == 0 + } +} + +func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if set[value] { + return true + } + } + return false + } +} + +func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + if len(set) != len(expectedValues) { + return false + } + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter { + expectedSet := map[string]bool{} + for _, value := range expectedValues { + expectedSet[value] = true + } + return func(labels []string) bool { + set := labelSetFor(key, labels) + for value := range set { + if !expectedSet[value] { + return false + } + } + return true + } +} + type 
lfToken uint const ( @@ -58,6 +135,9 @@ const ( lfTokenOr lfTokenRegexp lfTokenLabel + lfTokenSetKey + lfTokenSetOperation + lfTokenSetArgument lfTokenEOF ) @@ -71,6 +151,8 @@ func (l lfToken) Precedence() int { return 2 case lfTokenNot: return 3 + case lfTokenSetOperation: + return 4 } return -1 } @@ -93,6 +175,12 @@ func (l lfToken) String() string { return "/regexp/" case lfTokenLabel: return "label" + case lfTokenSetKey: + return "set_key" + case lfTokenSetOperation: + return "set_operation" + case lfTokenSetArgument: + return "set_argument" case lfTokenEOF: return "EOF" } @@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) } return matchLabelRegexAction(re), nil + case lfTokenSetOperation: + tokenSetOperation := strings.ToLower(tn.value) + if tokenSetOperation == "isempty" { + return isEmptyLabelSetAction(tn.leftNode.value), nil + } + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value)) + } + + rawValues := strings.Split(tn.rightNode.value, ",") + values := make([]string, len(rawValues)) + for i := range rawValues { + values[i] = strings.ToLower(strings.TrimSpace(rawValues[i])) + if strings.ContainsAny(values[i], "&|!,()/") { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i])) + } else if values[i] == "" { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.") + } + } + switch tokenSetOperation { + case "containsany": + return containsAnyLabelSetAction(tn.leftNode.value, values), nil + case "containsall": + return containsAllLabelSetAction(tn.leftNode.value, values), nil + case "consistsof": + return consistsOfLabelSetAction(tn.leftNode.value, values), nil + case "issubsetof": + return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil + } } if tn.rightNode == nil { @@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string { return out } +var validSetOperations = map[string]string{ + "containsany": "containsAny", + "containsall": "containsAll", + "consistsof": "consistsOf", + "issubsetof": "isSubsetOf", + "isempty": "isEmpty", +} + func tokenize(input string) func() (*treeNode, error) { + lastToken := lfTokenInvalid + lastValue := "" runes, i := []rune(input), 0 peekIs := func(r rune) bool { @@ -233,6 +360,53 @@ func tokenize(input string) func() (*treeNode, error) { } node := &treeNode{location: i} + defer func() { + lastToken = node.token + lastValue = node.value + }() + + if lastToken == lfTokenSetKey { + //we should get a valid set operation next + value, n := consumeUntil(" )") + if validSetOperations[strings.ToLower(value)] == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value)) + } + i += n + node.token, node.value = lfTokenSetOperation, value + return node, nil + } + if lastToken == lfTokenSetOperation { + //we should get an argument next, if we aren't isempty + var arg = "" + origI := i + if runes[i] == '{' { + i += 1 + value, n := consumeUntil("}") + if i+n >= len(runes) { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?") + } + i 
+= n + 1 + arg = value + } else { + value, n := consumeUntil("&|!,()/") + i += n + arg = strings.TrimSpace(value) + } + if strings.ToLower(lastValue) == "isempty" && arg != "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg)) + } + if arg == "" && strings.ToLower(lastValue) != "isempty" { + if i < len(runes) && runes[i] == '/' { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.") + } else { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue)) + } + } + // note that we send an empty SetArgument token if we are isempty + node.token, node.value = lfTokenSetArgument, arg + return node, nil + } + switch runes[i] { case '&': if !peekIs('&') { @@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) { i += n + 1 node.token, node.value = lfTokenRegexp, value default: - value, n := consumeUntil("&|!,()/") + value, n := consumeUntil("&|!,()/:") + i += n + value = strings.TrimSpace(value) + + //are we the beginning of a set operation? + if i < len(runes) && runes[i] == ':' { + if peekIs(' ') { + if value == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.") + } + i += 1 + //we are the beginning of a set operation + node.token, node.value = lfTokenSetKey, value + return node, nil + } + additionalValue, n := consumeUntil("&|!,()/") + additionalValue = strings.TrimSpace(additionalValue) + if additionalValue == ":" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.") + } + i += n + value += additionalValue + } + + valueToCheckForSetOperation := strings.ToLower(value) + for setOperation := range validSetOperations { + idx := strings.Index(valueToCheckForSetOperation, " "+setOperation) + if idx > 0 { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation])) + } + } + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) } return node, nil @@ -307,7 +511,7 @@ LOOP: switch node.token { case lfTokenEOF: break LOOP - case lfTokenLabel, lfTokenRegexp: + case lfTokenLabel, lfTokenRegexp, lfTokenSetKey: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels.
You need an operator between them.") } @@ -326,6 +530,18 @@ LOOP: node.setLeftNode(nodeToStealFrom.rightNode) nodeToStealFrom.setRightNode(node) current = node + case lfTokenSetOperation: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value)) + } + node.setLeftNode(current.rightNode) + current.setRightNode(node) + current = node + case lfTokenSetArgument: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token)) + } + current.setRightNode(node) case lfTokenCloseGroup: firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() if firstUnmatchedOpenNode == nil { @@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { if strings.ContainsAny(out, "&|!,()/") { return "", GinkgoErrors.InvalidLabel(label, cl) } + if out[0] == ':' { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + if strings.Contains(out, ":") { + components := strings.SplitN(out, ":", 2) + if len(components) < 2 || components[1] == "" { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + } return out, nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index aae69b04c..ddcbec1ba 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -3,13 +3,21 @@ package types import ( "encoding/json" "fmt" + "os" "sort" "strings" "time" ) const GINKGO_FOCUS_EXIT_CODE = 197 -const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +var GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" + +func init() { + if os.Getenv("GINKGO_TIME_FORMAT") != "" { + GINKGO_TIME_FORMAT = os.Getenv("GINKGO_TIME_FORMAT") + } +} // Report captures information about a Ginkgo test run type Report struct { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 7015be128..0b51c0b56 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.14.0" +const VERSION = "2.22.0"
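The label_filter.go hunks above add a set-operation syntax to ginkgo's label filters: labels of the form `key: value` form per-key sets, and the operators `containsAny`, `containsAll`, `consistsOf`, `isSubsetOf`, and `isEmpty` query them. A minimal usage sketch follows; the suite, spec, and label names are illustrative, not from this repository:

```go
package sets_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestSets(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Label Sets Suite") // suite name is illustrative
}

// Labels of the form "key: value" feed the labelSetFor/set-operation
// machinery added above; a spec may carry several values for one key.
var _ = Describe("importer", Label("feature: import", "feature: export"), func() {
	It("round-trips a document", func() {
		Expect(true).To(BeTrue()) // placeholder spec body
	})
})

// Example invocations, using the filter grammar the tokenizer above parses:
//
//	ginkgo --label-filter='feature: containsAny {import, parse}'
//	ginkgo --label-filter='feature: consistsOf {import, export}'
//	ginkgo --label-filter='feature: isEmpty'
```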
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index fe72a7b18..9f6090b8d 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,110 @@ +## 1.35.1 + +### Fixes +- Export EnforceDefaultTimeoutsWhenUsingContexts and DisableDefaultTimeoutsWhenUsingContext [ca36da1] + +## 1.35.0 + +### Features + +- You can now call `EnforceDefaultTimeoutsWhenUsingContexts()` to have `Eventually` honor the default timeout when passed a context. (prior to this you had to explicitly add a timeout) [e4c4265] +- You can call `StopTrying(message).Successfully()` to abort a `Consistently` early without failure [eeca931] + +### Fixes + +- Stop memoizing the result of `HaveField` to avoid unexpected errors when used with async assertions. [3bdbc4e] + +### Maintenance + +- Bump all dependencies [a05a416] + +## 1.34.2 + +Require Go 1.22+ + +### Maintenance +- bump ginkgo as well [c59c6dc] +- bump to go 1.22 - remove x/exp dependency [8158b99] + +## 1.34.1 + +### Maintenance +- Use slices from exp/slices to keep golang 1.20 compat [5e71dcd] + +## 1.34.0 + +### Features +- Add RoundTripper method to ghttp.Server [c549e0d] + +### Fixes +- fix incorrect handling of nil slices in HaveExactElements (fixes #771) [878940c] +- issue_765 - fixed bug in Hopcroft-Karp algorithm [ebadb67] + +### Maintenance +- bump ginkgo [8af2ece] +- Fix typo in docs [123a071] +- Bump github.com/onsi/ginkgo/v2 from 2.17.2 to 2.17.3 (#756) [0e69083] +- Bump google.golang.org/protobuf from 1.33.0 to 1.34.1 (#755) [2675796] +- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#754) [4160c0f] +- Bump github-pages from 230 to 231 in /docs (#748) [892c303] + +## 1.33.1 + +### Fixes +- fix confusing eventually docs [3a66379] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.17.1 to 2.17.2 [e9bc35a] + +## 1.33.0 + +### Features + +`Receive` now accepts `Receive(<POINTER>, MATCHER)`, allowing you to pick out a specific value on the channel that satisfies the provided matcher and is stored in the provided pointer. + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.17.1 (#745) [9999deb] +- Bump github-pages from 229 to 230 in /docs (#735) [cb5ff21] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#746) [bac6596] + +## 1.32.0 + +### Maintenance +- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197] + + This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one. + +- chore: test with Go 1.22 (#733) [32ef35e] +- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387] +- Bump github-pages and jekyll-feed in /docs (#732) [b71e477] +- docs: fix typo and broken anchor link to gstruct [f460154] +- docs: fix HaveEach matcher signature [a2862e4] + +## 1.31.1 + +### Fixes +- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999] +- Update test in case keeping msg is desired [ad1a367] + +### Maintenance +- Show how to import the format sub package [24e958d] +- tidy up go.sum [26661b8] +- bump dependencies [bde8f7a] + +## 1.31.0 + +### Features +- Async assertions include context cancellation cause if present [121c37f] + +### Maintenance +- Bump minimum go version [dee1e3c] +- docs: fix typo in example usage "occured" -> "occurred" [49005fe] +- Bump actions/setup-go from 4 to 5 (#714) [f1c8757] +- Bump github/codeql-action from 2 to 3 (#715) [9836e76] +- Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.2 (#713) [54726f0] +- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#711) [df97ecc] +- docs: fix `HaveExactElement` typo (#712) [a672c86] + ## 1.30.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index c271a366a..1038d7dd4 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.30.0" +const GOMEGA_VERSION = "1.35.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It().
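The 1.35.0 entry above introduces `StopTrying(message).Successfully()` as a way to end a `Consistently` early without failing it. A minimal sketch, assuming only the documented gomega API; the polled state and durations are invented for illustration:

```go
package stoptrying_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// TestStaysEmpty ends a Consistently poll early, and successfully, once we
// decide we have observed the state for long enough.
func TestStaysEmpty(t *testing.T) {
	g := NewWithT(t)

	queue := []string{}                        // stand-in for some observed state
	stop := time.After(200 * time.Millisecond) // watch window for this sketch

	g.Consistently(func() int {
		select {
		case <-stop:
			// Plain StopTrying would fail the assertion; Successfully() turns
			// this into an early, successful exit from the Consistently loop.
			StopTrying("observed long enough").Successfully().Now()
		default:
		}
		return len(queue)
	}).WithTimeout(time.Second).WithPolling(10 * time.Millisecond).Should(BeZero())
}
```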
@@ -319,7 +319,19 @@ you can also use Eventually().WithContext(ctx) to pass in the context. Passed-in Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17)) }, SpecTimeout(time.Second)) -Either way the context passd to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. +Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. + +By default, when a context is passed to Eventually *without* an explicit timeout, Gomega will rely solely on the context's cancellation to determine when to stop polling. If you want to specify a timeout in addition to the context you can do so using the .WithTimeout() method. For example: + + Eventually(client.FetchCount).WithContext(ctx).WithTimeout(10*time.Second).Should(BeNumerically(">=", 17)) + +now either the context cancellation or the timeout will cause Eventually to stop polling. + +If, instead, you would like to opt out of this behavior and have Gomega's default timeouts govern Eventuallys that take a context you can call: + + EnforceDefaultTimeoutsWhenUsingContexts() + +in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if either the context is cancelled or the default timeout elapses. **Category 3: Making assertions _in_ the function passed into Eventually** @@ -372,11 +384,11 @@ You can ensure that you get a number of consecutive successful tries before succ Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods: - Eventually(..., "1s", "2s", ctx).Should(...) + Eventually(..., "10s", "2s", ctx).Should(...) is equivalent to - Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) + Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) */ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() @@ -491,6 +503,16 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) { Default.SetDefaultConsistentlyPollingInterval(t) } +// EnforceDefaultTimeoutsWhenUsingContexts forces `Eventually` to apply a default timeout even when a context is provided. +func EnforceDefaultTimeoutsWhenUsingContexts() { + Default.EnforceDefaultTimeoutsWhenUsingContexts() +} + +// DisableDefaultTimeoutsWhenUsingContext disables the default timeout when a context is provided to `Eventually`. +func DisableDefaultTimeoutsWhenUsingContext() { + Default.DisableDefaultTimeoutsWhenUsingContext() +} + // AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against // the matcher passed to the Should and ShouldNot methods.
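A compact sketch of the context/timeout interplay documented in the hunk above, using the DSL additions from this diff. The `fetchCount` poller is a hypothetical stand-in, not from this repository:

```go
package timeouts_test

import (
	"context"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// fetchCount is a hypothetical context-aware poller used for illustration.
func fetchCount(ctx context.Context) (int, error) { return 17, nil }

func TestContextTimeouts(t *testing.T) {
	g := NewWithT(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Context only: polling stops when ctx is cancelled; no default timeout applies.
	g.Eventually(fetchCount).WithContext(ctx).Should(BeNumerically(">=", 17))

	// Context plus explicit timeout: whichever fires first ends the poll.
	g.Eventually(fetchCount).WithContext(ctx).WithTimeout(10 * time.Second).
		Should(BeNumerically(">=", 17))

	// Opt back in to Gomega's default Eventually timeout for context-taking calls.
	g.EnforceDefaultTimeoutsWhenUsingContexts()
	g.Eventually(fetchCount).WithContext(ctx).Should(BeNumerically(">=", 17))
}
```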
// diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index 1188b0bce..8b4cd1f5b 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -335,7 +335,7 @@ func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time { if assertion.asyncType == AsyncAssertionTypeConsistently { return time.After(assertion.g.DurationBundle.ConsistentlyDuration) } else { - if assertion.ctx == nil { + if assertion.ctx == nil || assertion.g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts { return time.After(assertion.g.DurationBundle.EventuallyTimeout) } else { return nil @@ -496,7 +496,15 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch for _, err := range []error{actualErr, matcherErr} { if pollingSignalErr, ok := AsPollingSignalError(err); ok { if pollingSignalErr.IsStopTrying() { - fail("Told to stop trying") + if pollingSignalErr.IsSuccessful() { + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("Told to stop trying (and ignoring call to Successfully(), as it is only relevant with Consistently)") + } else { + return true // early escape hatch for Consistently + } + } else { + fail("Told to stop trying") + } return false } if pollingSignalErr.IsTryAgainAfter() { @@ -553,7 +561,12 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch lock.Unlock() } case <-contextDone: - fail("Context was cancelled") + err := context.Cause(assertion.ctx) + if err != nil && err != context.Canceled { + fail(fmt.Sprintf("Context was cancelled (cause: %s)", err)) + } else { + fail("Context was cancelled") + } return false case <-timeout: if assertion.asyncType == AsyncAssertionTypeEventually { diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go index 6e0d90d3a..2e026c336 100644 --- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go +++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -8,10 +8,11 @@ import ( ) type DurationBundle struct { - EventuallyTimeout time.Duration - EventuallyPollingInterval time.Duration - ConsistentlyDuration time.Duration - ConsistentlyPollingInterval time.Duration + EventuallyTimeout time.Duration + EventuallyPollingInterval time.Duration + ConsistentlyDuration time.Duration + ConsistentlyPollingInterval time.Duration + EnforceDefaultTimeoutsWhenUsingContexts bool } const ( @@ -20,15 +21,19 @@ const ( ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION" ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL" + + EnforceDefaultTimeoutsWhenUsingContextsEnvVarName = "GOMEGA_ENFORCE_DEFAULT_TIMEOUTS_WHEN_USING_CONTEXTS" ) func FetchDefaultDurationBundle() DurationBundle { + _, EnforceDefaultTimeoutsWhenUsingContexts := os.LookupEnv(EnforceDefaultTimeoutsWhenUsingContextsEnvVarName) return DurationBundle{ EventuallyTimeout: durationFromEnv(EventuallyTimeoutEnvVarName, time.Second), EventuallyPollingInterval: durationFromEnv(EventuallyPollingIntervalEnvVarName, 10*time.Millisecond), - ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond), - ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond), + ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond), + ConsistentlyPollingInterval: 
durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond), + EnforceDefaultTimeoutsWhenUsingContexts: EnforceDefaultTimeoutsWhenUsingContexts, } } diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go index de1f4f336..c6e2fcc0e 100644 --- a/vendor/github.com/onsi/gomega/internal/gomega.go +++ b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -127,3 +127,11 @@ func (g *Gomega) SetDefaultConsistentlyDuration(t time.Duration) { func (g *Gomega) SetDefaultConsistentlyPollingInterval(t time.Duration) { g.DurationBundle.ConsistentlyPollingInterval = t } + +func (g *Gomega) EnforceDefaultTimeoutsWhenUsingContexts() { + g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = true +} + +func (g *Gomega) DisableDefaultTimeoutsWhenUsingContext() { + g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = false +} diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go index 83b04b1a4..3a4f7ddd9 100644 --- a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go +++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go @@ -17,6 +17,7 @@ type PollingSignalError interface { error Wrap(err error) PollingSignalError Attach(description string, obj any) PollingSignalError + Successfully() PollingSignalError Now() } @@ -45,6 +46,7 @@ type PollingSignalErrorImpl struct { wrappedErr error pollingSignalErrorType PollingSignalErrorType duration time.Duration + successful bool Attachments []PollingSignalErrorAttachment } @@ -73,6 +75,11 @@ func (s *PollingSignalErrorImpl) Unwrap() error { return s.wrappedErr } +func (s *PollingSignalErrorImpl) Successfully() PollingSignalError { + s.successful = true + return s +} + func (s *PollingSignalErrorImpl) Now() { panic(s) } @@ -81,6 +88,10 @@ func (s *PollingSignalErrorImpl) IsStopTrying() bool { return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying } +func (s *PollingSignalErrorImpl) IsSuccessful() bool { + return s.successful +} + func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool { return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter } diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 43f994374..7ef27dc9c 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -194,20 +194,21 @@ func BeClosed() types.GomegaMatcher { // // will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. 
// -// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: +// Furthermore, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: // // var myThing thing // Eventually(thingChan).Should(Receive(&myThing)) // Expect(myThing.Sprocket).Should(Equal("foo")) // Expect(myThing.IsValid()).Should(BeTrue()) +// +// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received, +// you can pass a pointer to a variable of the appropriate type first, and second a matcher: +// +// var myThing thing +// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar"))) func Receive(args ...interface{}) types.GomegaMatcher { - var arg interface{} - if len(args) > 0 { - arg = args[0] - } - return &matchers.ReceiveMatcher{ - Arg: arg, + Args: args, } } @@ -394,7 +395,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher { } } -// HaveExactElemets succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. +// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter. // By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: // // Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar")) diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 8ab4bb919..4e3897858 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -41,9 +41,9 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m } func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { - return cmp.Diff(matcher.Expected, actual, matcher.Options) + return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...)) } func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to equal", matcher.Expected) + return format.Message(actual, "not to be comparable to", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go index dca5b9446..5a236d7d6 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go +++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go @@ -30,15 +30,18 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool lenMatchers := len(matchers) lenValues := len(values) + success = true for i := 0; i < lenMatchers || i < lenValues; i++ { if i >= lenMatchers { matcher.extraIndex = i + success = false continue } if i >= lenValues { matcher.missingIndex = i + success = false return } @@ -49,15 +52,17 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool index: i, failure: err.Error(), }) + success = false } else if !match { matcher.mismatchFailures = append(matcher.mismatchFailures,
mismatchFailure{ index: i, failure: elemMatcher.FailureMessage(values[i]), }) + success = false } } - return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil + return success, nil } func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) { diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go index 6989f78c4..8dd3f871a 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_field.go +++ b/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -17,7 +17,7 @@ func (e missingFieldError) Error() string { return string(e) } -func extractField(actual interface{}, field string, matchername string) (interface{}, error) { +func extractField(actual interface{}, field string, matchername string) (any, error) { fields := strings.SplitN(field, ".", 2) actualValue := reflect.ValueOf(actual) @@ -64,36 +64,46 @@ func extractField(actual interface{}, field string, matchername string) (interfa type HaveFieldMatcher struct { Field string Expected interface{} +} - extractedField interface{} - expectedMatcher omegaMatcher +func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher { + var isMatcher bool + expectedMatcher, isMatcher := matcher.Expected.(omegaMatcher) + if !isMatcher { + expectedMatcher = &EqualMatcher{Expected: matcher.Expected} + } + return expectedMatcher } func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) { - matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField") + extractedField, err := extractField(actual, matcher.Field, "HaveField") if err != nil { return false, err } - var isMatcher bool - matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher) - if !isMatcher { - matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected} - } - - return matcher.expectedMatcher.Match(matcher.extractedField) + return matcher.expectedMatcher().Match(extractedField) } func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) { + extractedField, err := extractField(actual, matcher.Field, "HaveField") + if err != nil { + // this really shouldn't happen + return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err) + } message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field) - message += matcher.expectedMatcher.FailureMessage(matcher.extractedField) + message += matcher.expectedMatcher().FailureMessage(extractedField) return message } func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) { + extractedField, err := extractField(actual, matcher.Field, "HaveField") + if err != nil { + // this really shouldn't happen + return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err) + } message = fmt.Sprintf("Value for field '%s' satisfied matcher, but should not have.\n", matcher.Field) - message += matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField) + message += matcher.expectedMatcher().NegatedFailureMessage(extractedField) return message } diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go index 1936a2ba5..948164eaf 100644 --- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -3,6 +3,7 @@ package matchers import ( + "errors" "fmt" "reflect" @@ -10,7 +11,7 @@ import ( ) type ReceiveMatcher struct { - 
Arg interface{} + Args []interface{} receivedValue reflect.Value channelClosed bool } @@ -29,15 +30,38 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro var subMatcher omegaMatcher var hasSubMatcher bool - - if matcher.Arg != nil { - subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + var resultReference interface{} + + // Valid arg formats are as follows, always with optional POINTER before + // optional MATCHER: + // - Receive() + // - Receive(POINTER) + // - Receive(MATCHER) + // - Receive(POINTER, MATCHER) + args := matcher.Args + if len(args) > 0 { + arg := args[0] + _, isSubMatcher := arg.(omegaMatcher) + if !isSubMatcher && reflect.ValueOf(arg).Kind() == reflect.Ptr { + // Consume optional POINTER arg first, if it ain't no matcher ;) + resultReference = arg + args = args[1:] + } + } + if len(args) > 0 { + arg := args[0] + subMatcher, hasSubMatcher = arg.(omegaMatcher) if !hasSubMatcher { - argType := reflect.TypeOf(matcher.Arg) - if argType.Kind() != reflect.Ptr { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) - } + // At this point we assume the dev user wanted to assign a received + // value, so [POINTER,]MATCHER. + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(arg, 1)) } + // Consume optional MATCHER arg. + args = args[1:] + } + if len(args) > 0 { + // If there are still args present, reject all. + return false, errors.New("Receive matcher expects at most an optional pointer and/or an optional matcher") } winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ @@ -58,16 +82,20 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } if hasSubMatcher { - if didReceive { - matcher.receivedValue = value - return subMatcher.Match(matcher.receivedValue.Interface()) + if !didReceive { + return false, nil } - return false, nil + matcher.receivedValue = value + if match, err := subMatcher.Match(matcher.receivedValue.Interface()); err != nil || !match { + return match, err + } + // if we received a match, then fall through in order to handle an + // optional assignment of the received value to the specified reference. 
} if didReceive { - if matcher.Arg != nil { - outValue := reflect.ValueOf(matcher.Arg) + if resultReference != nil { + outValue := reflect.ValueOf(resultReference) if value.Type().AssignableTo(outValue.Elem().Type()) { outValue.Elem().Set(value) @@ -77,7 +105,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro outValue.Elem().Set(value.Elem()) return true, nil } else { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1)) + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(resultReference, 1)) } } @@ -88,7 +116,11 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { @@ -105,7 +137,11 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin } func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go index 1c54edd8f..44aa61d4b 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -1,6 +1,8 @@ package bipartitegraph import ( + "slices" + . "github.com/onsi/gomega/matchers/support/goraph/edge" . 
"github.com/onsi/gomega/matchers/support/goraph/node" "github.com/onsi/gomega/matchers/support/goraph/util" @@ -157,6 +159,11 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [ if len(currentLayer) == 0 { return []NodeOrderedSet{} } + if done { // if last layer - into last layer must be only 'free' nodes + currentLayer = slices.DeleteFunc(currentLayer, func(in Node) bool { + return !matching.Free(in) + }) + } guideLayers = append(guideLayers, currentLayer) } diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index 7c7adb941..30f2beed3 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -29,6 +29,8 @@ type Gomega interface { SetDefaultEventuallyPollingInterval(time.Duration) SetDefaultConsistentlyDuration(time.Duration) SetDefaultConsistentlyPollingInterval(time.Duration) + EnforceDefaultTimeoutsWhenUsingContexts() + DisableDefaultTimeoutsWhenUsingContext() } // All Gomega matchers must implement the GomegaMatcher interface diff --git a/vendor/github.com/opencontainers/go-digest/digestset/set.go b/vendor/github.com/opencontainers/go-digest/digestset/set.go deleted file mode 100644 index 71f24184c..000000000 --- a/vendor/github.com/opencontainers/go-digest/digestset/set.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2020, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digestset - -import ( - "errors" - "sort" - "strings" - "sync" - - digest "github.com/opencontainers/go-digest" -) - -var ( - // ErrDigestNotFound is used when a matching digest - // could not be found in a set. - ErrDigestNotFound = errors.New("digest not found") - - // ErrDigestAmbiguous is used when multiple digests - // are found in a set. None of the matching digests - // should be considered valid matches. - ErrDigestAmbiguous = errors.New("ambiguous digest string") -) - -// Set is used to hold a unique set of digests which -// may be easily referenced by easily referenced by a string -// representation of the digest as well as short representation. -// The uniqueness of the short representation is based on other -// digests in the set. If digests are omitted from this set, -// collisions in a larger set may not be detected, therefore it -// is important to always do short representation lookups on -// the complete set of digests. To mitigate collisions, an -// appropriately long short code should be used. -type Set struct { - mutex sync.RWMutex - entries digestEntries -} - -// NewSet creates an empty set of digests -// which may have digests added. -func NewSet() *Set { - return &Set{ - entries: digestEntries{}, - } -} - -// checkShortMatch checks whether two digests match as either whole -// values or short values. This function does not test equality, -// rather whether the second value could match against the first -// value. 
-func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { - if len(hex) == len(shortHex) { - if hex != shortHex { - return false - } - if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - } else if !strings.HasPrefix(hex, shortHex) { - return false - } else if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - return true -} - -// Lookup looks for a digest matching the given string representation. -// If no digests could be found ErrDigestNotFound will be returned -// with an empty digest value. If multiple matches are found -// ErrDigestAmbiguous will be returned with an empty digest value. -func (dst *Set) Lookup(d string) (digest.Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg digest.Algorithm - hex string - ) - dgst, err := digest.Parse(d) - if err == digest.ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. 
-func (dst *Set) Remove(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []digest.Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]digest.Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. -func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[digest.Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg digest.Algorithm - val string - digest digest.Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 892ba3de9..ce8313e79 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -21,12 +21,20 @@ const ( // MediaTypeLayoutHeader specifies the media type for the oci-layout. MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + // MediaTypeImageManifest specifies the media type for an image manifest. MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" - // MediaTypeImageIndex specifies the media type for an image index. - MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + // MediaTypeImageConfig specifies the media type for the image configuration. 
+ MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" + + // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value "{}". + MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" +) +const ( // MediaTypeImageLayer is the media type used for layers referenced by the manifest. MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" @@ -37,7 +45,15 @@ const ( // MediaTypeImageLayerZstd is the media type used for zstd compressed // layers referenced by the manifest. MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" +) +// Non-distributable layer media-types. +// +// Deprecated: Non-distributable layers are deprecated, and not recommended +// for future use. Implementations SHOULD NOT produce new non-distributable +// layers. +// https://github.com/opencontainers/image-spec/pull/965 +const ( // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. // @@ -66,10 +82,4 @@ const ( // layers. // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" - - // MediaTypeImageConfig specifies the media type for the image configuration. - MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - - // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value `{}` - MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 11e09b584..7069ae44d 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc.5" + VersionDev = "" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml deleted file mode 100644 index 3baf5a456..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml +++ /dev/null @@ -1,167 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/497 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: clusteroperators.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterOperator - listKind: ClusterOperatorList - plural: clusteroperators - shortNames: - - co - singular: clusteroperator - scope: Cluster - versions: - - additionalPrinterColumns: - - description: The version the operator is at. - jsonPath: .status.versions[?(@.name=="operator")].version - name: Version - type: string - - description: Whether the operator is running and stable. - jsonPath: .status.conditions[?(@.type=="Available")].status - name: Available - type: string - - description: Whether the operator is processing changes. 
- jsonPath: .status.conditions[?(@.type=="Progressing")].status - name: Progressing - type: string - - description: Whether the operator is degraded. - jsonPath: .status.conditions[?(@.type=="Degraded")].status - name: Degraded - type: string - - description: The time the operator's Available status last changed. - jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime - name: Since - type: date - name: v1 - schema: - openAPIV3Schema: - description: "ClusterOperator is the Custom Resource object which holds the - current state of an operator. This object is used by operators to convey - their state to the rest of the cluster. \n Compatibility level 1: Stable - within a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds configuration that could apply to any operator. - type: object - status: - description: status holds the information about the state of an operator. It - is consistent with status information across the Kubernetes ecosystem. - properties: - conditions: - description: conditions describes the state of the operator's managed - and monitored components. - items: - description: ClusterOperatorStatusCondition represents the state - of the operator's managed and monitored components. - properties: - lastTransitionTime: - description: lastTransitionTime is the time of the last update - to the current status property. - format: date-time - type: string - message: - description: message provides additional information about the - current condition. This is only to be consumed by humans. It - may contain Line Feed characters (U+000A), which should be - rendered as new lines. - type: string - reason: - description: reason is the CamelCase reason for the condition's - current status. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type specifies the aspect reported by this condition. - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - extension: - description: extension contains any additional status information - specific to the operator which owns this status object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - relatedObjects: - description: 'relatedObjects is a list of objects that are "interesting" - or related to this operator. Common uses are: 1. the detailed resource - driving the operator 2. operator namespaces 3. operand namespaces' - items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. - properties: - group: - description: group of the referent. - type: string - name: - description: name of the referent. 
- type: string - namespace: - description: namespace of the referent. - type: string - resource: - description: resource of the referent. - type: string - required: - - group - - name - - resource - type: object - type: array - versions: - description: versions is a slice of operator and operand version tuples. Operators - which manage multiple operands will have multiple operand entries - in the array. Available operators must report the version of the - operator itself with the name "operator". An operator reports a - new "operator" version when it has rolled out the new version to - all of its operands. - items: - properties: - name: - description: name is the name of the particular operand this - version is for. It usually matches container images, not - operators. - type: string - version: - description: version indicates which version of a particular - operand is currently being managed. It must always match - the Available operand. If 1.0.0 is Available, then this must - indicate 1.0.0 even if the operator is trying to rollout 1.1.0 - type: string - required: - - name - - version - type: object - type: array - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml deleted file mode 100644 index dcb871eee..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,780 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/495 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: clusterversions.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterVersion - plural: clusterversions - singular: clusterversion - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.history[?(@.state=="Completed")].version - name: Version - type: string - - jsonPath: .status.conditions[?(@.type=="Available")].status - name: Available - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].status - name: Progressing - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime - name: Since - type: date - - jsonPath: .status.conditions[?(@.type=="Progressing")].message - name: Status - type: string - name: v1 - schema: - openAPIV3Schema: - description: "ClusterVersion is the configuration for the ClusterVersionOperator. - This is where parameters related to automatic updates can be set. \n Compatibility - level 1: Stable within a major release for a minimum of 12 months or 3 minor - releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. 
Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the desired state of the cluster version - the operator - will work to ensure that the desired version is applied to the cluster. - properties: - capabilities: - description: capabilities configures the installation of optional, - core cluster components. A null value here is identical to an empty - object; see the child properties for default semantics. - properties: - additionalEnabledCapabilities: - description: additionalEnabledCapabilities extends the set of - managed capabilities beyond the baseline defined in baselineCapabilitySet. The - default is an empty set. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - baselineCapabilitySet: - description: baselineCapabilitySet selects an initial set of optional - capabilities to enable, which can be extended via additionalEnabledCapabilities. If - unset, the cluster will choose a default, and the default may - change over time. The current default is vCurrent. - enum: - - None - - v4.11 - - v4.12 - - v4.13 - - v4.14 - - v4.15 - - v4.16 - - vCurrent - type: string - type: object - channel: - description: channel is an identifier for explicitly requesting that - a non-default set of updates be applied to this cluster. The default - channel will be contain stable updates that are appropriate for - production clusters. - type: string - clusterID: - description: clusterID uniquely identifies this cluster. This is expected - to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - in hexadecimal values). This is a required field. - type: string - desiredUpdate: - description: "desiredUpdate is an optional field that indicates the - desired value of the cluster version. Setting this value will trigger - an upgrade (if the current version does not match the desired version). - The set of recommended update values is listed as part of available - updates in status, and setting values outside that range may cause - the upgrade to fail. \n Some of the fields are inter-related with - restrictions and meanings described here. 1. image is specified, - version is specified, architecture is specified. API validation - error. 2. image is specified, version is specified, architecture - is not specified. You should not do this. version is silently ignored - and image is used. 3. image is specified, version is not specified, - architecture is specified. API validation error. 4. image is specified, - version is not specified, architecture is not specified. image is - used. 5. image is not specified, version is specified, architecture - is specified. version and desired architecture are used to select - an image. 6. image is not specified, version is specified, architecture - is not specified. version and current architecture are used to select - an image. 7. image is not specified, version is not specified, architecture - is specified. API validation error. 8. 
image is not specified, version - is not specified, architecture is not specified. API validation - error. \n If an upgrade fails the operator will halt and report - status about the failing component. Setting the desired update value - back to the previous version will cause a rollback to be attempted. - Not all rollbacks will succeed." - properties: - architecture: - description: architecture is an optional field that indicates - the desired value of the cluster architecture. In this context - cluster architecture means either a single architecture or a - multi architecture. architecture can only be set to Multi thereby - only allowing updates from single to multi architecture. If - architecture is set, image cannot be set and version must be - set. Valid values are 'Multi' and empty. - enum: - - Multi - - "" - type: string - force: - description: force allows an administrator to update to an image - that has failed verification or upgradeable checks. This option - should only be used when the authenticity of the provided image - has been verified out of band because the provided image will - run with full administrative access to the cluster. Do not use - this flag with images that comes from unknown or potentially - malicious sources. - type: boolean - image: - description: image is a container image location that contains - the update. image should be used when the desired version does - not exist in availableUpdates or history. When image is set, - version is ignored. When image is set, version should be empty. - When image is set, architecture cannot be specified. - type: string - version: - description: version is a semantic version identifying the update - version. version is ignored if image is specified and required - if architecture is specified. - type: string - type: object - x-kubernetes-validations: - - message: cannot set both Architecture and Image - rule: 'has(self.architecture) && has(self.image) ? (self.architecture - == '''' || self.image == '''') : true' - - message: Version must be set if Architecture is set - rule: 'has(self.architecture) && self.architecture != '''' ? self.version - != '''' : true' - overrides: - description: overrides is list of overides for components that are - managed by cluster version operator. Marking a component unmanaged - will prevent the operator from creating or updating the object. - items: - description: ComponentOverride allows overriding cluster version - operator's behavior for a component. - properties: - group: - description: group identifies the API group that the kind is - in. - type: string - kind: - description: kind indentifies which object to override. - type: string - name: - description: name is the component's name. - type: string - namespace: - description: namespace is the component's namespace. If the - resource is cluster scoped, the namespace should be empty. - type: string - unmanaged: - description: 'unmanaged controls if cluster version operator - should stop managing the resources in this cluster. Default: - false' - type: boolean - required: - - group - - kind - - name - - namespace - - unmanaged - type: object - type: array - x-kubernetes-list-map-keys: - - kind - - group - - namespace - - name - x-kubernetes-list-type: map - signatureStores: - description: "signatureStores contains the upstream URIs to verify - release signatures and optional reference to a config map by name - containing the PEM-encoded CA bundle. \n By default, CVO will use - existing signature stores if this property is empty. 
The CVO will - check the release signatures in the local ConfigMaps first. It will - search for a valid signature in these stores in parallel only when - local ConfigMaps did not include a valid signature. Validation will - fail if none of the signature stores reply with valid signature - before timeout. Setting signatureStores will replace the default - signature stores with custom signature stores. Default stores can - be used with custom signature stores by adding them manually. \n - A maximum of 32 signature stores may be configured." - items: - description: SignatureStore represents the URL of custom Signature - Store - properties: - ca: - description: ca is an optional reference to a config map by - name containing the PEM-encoded CA bundle. It is used as a - trust anchor to validate the TLS certificate presented by - the remote server. The key "ca.crt" is used to locate the - data. If specified and the config map or expected key is not - found, the signature store is not honored. If the specified - ca data is not valid, the signature store is not honored. - If empty, we fall back to the CA configured via Proxy, which - is appended to the default system roots. The namespace for - this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - url: - description: url contains the upstream custom signature store - URL. url should be a valid absolute http/https URI of an upstream - signature store as per rfc1738. This must be provided and - cannot be empty. - type: string - x-kubernetes-validations: - - message: url must be a valid absolute URL - rule: isURL(self) - required: - - url - type: object - maxItems: 32 - type: array - x-kubernetes-list-map-keys: - - url - x-kubernetes-list-type: map - upstream: - description: upstream may be used to specify the preferred update - server. By default it will use the appropriate update server for - the cluster and region. - type: string - required: - - clusterID - type: object - status: - description: status contains information about the available updates and - any in-progress updates. - properties: - availableUpdates: - description: availableUpdates contains updates recommended for this - cluster. Updates which appear in conditionalUpdates but not in availableUpdates - may expose this cluster to known issues. This list may be empty - if no updates are recommended, if the update service is unavailable, - or if an invalid channel has been specified. - items: - description: Release represents an OpenShift release image and associated - metadata. - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. This - URL is set by the 'url' metadata property on a release or - the metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set - for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. 
When this field is part of spec, version is optional - if image is specified. - type: string - type: object - nullable: true - type: array - x-kubernetes-list-type: atomic - capabilities: - description: capabilities describes the state of optional, core cluster - components. - properties: - enabledCapabilities: - description: enabledCapabilities lists all the capabilities that - are currently managed. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - knownCapabilities: - description: knownCapabilities lists all the capabilities known - to the current cluster. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - type: object - conditionalUpdates: - description: conditionalUpdates contains the list of updates that - may be recommended for this cluster if it meets specific required - conditions. Consumers interested in the set of updates that are - actually recommended for this cluster should use availableUpdates. - This list may be empty if no updates are recommended, if the update - service is unavailable, or if an empty or invalid channel has been - specified. - items: - description: ConditionalUpdate represents an update which is recommended - to some clusters on the version the current cluster is reconciling, - but which may not be recommended for the current cluster. - properties: - conditions: - description: 'conditions represents the observations of the - conditional update''s current status. Known types are: * Recommended, - for whether the update is recommended for the current cluster.' - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. 
- maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - release: - description: release is the target of the update. - properties: - channels: - description: channels is the set of Cincinnati channels - to which the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is - optional if version is specified and the availableUpdates - field contains a matching version. - type: string - url: - description: url contains information about this release. - This URL is set by the 'url' metadata property on a release - or the metadata returned by the update API and should - be displayed as a link in user interfaces. The URL field - may not be set for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the - update version. When this field is part of spec, version - is optional if image is specified. - type: string - type: object - risks: - description: risks represents the range of issues associated - with updating to the target release. The cluster-version operator - will evaluate all entries, and only recommend the update if - there is at least one entry and all entries recommend the - update. - items: - description: ConditionalUpdateRisk represents a reason and - cluster-state for not recommending a conditional update. - properties: - matchingRules: - description: matchingRules is a slice of conditions for - deciding which clusters match the risk and which do - not. The slice is ordered by decreasing precedence. - The cluster-version operator will walk the slice in - order, and stop after the first it can successfully - evaluate. If no condition can be successfully evaluated, - the update will not be recommended. 
- items: - description: ClusterCondition is a union of typed cluster - conditions. The 'type' property determines which - of the type-specific properties are relevant. When - evaluated on a cluster, the condition may match, not - match, or fail to evaluate. - properties: - promql: - description: promQL represents a cluster condition - based on PromQL. - properties: - promql: - description: PromQL is a PromQL query classifying - clusters. This query query should return a - 1 in the match case and a 0 in the does-not-match - case. Queries which return no time series, - or which return values besides 0 or 1, are - evaluation failures. - type: string - required: - - promql - type: object - type: - description: type represents the cluster-condition - type. This defines the members and semantics of - any additional properties. - enum: - - Always - - PromQL - type: string - required: - - type - type: object - minItems: 1 - type: array - x-kubernetes-list-type: atomic - message: - description: message provides additional information about - the risk of updating, in the event that matchingRules - match the cluster state. This is only to be consumed - by humans. It may contain Line Feed characters (U+000A), - which should be rendered as new lines. - minLength: 1 - type: string - name: - description: name is the CamelCase reason for not recommending - a conditional update, in the event that matchingRules - match the cluster state. - minLength: 1 - type: string - url: - description: url contains information about this risk. - format: uri - minLength: 1 - type: string - required: - - matchingRules - - message - - name - - url - type: object - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - release - - risks - type: object - type: array - x-kubernetes-list-type: atomic - conditions: - description: conditions provides information about the cluster version. - The condition "Available" is set to true if the desiredUpdate has - been reached. The condition "Progressing" is set to true if an update - is being applied. The condition "Degraded" is set to true if an - update is currently blocked by a temporary or permanent error. Conditions - are only valid for the current desiredUpdate when metadata.generation - is equal to status.generation. - items: - description: ClusterOperatorStatusCondition represents the state - of the operator's managed and monitored components. - properties: - lastTransitionTime: - description: lastTransitionTime is the time of the last update - to the current status property. - format: date-time - type: string - message: - description: message provides additional information about the - current condition. This is only to be consumed by humans. It - may contain Line Feed characters (U+000A), which should be - rendered as new lines. - type: string - reason: - description: reason is the CamelCase reason for the condition's - current status. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type specifies the aspect reported by this condition. - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - desired: - description: desired is the version that the cluster is reconciling - towards. If the cluster is not yet fully initialized desired will - be set with the information available, which may be an image or - a tag. 
- properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. This - URL is set by the 'url' metadata property on a release or the - metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set for - test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. When this field is part of spec, version is optional - if image is specified. - type: string - type: object - history: - description: history contains a list of the most recent versions applied - to the cluster. This value may be empty during cluster startup, - and then will be updated when a new update is being applied. The - newest update is first in the list and it is ordered by recency. - Updates in the history have state Completed if the rollout completed - - if an update was failing or halfway applied the state will be - Partial. Only a limited amount of update history is preserved. - items: - description: UpdateHistory is a single attempted update to the cluster. - properties: - acceptedRisks: - description: acceptedRisks records risks which were accepted - to initiate the update. For example, it may menition an Upgradeable=False - or missing signature that was overriden via desiredUpdate.force, - or an update that was initiated despite not being in the availableUpdates - set of recommended update targets. - type: string - completionTime: - description: completionTime, if set, is when the update was - fully applied. The update that is currently being applied - will have a null completion time. Completion time will always - be set for entries that are not the current update (usually - to the started time of the next update). - format: date-time - nullable: true - type: string - image: - description: image is a container image location that contains - the update. This value is always populated. - type: string - startedTime: - description: startedTime is the time at which the update was - started. - format: date-time - type: string - state: - description: state reflects whether the update was fully applied. - The Partial state indicates the update is not fully applied, - while the Completed state indicates the update was successfully - rolled out at least once (all parts of the update successfully - applied). - type: string - verified: - description: verified indicates whether the provided update - was properly verified before it was installed. If this is - false the cluster may not be trusted. Verified does not cover - upgradeable checks that depend on the cluster state at the - time when the update target was accepted. - type: boolean - version: - description: version is a semantic version identifying the update - version. If the requested image does not define a version, - or if a failure occurs retrieving the image, this value may - be empty. 
- type: string - required: - - completionTime - - image - - startedTime - - state - - verified - type: object - type: array - x-kubernetes-list-type: atomic - observedGeneration: - description: observedGeneration reports which version of the spec - is being synced. If this value is not equal to metadata.generation, - then the desired and conditions fields may represent a previous - version. - format: int64 - type: integer - versionHash: - description: versionHash is a fingerprint of the content that the - cluster will be updated with. It is used by the operator to avoid - unnecessary work and is for internal use only. - type: string - required: - - availableUpdates - - desired - - observedGeneration - - versionHash - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: the `baremetal` capability requires the `MachineAPI` capability, - which is neither explicitly or implicitly enabled in this cluster, please - enable the `MachineAPI` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''baremetal'' - in self.spec.capabilities.additionalEnabledCapabilities ? ''MachineAPI'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''MachineAPI'' in self.status.capabilities.enabledCapabilities) : true' - - message: the `marketplace` capability requires the `OperatorLifecycleManager` - capability, which is neither explicitly or implicitly enabled in this - cluster, please enable the `OperatorLifecycleManager` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''marketplace'' - in self.spec.capabilities.additionalEnabledCapabilities ? 
''OperatorLifecycleManager'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''OperatorLifecycleManager'' in self.status.capabilities.enabledCapabilities) - : true' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml deleted file mode 100644 index 371177aaf..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml +++ /dev/null @@ -1,727 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/495 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: clusterversions.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterVersion - plural: clusterversions - singular: clusterversion - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.history[?(@.state=="Completed")].version - name: Version - type: string - - jsonPath: .status.conditions[?(@.type=="Available")].status - name: Available - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].status - name: Progressing - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime - name: Since - type: date - - jsonPath: .status.conditions[?(@.type=="Progressing")].message - name: Status - type: string - name: v1 - schema: - openAPIV3Schema: - description: "ClusterVersion is the configuration for the ClusterVersionOperator. - This is where parameters related to automatic updates can be set. \n Compatibility - level 1: Stable within a major release for a minimum of 12 months or 3 minor - releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the desired state of the cluster version - the operator - will work to ensure that the desired version is applied to the cluster. - properties: - capabilities: - description: capabilities configures the installation of optional, - core cluster components. A null value here is identical to an empty - object; see the child properties for default semantics. - properties: - additionalEnabledCapabilities: - description: additionalEnabledCapabilities extends the set of - managed capabilities beyond the baseline defined in baselineCapabilitySet. The - default is an empty set. 
- items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - baselineCapabilitySet: - description: baselineCapabilitySet selects an initial set of optional - capabilities to enable, which can be extended via additionalEnabledCapabilities. If - unset, the cluster will choose a default, and the default may - change over time. The current default is vCurrent. - enum: - - None - - v4.11 - - v4.12 - - v4.13 - - v4.14 - - v4.15 - - v4.16 - - vCurrent - type: string - type: object - channel: - description: channel is an identifier for explicitly requesting that - a non-default set of updates be applied to this cluster. The default - channel will be contain stable updates that are appropriate for - production clusters. - type: string - clusterID: - description: clusterID uniquely identifies this cluster. This is expected - to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - in hexadecimal values). This is a required field. - type: string - desiredUpdate: - description: "desiredUpdate is an optional field that indicates the - desired value of the cluster version. Setting this value will trigger - an upgrade (if the current version does not match the desired version). - The set of recommended update values is listed as part of available - updates in status, and setting values outside that range may cause - the upgrade to fail. \n Some of the fields are inter-related with - restrictions and meanings described here. 1. image is specified, - version is specified, architecture is specified. API validation - error. 2. image is specified, version is specified, architecture - is not specified. You should not do this. version is silently ignored - and image is used. 3. image is specified, version is not specified, - architecture is specified. API validation error. 4. image is specified, - version is not specified, architecture is not specified. image is - used. 5. image is not specified, version is specified, architecture - is specified. version and desired architecture are used to select - an image. 6. image is not specified, version is specified, architecture - is not specified. version and current architecture are used to select - an image. 7. image is not specified, version is not specified, architecture - is specified. API validation error. 8. image is not specified, version - is not specified, architecture is not specified. API validation - error. \n If an upgrade fails the operator will halt and report - status about the failing component. Setting the desired update value - back to the previous version will cause a rollback to be attempted. - Not all rollbacks will succeed." - properties: - architecture: - description: architecture is an optional field that indicates - the desired value of the cluster architecture. In this context - cluster architecture means either a single architecture or a - multi architecture. architecture can only be set to Multi thereby - only allowing updates from single to multi architecture. If - architecture is set, image cannot be set and version must be - set. Valid values are 'Multi' and empty. 
- enum: - - Multi - - "" - type: string - force: - description: force allows an administrator to update to an image - that has failed verification or upgradeable checks. This option - should only be used when the authenticity of the provided image - has been verified out of band because the provided image will - run with full administrative access to the cluster. Do not use - this flag with images that comes from unknown or potentially - malicious sources. - type: boolean - image: - description: image is a container image location that contains - the update. image should be used when the desired version does - not exist in availableUpdates or history. When image is set, - version is ignored. When image is set, version should be empty. - When image is set, architecture cannot be specified. - type: string - version: - description: version is a semantic version identifying the update - version. version is ignored if image is specified and required - if architecture is specified. - type: string - type: object - x-kubernetes-validations: - - message: cannot set both Architecture and Image - rule: 'has(self.architecture) && has(self.image) ? (self.architecture - == '''' || self.image == '''') : true' - - message: Version must be set if Architecture is set - rule: 'has(self.architecture) && self.architecture != '''' ? self.version - != '''' : true' - overrides: - description: overrides is list of overides for components that are - managed by cluster version operator. Marking a component unmanaged - will prevent the operator from creating or updating the object. - items: - description: ComponentOverride allows overriding cluster version - operator's behavior for a component. - properties: - group: - description: group identifies the API group that the kind is - in. - type: string - kind: - description: kind indentifies which object to override. - type: string - name: - description: name is the component's name. - type: string - namespace: - description: namespace is the component's namespace. If the - resource is cluster scoped, the namespace should be empty. - type: string - unmanaged: - description: 'unmanaged controls if cluster version operator - should stop managing the resources in this cluster. Default: - false' - type: boolean - required: - - group - - kind - - name - - namespace - - unmanaged - type: object - type: array - x-kubernetes-list-map-keys: - - kind - - group - - namespace - - name - x-kubernetes-list-type: map - upstream: - description: upstream may be used to specify the preferred update - server. By default it will use the appropriate update server for - the cluster and region. - type: string - required: - - clusterID - type: object - status: - description: status contains information about the available updates and - any in-progress updates. - properties: - availableUpdates: - description: availableUpdates contains updates recommended for this - cluster. Updates which appear in conditionalUpdates but not in availableUpdates - may expose this cluster to known issues. This list may be empty - if no updates are recommended, if the update service is unavailable, - or if an invalid channel has been specified. - items: - description: Release represents an OpenShift release image and associated - metadata. - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. 
- items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. This - URL is set by the 'url' metadata property on a release or - the metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set - for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. When this field is part of spec, version is optional - if image is specified. - type: string - type: object - nullable: true - type: array - x-kubernetes-list-type: atomic - capabilities: - description: capabilities describes the state of optional, core cluster - components. - properties: - enabledCapabilities: - description: enabledCapabilities lists all the capabilities that - are currently managed. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - knownCapabilities: - description: knownCapabilities lists all the capabilities known - to the current cluster. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - type: object - conditionalUpdates: - description: conditionalUpdates contains the list of updates that - may be recommended for this cluster if it meets specific required - conditions. Consumers interested in the set of updates that are - actually recommended for this cluster should use availableUpdates. - This list may be empty if no updates are recommended, if the update - service is unavailable, or if an empty or invalid channel has been - specified. - items: - description: ConditionalUpdate represents an update which is recommended - to some clusters on the version the current cluster is reconciling, - but which may not be recommended for the current cluster. - properties: - conditions: - description: 'conditions represents the observations of the - conditional update''s current status. Known types are: * Recommended, - for whether the update is recommended for the current cluster.' - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. 
- // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - release: - description: release is the target of the update. - properties: - channels: - description: channels is the set of Cincinnati channels - to which the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is - optional if version is specified and the availableUpdates - field contains a matching version. - type: string - url: - description: url contains information about this release. - This URL is set by the 'url' metadata property on a release - or the metadata returned by the update API and should - be displayed as a link in user interfaces. The URL field - may not be set for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the - update version. When this field is part of spec, version - is optional if image is specified. 
- type: string - type: object - risks: - description: risks represents the range of issues associated - with updating to the target release. The cluster-version operator - will evaluate all entries, and only recommend the update if - there is at least one entry and all entries recommend the - update. - items: - description: ConditionalUpdateRisk represents a reason and - cluster-state for not recommending a conditional update. - properties: - matchingRules: - description: matchingRules is a slice of conditions for - deciding which clusters match the risk and which do - not. The slice is ordered by decreasing precedence. - The cluster-version operator will walk the slice in - order, and stop after the first it can successfully - evaluate. If no condition can be successfully evaluated, - the update will not be recommended. - items: - description: ClusterCondition is a union of typed cluster - conditions. The 'type' property determines which - of the type-specific properties are relevant. When - evaluated on a cluster, the condition may match, not - match, or fail to evaluate. - properties: - promql: - description: promQL represents a cluster condition - based on PromQL. - properties: - promql: - description: PromQL is a PromQL query classifying - clusters. This query query should return a - 1 in the match case and a 0 in the does-not-match - case. Queries which return no time series, - or which return values besides 0 or 1, are - evaluation failures. - type: string - required: - - promql - type: object - type: - description: type represents the cluster-condition - type. This defines the members and semantics of - any additional properties. - enum: - - Always - - PromQL - type: string - required: - - type - type: object - minItems: 1 - type: array - x-kubernetes-list-type: atomic - message: - description: message provides additional information about - the risk of updating, in the event that matchingRules - match the cluster state. This is only to be consumed - by humans. It may contain Line Feed characters (U+000A), - which should be rendered as new lines. - minLength: 1 - type: string - name: - description: name is the CamelCase reason for not recommending - a conditional update, in the event that matchingRules - match the cluster state. - minLength: 1 - type: string - url: - description: url contains information about this risk. - format: uri - minLength: 1 - type: string - required: - - matchingRules - - message - - name - - url - type: object - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - release - - risks - type: object - type: array - x-kubernetes-list-type: atomic - conditions: - description: conditions provides information about the cluster version. - The condition "Available" is set to true if the desiredUpdate has - been reached. The condition "Progressing" is set to true if an update - is being applied. The condition "Degraded" is set to true if an - update is currently blocked by a temporary or permanent error. Conditions - are only valid for the current desiredUpdate when metadata.generation - is equal to status.generation. - items: - description: ClusterOperatorStatusCondition represents the state - of the operator's managed and monitored components. - properties: - lastTransitionTime: - description: lastTransitionTime is the time of the last update - to the current status property. - format: date-time - type: string - message: - description: message provides additional information about the - current condition. 
This is only to be consumed by humans. It - may contain Line Feed characters (U+000A), which should be - rendered as new lines. - type: string - reason: - description: reason is the CamelCase reason for the condition's - current status. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type specifies the aspect reported by this condition. - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - desired: - description: desired is the version that the cluster is reconciling - towards. If the cluster is not yet fully initialized desired will - be set with the information available, which may be an image or - a tag. - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. This - URL is set by the 'url' metadata property on a release or the - metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set for - test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. When this field is part of spec, version is optional - if image is specified. - type: string - type: object - history: - description: history contains a list of the most recent versions applied - to the cluster. This value may be empty during cluster startup, - and then will be updated when a new update is being applied. The - newest update is first in the list and it is ordered by recency. - Updates in the history have state Completed if the rollout completed - - if an update was failing or halfway applied the state will be - Partial. Only a limited amount of update history is preserved. - items: - description: UpdateHistory is a single attempted update to the cluster. - properties: - acceptedRisks: - description: acceptedRisks records risks which were accepted - to initiate the update. For example, it may menition an Upgradeable=False - or missing signature that was overriden via desiredUpdate.force, - or an update that was initiated despite not being in the availableUpdates - set of recommended update targets. - type: string - completionTime: - description: completionTime, if set, is when the update was - fully applied. The update that is currently being applied - will have a null completion time. Completion time will always - be set for entries that are not the current update (usually - to the started time of the next update). - format: date-time - nullable: true - type: string - image: - description: image is a container image location that contains - the update. This value is always populated. - type: string - startedTime: - description: startedTime is the time at which the update was - started. - format: date-time - type: string - state: - description: state reflects whether the update was fully applied. 
- The Partial state indicates the update is not fully applied, - while the Completed state indicates the update was successfully - rolled out at least once (all parts of the update successfully - applied). - type: string - verified: - description: verified indicates whether the provided update - was properly verified before it was installed. If this is - false the cluster may not be trusted. Verified does not cover - upgradeable checks that depend on the cluster state at the - time when the update target was accepted. - type: boolean - version: - description: version is a semantic version identifying the update - version. If the requested image does not define a version, - or if a failure occurs retrieving the image, this value may - be empty. - type: string - required: - - completionTime - - image - - startedTime - - state - - verified - type: object - type: array - x-kubernetes-list-type: atomic - observedGeneration: - description: observedGeneration reports which version of the spec - is being synced. If this value is not equal to metadata.generation, - then the desired and conditions fields may represent a previous - version. - format: int64 - type: integer - versionHash: - description: versionHash is a fingerprint of the content that the - cluster will be updated with. It is used by the operator to avoid - unnecessary work and is for internal use only. - type: string - required: - - availableUpdates - - desired - - observedGeneration - - versionHash - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: the `baremetal` capability requires the `MachineAPI` capability, - which is neither explicitly or implicitly enabled in this cluster, please - enable the `MachineAPI` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''baremetal'' - in self.spec.capabilities.additionalEnabledCapabilities ? ''MachineAPI'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''MachineAPI'' in self.status.capabilities.enabledCapabilities) : true' - - message: the `marketplace` capability requires the `OperatorLifecycleManager` - capability, which is neither explicitly or implicitly enabled in this - cluster, please enable the `OperatorLifecycleManager` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''marketplace'' - in self.spec.capabilities.additionalEnabledCapabilities ? 
''OperatorLifecycleManager'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''OperatorLifecycleManager'' in self.status.capabilities.enabledCapabilities) - : true' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index fcaf456a2..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,780 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/495 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: clusterversions.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterVersion - plural: clusterversions - singular: clusterversion - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.history[?(@.state=="Completed")].version - name: Version - type: string - - jsonPath: .status.conditions[?(@.type=="Available")].status - name: Available - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].status - name: Progressing - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime - name: Since - type: date - - jsonPath: .status.conditions[?(@.type=="Progressing")].message - name: Status - type: string - name: v1 - schema: - openAPIV3Schema: - description: "ClusterVersion is the configuration for the ClusterVersionOperator. - This is where parameters related to automatic updates can be set. \n Compatibility - level 1: Stable within a major release for a minimum of 12 months or 3 minor - releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the desired state of the cluster version - the operator - will work to ensure that the desired version is applied to the cluster. - properties: - capabilities: - description: capabilities configures the installation of optional, - core cluster components. A null value here is identical to an empty - object; see the child properties for default semantics. - properties: - additionalEnabledCapabilities: - description: additionalEnabledCapabilities extends the set of - managed capabilities beyond the baseline defined in baselineCapabilitySet. The - default is an empty set. 
- items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - baselineCapabilitySet: - description: baselineCapabilitySet selects an initial set of optional - capabilities to enable, which can be extended via additionalEnabledCapabilities. If - unset, the cluster will choose a default, and the default may - change over time. The current default is vCurrent. - enum: - - None - - v4.11 - - v4.12 - - v4.13 - - v4.14 - - v4.15 - - v4.16 - - vCurrent - type: string - type: object - channel: - description: channel is an identifier for explicitly requesting that - a non-default set of updates be applied to this cluster. The default - channel will be contain stable updates that are appropriate for - production clusters. - type: string - clusterID: - description: clusterID uniquely identifies this cluster. This is expected - to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - in hexadecimal values). This is a required field. - type: string - desiredUpdate: - description: "desiredUpdate is an optional field that indicates the - desired value of the cluster version. Setting this value will trigger - an upgrade (if the current version does not match the desired version). - The set of recommended update values is listed as part of available - updates in status, and setting values outside that range may cause - the upgrade to fail. \n Some of the fields are inter-related with - restrictions and meanings described here. 1. image is specified, - version is specified, architecture is specified. API validation - error. 2. image is specified, version is specified, architecture - is not specified. You should not do this. version is silently ignored - and image is used. 3. image is specified, version is not specified, - architecture is specified. API validation error. 4. image is specified, - version is not specified, architecture is not specified. image is - used. 5. image is not specified, version is specified, architecture - is specified. version and desired architecture are used to select - an image. 6. image is not specified, version is specified, architecture - is not specified. version and current architecture are used to select - an image. 7. image is not specified, version is not specified, architecture - is specified. API validation error. 8. image is not specified, version - is not specified, architecture is not specified. API validation - error. \n If an upgrade fails the operator will halt and report - status about the failing component. Setting the desired update value - back to the previous version will cause a rollback to be attempted. - Not all rollbacks will succeed." - properties: - architecture: - description: architecture is an optional field that indicates - the desired value of the cluster architecture. In this context - cluster architecture means either a single architecture or a - multi architecture. architecture can only be set to Multi thereby - only allowing updates from single to multi architecture. If - architecture is set, image cannot be set and version must be - set. Valid values are 'Multi' and empty. 
- enum: - - Multi - - "" - type: string - force: - description: force allows an administrator to update to an image - that has failed verification or upgradeable checks. This option - should only be used when the authenticity of the provided image - has been verified out of band because the provided image will - run with full administrative access to the cluster. Do not use - this flag with images that comes from unknown or potentially - malicious sources. - type: boolean - image: - description: image is a container image location that contains - the update. image should be used when the desired version does - not exist in availableUpdates or history. When image is set, - version is ignored. When image is set, version should be empty. - When image is set, architecture cannot be specified. - type: string - version: - description: version is a semantic version identifying the update - version. version is ignored if image is specified and required - if architecture is specified. - type: string - type: object - x-kubernetes-validations: - - message: cannot set both Architecture and Image - rule: 'has(self.architecture) && has(self.image) ? (self.architecture - == '''' || self.image == '''') : true' - - message: Version must be set if Architecture is set - rule: 'has(self.architecture) && self.architecture != '''' ? self.version - != '''' : true' - overrides: - description: overrides is list of overides for components that are - managed by cluster version operator. Marking a component unmanaged - will prevent the operator from creating or updating the object. - items: - description: ComponentOverride allows overriding cluster version - operator's behavior for a component. - properties: - group: - description: group identifies the API group that the kind is - in. - type: string - kind: - description: kind indentifies which object to override. - type: string - name: - description: name is the component's name. - type: string - namespace: - description: namespace is the component's namespace. If the - resource is cluster scoped, the namespace should be empty. - type: string - unmanaged: - description: 'unmanaged controls if cluster version operator - should stop managing the resources in this cluster. Default: - false' - type: boolean - required: - - group - - kind - - name - - namespace - - unmanaged - type: object - type: array - x-kubernetes-list-map-keys: - - kind - - group - - namespace - - name - x-kubernetes-list-type: map - signatureStores: - description: "signatureStores contains the upstream URIs to verify - release signatures and optional reference to a config map by name - containing the PEM-encoded CA bundle. \n By default, CVO will use - existing signature stores if this property is empty. The CVO will - check the release signatures in the local ConfigMaps first. It will - search for a valid signature in these stores in parallel only when - local ConfigMaps did not include a valid signature. Validation will - fail if none of the signature stores reply with valid signature - before timeout. Setting signatureStores will replace the default - signature stores with custom signature stores. Default stores can - be used with custom signature stores by adding them manually. \n - A maximum of 32 signature stores may be configured." - items: - description: SignatureStore represents the URL of custom Signature - Store - properties: - ca: - description: ca is an optional reference to a config map by - name containing the PEM-encoded CA bundle. 
It is used as a - trust anchor to validate the TLS certificate presented by - the remote server. The key "ca.crt" is used to locate the - data. If specified and the config map or expected key is not - found, the signature store is not honored. If the specified - ca data is not valid, the signature store is not honored. - If empty, we fall back to the CA configured via Proxy, which - is appended to the default system roots. The namespace for - this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - url: - description: url contains the upstream custom signature store - URL. url should be a valid absolute http/https URI of an upstream - signature store as per rfc1738. This must be provided and - cannot be empty. - type: string - x-kubernetes-validations: - - message: url must be a valid absolute URL - rule: isURL(self) - required: - - url - type: object - maxItems: 32 - type: array - x-kubernetes-list-map-keys: - - url - x-kubernetes-list-type: map - upstream: - description: upstream may be used to specify the preferred update - server. By default it will use the appropriate update server for - the cluster and region. - type: string - required: - - clusterID - type: object - status: - description: status contains information about the available updates and - any in-progress updates. - properties: - availableUpdates: - description: availableUpdates contains updates recommended for this - cluster. Updates which appear in conditionalUpdates but not in availableUpdates - may expose this cluster to known issues. This list may be empty - if no updates are recommended, if the update service is unavailable, - or if an invalid channel has been specified. - items: - description: Release represents an OpenShift release image and associated - metadata. - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. This - URL is set by the 'url' metadata property on a release or - the metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set - for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. When this field is part of spec, version is optional - if image is specified. - type: string - type: object - nullable: true - type: array - x-kubernetes-list-type: atomic - capabilities: - description: capabilities describes the state of optional, core cluster - components. - properties: - enabledCapabilities: - description: enabledCapabilities lists all the capabilities that - are currently managed. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. 
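# Illustrative sketch (hypothetical values, not part of the vendored schema):
# a custom spec.signatureStores entry per the schema above. The referenced
# config map would live in the openshift-config namespace with the PEM bundle
# under the "ca.crt" key; the URL and config map name are placeholders.
#
# spec:
#   signatureStores:
#     - url: https://sigstore.example.com/release-signatures   # hypothetical store URL
#       ca:
#         name: custom-signature-ca                            # hypothetical config map name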
- enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - knownCapabilities: - description: knownCapabilities lists all the capabilities known - to the current cluster. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - type: object - conditionalUpdates: - description: conditionalUpdates contains the list of updates that - may be recommended for this cluster if it meets specific required - conditions. Consumers interested in the set of updates that are - actually recommended for this cluster should use availableUpdates. - This list may be empty if no updates are recommended, if the update - service is unavailable, or if an empty or invalid channel has been - specified. - items: - description: ConditionalUpdate represents an update which is recommended - to some clusters on the version the current cluster is reconciling, - but which may not be recommended for the current cluster. - properties: - conditions: - description: 'conditions represents the observations of the - conditional update''s current status. Known types are: * Recommended, - for whether the update is recommended for the current cluster.' - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. 
- Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - release: - description: release is the target of the update. - properties: - channels: - description: channels is the set of Cincinnati channels - to which the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is - optional if version is specified and the availableUpdates - field contains a matching version. - type: string - url: - description: url contains information about this release. - This URL is set by the 'url' metadata property on a release - or the metadata returned by the update API and should - be displayed as a link in user interfaces. The URL field - may not be set for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the - update version. When this field is part of spec, version - is optional if image is specified. - type: string - type: object - risks: - description: risks represents the range of issues associated - with updating to the target release. The cluster-version operator - will evaluate all entries, and only recommend the update if - there is at least one entry and all entries recommend the - update. - items: - description: ConditionalUpdateRisk represents a reason and - cluster-state for not recommending a conditional update. - properties: - matchingRules: - description: matchingRules is a slice of conditions for - deciding which clusters match the risk and which do - not. The slice is ordered by decreasing precedence. - The cluster-version operator will walk the slice in - order, and stop after the first it can successfully - evaluate. If no condition can be successfully evaluated, - the update will not be recommended. - items: - description: ClusterCondition is a union of typed cluster - conditions. The 'type' property determines which - of the type-specific properties are relevant. When - evaluated on a cluster, the condition may match, not - match, or fail to evaluate. - properties: - promql: - description: promQL represents a cluster condition - based on PromQL. - properties: - promql: - description: PromQL is a PromQL query classifying - clusters. This query query should return a - 1 in the match case and a 0 in the does-not-match - case. 
Queries which return no time series, - or which return values besides 0 or 1, are - evaluation failures. - type: string - required: - - promql - type: object - type: - description: type represents the cluster-condition - type. This defines the members and semantics of - any additional properties. - enum: - - Always - - PromQL - type: string - required: - - type - type: object - minItems: 1 - type: array - x-kubernetes-list-type: atomic - message: - description: message provides additional information about - the risk of updating, in the event that matchingRules - match the cluster state. This is only to be consumed - by humans. It may contain Line Feed characters (U+000A), - which should be rendered as new lines. - minLength: 1 - type: string - name: - description: name is the CamelCase reason for not recommending - a conditional update, in the event that matchingRules - match the cluster state. - minLength: 1 - type: string - url: - description: url contains information about this risk. - format: uri - minLength: 1 - type: string - required: - - matchingRules - - message - - name - - url - type: object - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - release - - risks - type: object - type: array - x-kubernetes-list-type: atomic - conditions: - description: conditions provides information about the cluster version. - The condition "Available" is set to true if the desiredUpdate has - been reached. The condition "Progressing" is set to true if an update - is being applied. The condition "Degraded" is set to true if an - update is currently blocked by a temporary or permanent error. Conditions - are only valid for the current desiredUpdate when metadata.generation - is equal to status.generation. - items: - description: ClusterOperatorStatusCondition represents the state - of the operator's managed and monitored components. - properties: - lastTransitionTime: - description: lastTransitionTime is the time of the last update - to the current status property. - format: date-time - type: string - message: - description: message provides additional information about the - current condition. This is only to be consumed by humans. It - may contain Line Feed characters (U+000A), which should be - rendered as new lines. - type: string - reason: - description: reason is the CamelCase reason for the condition's - current status. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type specifies the aspect reported by this condition. - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - desired: - description: desired is the version that the cluster is reconciling - towards. If the cluster is not yet fully initialized desired will - be set with the information available, which may be an image or - a tag. - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. 
This - URL is set by the 'url' metadata property on a release or the - metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set for - test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. When this field is part of spec, version is optional - if image is specified. - type: string - type: object - history: - description: history contains a list of the most recent versions applied - to the cluster. This value may be empty during cluster startup, - and then will be updated when a new update is being applied. The - newest update is first in the list and it is ordered by recency. - Updates in the history have state Completed if the rollout completed - - if an update was failing or halfway applied the state will be - Partial. Only a limited amount of update history is preserved. - items: - description: UpdateHistory is a single attempted update to the cluster. - properties: - acceptedRisks: - description: acceptedRisks records risks which were accepted - to initiate the update. For example, it may menition an Upgradeable=False - or missing signature that was overriden via desiredUpdate.force, - or an update that was initiated despite not being in the availableUpdates - set of recommended update targets. - type: string - completionTime: - description: completionTime, if set, is when the update was - fully applied. The update that is currently being applied - will have a null completion time. Completion time will always - be set for entries that are not the current update (usually - to the started time of the next update). - format: date-time - nullable: true - type: string - image: - description: image is a container image location that contains - the update. This value is always populated. - type: string - startedTime: - description: startedTime is the time at which the update was - started. - format: date-time - type: string - state: - description: state reflects whether the update was fully applied. - The Partial state indicates the update is not fully applied, - while the Completed state indicates the update was successfully - rolled out at least once (all parts of the update successfully - applied). - type: string - verified: - description: verified indicates whether the provided update - was properly verified before it was installed. If this is - false the cluster may not be trusted. Verified does not cover - upgradeable checks that depend on the cluster state at the - time when the update target was accepted. - type: boolean - version: - description: version is a semantic version identifying the update - version. If the requested image does not define a version, - or if a failure occurs retrieving the image, this value may - be empty. - type: string - required: - - completionTime - - image - - startedTime - - state - - verified - type: object - type: array - x-kubernetes-list-type: atomic - observedGeneration: - description: observedGeneration reports which version of the spec - is being synced. If this value is not equal to metadata.generation, - then the desired and conditions fields may represent a previous - version. - format: int64 - type: integer - versionHash: - description: versionHash is a fingerprint of the content that the - cluster will be updated with. It is used by the operator to avoid - unnecessary work and is for internal use only. 
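# Illustrative sketch (hypothetical values, not part of the vendored schema):
# a conditionalUpdates[].risks entry per the schema above. The PromQL query
# classifies clusters and should return 1 (match) or 0 (no match); anything
# else is an evaluation failure, in which case the update is not recommended.
# The risk name, message, URL, and metric name are all placeholders.
#
# risks:
#   - name: ExampleNetworkRegression                 # hypothetical risk name
#     message: Clusters using the example network plugin may hit a known regression.
#     url: https://example.com/risk-details          # hypothetical URL
#     matchingRules:
#       - type: PromQL
#         promql:
#           promql: group(cluster_network_plugin{name="example"}) or 0 * group(cluster_version)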
- type: string - required: - - availableUpdates - - desired - - observedGeneration - - versionHash - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: the `baremetal` capability requires the `MachineAPI` capability, - which is neither explicitly or implicitly enabled in this cluster, please - enable the `MachineAPI` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''baremetal'' - in self.spec.capabilities.additionalEnabledCapabilities ? ''MachineAPI'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''MachineAPI'' in self.status.capabilities.enabledCapabilities) : true' - - message: the `marketplace` capability requires the `OperatorLifecycleManager` - capability, which is neither explicitly or implicitly enabled in this - cluster, please enable the `OperatorLifecycleManager` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''marketplace'' - in self.spec.capabilities.additionalEnabledCapabilities ? ''OperatorLifecycleManager'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''OperatorLifecycleManager'' in self.status.capabilities.enabledCapabilities) - : true' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml deleted file mode 100644 index b9cf439c5..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml +++ /dev/null @@ -1,106 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: proxies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Proxy - listKind: ProxyList - plural: proxies - singular: proxy - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Proxy holds cluster-wide information on how to configure default - proxies for the cluster. The canonical name is `cluster` \n Compatibility - level 1: Stable within a major release for a minimum of 12 months or 3 minor - releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec holds user-settable values for the proxy configuration - properties: - httpProxy: - description: httpProxy is the URL of the proxy for HTTP requests. Empty - means unset and will not result in an env var. - type: string - httpsProxy: - description: httpsProxy is the URL of the proxy for HTTPS requests. Empty - means unset and will not result in an env var. - type: string - noProxy: - description: noProxy is a comma-separated list of hostnames and/or - CIDRs and/or IPs for which the proxy should not be used. Empty means - unset and will not result in an env var. - type: string - readinessEndpoints: - description: readinessEndpoints is a list of endpoints used to verify - readiness of the proxy. - items: - type: string - type: array - trustedCA: - description: "trustedCA is a reference to a ConfigMap containing a - CA certificate bundle. The trustedCA field should only be consumed - by a proxy validator. The validator is responsible for reading the - certificate bundle from the required key \"ca-bundle.crt\", merging - it with the system default trust bundle, and writing the merged - trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" - namespace. Clients that expect to make proxy connections must use - the trusted-ca-bundle for all HTTPS requests to the proxy, and may - use the trusted-ca-bundle for non-proxy HTTPS requests as well. - \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". - Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: - ConfigMap metadata: name: user-ca-bundle namespace: openshift-config - data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate - bundle. -----END CERTIFICATE-----" - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - httpProxy: - description: httpProxy is the URL of the proxy for HTTP requests. - type: string - httpsProxy: - description: httpsProxy is the URL of the proxy for HTTPS requests. - type: string - noProxy: - description: noProxy is a comma-separated list of hostnames and/or - CIDRs for which the proxy should not be used. 
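# Illustrative sketch (hypothetical values, not part of the vendored schema):
# a cluster Proxy spec per the schema above, pairing httpProxy/httpsProxy with
# a noProxy list and a trustedCA reference to the user-ca-bundle config map
# shown in the trustedCA description (namespace openshift-config).
#
# apiVersion: config.openshift.io/v1
# kind: Proxy
# metadata:
#   name: cluster
# spec:
#   httpProxy: http://proxy.example.com:3128     # hypothetical proxy endpoint
#   httpsProxy: http://proxy.example.com:3128
#   noProxy: .cluster.local,10.0.0.0/16          # hypothetical exclusions
#   trustedCA:
#     name: user-ca-bundle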
- type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_marketplace-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_marketplace-operator_01_operatorhub.crd.yaml deleted file mode 100644 index cc42ea290..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_03_marketplace-operator_01_operatorhub.crd.yaml +++ /dev/null @@ -1,109 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - capability.openshift.io/name: marketplace - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: operatorhubs.config.openshift.io -spec: - group: config.openshift.io - names: - kind: OperatorHub - listKind: OperatorHubList - plural: operatorhubs - singular: operatorhub - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "OperatorHub is the Schema for the operatorhubs API. It can be - used to change the state of the default hub sources for OperatorHub on the - cluster from enabled to disabled and vice versa. \n Compatibility level - 1: Stable within a major release for a minimum of 12 months or 3 minor releases - (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: OperatorHubSpec defines the desired state of OperatorHub - properties: - disableAllDefaultSources: - description: disableAllDefaultSources allows you to disable all the - default hub sources. If this is true, a specific entry in sources - can be used to enable a default source. If this is false, a specific - entry in sources can be used to disable or enable a default source. - type: boolean - sources: - description: sources is the list of default hub sources and their - configuration. If the list is empty, it implies that the default - hub sources are enabled on the cluster unless disableAllDefaultSources - is true. If disableAllDefaultSources is true and sources is not - empty, the configuration present in sources will take precedence. - The list of default hub sources and their current state will always - be reflected in the status block. 
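# Illustrative sketch (hypothetical values, not part of the vendored schema):
# an OperatorHub spec using the precedence rule described above -- all default
# hub sources disabled, then one source re-enabled by an explicit entry. The
# source name is a placeholder.
#
# apiVersion: config.openshift.io/v1
# kind: OperatorHub
# metadata:
#   name: cluster
# spec:
#   disableAllDefaultSources: true
#   sources:
#     - name: example-default-source    # hypothetical default source name
#       disabled: false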
- items: - description: HubSource is used to specify the hub source and its - configuration - properties: - disabled: - description: disabled is used to disable a default hub source - on cluster - type: boolean - name: - description: name is the name of one of the default hub sources - maxLength: 253 - minLength: 1 - type: string - type: object - type: array - type: object - status: - description: OperatorHubStatus defines the observed state of OperatorHub. - The current state of the default hub sources will always be reflected - here. - properties: - sources: - description: sources encapsulates the result of applying the configuration - for each hub source - items: - description: HubSourceStatus is used to reflect the current state - of applying the configuration to a default source - properties: - disabled: - description: disabled is used to disable a default hub source - on cluster - type: boolean - message: - description: message provides more information regarding failures - type: string - name: - description: name is the name of one of the default hub sources - maxLength: 253 - minLength: 1 - type: string - status: - description: status indicates success or failure in applying - the configuration - type: string - type: object - type: array - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml deleted file mode 100644 index aeefb69bc..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,315 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: apiservers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: APIServer - listKind: APIServerList - plural: apiservers - singular: apiserver - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "APIServer holds configuration (like serving certificates, client - CA and CORS domains) shared by all API servers in the system, among them - especially kube-apiserver and openshift-apiserver. The canonical name of - an instance is 'cluster'. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - additionalCORSAllowedOrigins: - description: additionalCORSAllowedOrigins lists additional, user-defined - regular expressions describing hosts for which the API server allows - access using the CORS headers. This may be needed to access the - API and the integrated OAuth server from JavaScript applications. - The values are regular expressions that correspond to the Golang - regular expression language. - items: - type: string - type: array - audit: - default: - profile: Default - description: audit specifies the settings for audit configuration - to be applied to all OpenShift-provided API servers in the cluster. - properties: - customRules: - description: customRules specify profiles per group. These profile - take precedence over the top-level profile field if they apply. - They are evaluation from top to bottom and the first one that - matches, applies. - items: - description: AuditCustomRule describes a custom rule for an - audit profile that takes precedence over the top-level profile. - properties: - group: - description: group is a name of group a request user must - be member of in order to this profile to apply. - minLength: 1 - type: string - profile: - description: "profile specifies the name of the desired - audit policy configuration to be deployed to all OpenShift-provided - API servers in the cluster. \n The following profiles - are provided: - Default: the existing default policy. - - WriteRequestBodies: like 'Default', but logs request - and response HTTP payloads for write requests (create, - update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read - requests (get, list). - None: no requests are logged at - all, not even oauthaccesstokens and oauthauthorizetokens. - \n If unset, the 'Default' profile is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - required: - - group - - profile - type: object - type: array - x-kubernetes-list-map-keys: - - group - x-kubernetes-list-type: map - profile: - default: Default - description: "profile specifies the name of the desired top-level - audit profile to be applied to all requests sent to any of the - OpenShift-provided API servers in the cluster (kube-apiserver, - openshift-apiserver and oauth-apiserver), with the exception - of those requests that match one or more of the customRules. - \n The following profiles are provided: - Default: default policy - which means MetaData level logging with the exception of events - (not logged at all), oauthaccesstokens and oauthauthorizetokens - (both logged at RequestBody level). - WriteRequestBodies: like - 'Default', but logs request and response HTTP payloads for write - requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read requests - (get, list). - None: no requests are logged at all, not even - oauthaccesstokens and oauthauthorizetokens. \n Warning: It is - not recommended to disable audit logging by using the `None` - profile unless you are fully aware of the risks of not logging - data that can be beneficial when troubleshooting issues. 
If - you disable audit logging and a support situation arises, you - might need to enable audit logging and reproduce the issue in - order to troubleshoot properly. \n If unset, the 'Default' profile - is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - type: object - clientCA: - description: 'clientCA references a ConfigMap containing a certificate - bundle for the signers that will be recognized for incoming client - certificates in addition to the operator managed signers. If this - is empty, then only operator managed signers are valid. You usually - only have to set this if you have your own PKI you wish to honor - client certificates from. The ConfigMap must exist in the openshift-config - namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - - CA bundle.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - encryption: - description: encryption allows the configuration of encryption of - resources at the datastore layer. - properties: - type: - description: "type defines what encryption type should be used - to encrypt resources at the datastore layer. When this field - is unset (i.e. when it is set to the empty string), identity - is implied. The behavior of unset can and will change over time. - \ Even if encryption is enabled by default, the meaning of unset - may change to a different encryption type based on changes in - best practices. \n When encryption is enabled, all sensitive - resources shipped with the platform are encrypted. This list - of sensitive resources can and will change over time. The current - authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io - 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" - enum: - - "" - - identity - - aescbc - - aesgcm - type: string - type: object - servingCerts: - description: servingCert is the TLS cert info for serving secure traffic. - If not specified, operator managed certificates will be used for - serving secure traffic. - properties: - namedCertificates: - description: namedCertificates references secrets containing the - TLS cert info for serving secure traffic to specific hostnames. - If no named certificates are provided, or no named certificates - match the server name as understood by a client, the defaultServingCertificate - will be used. - items: - description: APIServerNamedServingCert maps a server DNS name, - as understood by a client, to a certificate. - properties: - names: - description: names is a optional list of explicit DNS names - (leading wildcards allowed) that should use this certificate - to serve secure traffic. If no names are provided, the - implicit names will be extracted from the certificates. - Exact names trump over wildcard names. Explicit names - defined here trump over extracted implicit names. - items: - type: string - type: array - servingCertificate: - description: 'servingCertificate references a kubernetes.io/tls - type secret containing the TLS cert info for serving secure - traffic. The secret must exist in the openshift-config - namespace and contain the following required fields: - - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - - TLS certificate.' 
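# Illustrative sketch (hypothetical values, not part of the vendored schema):
# an APIServer spec combining the audit fields above with the Custom
# tlsSecurityProfile described next -- a per-group custom rule overriding the
# top-level audit profile, plus an explicit cipher/minTLSVersion selection
# (cipher names taken from the schema's own examples). The group name is a
# placeholder.
#
# apiVersion: config.openshift.io/v1
# kind: APIServer
# metadata:
#   name: cluster
# spec:
#   audit:
#     profile: WriteRequestBodies
#     customRules:
#       - group: system:authenticated:oauth      # hypothetical group name
#         profile: Default
#   tlsSecurityProfile:
#     type: Custom
#     custom:
#       ciphers:
#         - ECDHE-ECDSA-AES128-GCM-SHA256
#         - ECDHE-RSA-AES128-GCM-SHA256
#       minTLSVersion: VersionTLS12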
- properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - type: object - tlsSecurityProfile: - description: "tlsSecurityProfile specifies settings for TLS connections - for externally exposed servers. \n If unset, a default (which may - change between releases) is chosen. Note that only Old, Intermediate - and Custom profiles are currently supported, and the maximum available - minTLSVersion is VersionTLS12." - properties: - custom: - description: "custom is a user-defined TLS security profile. Be - extremely careful using a custom profile as invalid configurations - can be catastrophic. An example custom profile looks like this: - \n ciphers: \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - ECDHE-RSA-AES128-GCM-SHA256 \n - ECDHE-ECDSA-AES128-GCM-SHA256 - \n minTLSVersion: VersionTLS11" - nullable: true - properties: - ciphers: - description: "ciphers is used to specify the cipher algorithms - that are negotiated during the TLS handshake. Operators - may remove entries their operands do not support. For example, - to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" - items: - type: string - type: array - minTLSVersion: - description: "minTLSVersion is used to specify the minimal - version of the TLS protocol that is negotiated during the - TLS handshake. For example, to use TLS versions 1.1, 1.2 - and 1.3 (yaml): \n minTLSVersion: VersionTLS11 \n NOTE: - currently the highest minTLSVersion allowed is VersionTLS12" - enum: - - VersionTLS10 - - VersionTLS11 - - VersionTLS12 - - VersionTLS13 - type: string - type: object - intermediate: - description: "intermediate is a TLS security profile based on: - \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n minTLSVersion: VersionTLS12" - nullable: true - type: object - modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n minTLSVersion: VersionTLS13" - nullable: true - type: object - old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n - DHE-RSA-CHACHA20-POLY1305 \n - ECDHE-ECDSA-AES128-SHA256 - \n - ECDHE-RSA-AES128-SHA256 \n - ECDHE-ECDSA-AES128-SHA \n - - ECDHE-RSA-AES128-SHA \n - ECDHE-ECDSA-AES256-SHA384 \n - ECDHE-RSA-AES256-SHA384 - \n - ECDHE-ECDSA-AES256-SHA \n - ECDHE-RSA-AES256-SHA \n - DHE-RSA-AES128-SHA256 - \n - 
DHE-RSA-AES256-SHA256 \n - AES128-GCM-SHA256 \n - AES256-GCM-SHA384 - \n - AES128-SHA256 \n - AES256-SHA256 \n - AES128-SHA \n - AES256-SHA - \n - DES-CBC3-SHA \n minTLSVersion: VersionTLS10" - nullable: true - type: object - type: - description: "type is one of Old, Intermediate, Modern or Custom. - Custom provides the ability to specify individual TLS security - profile parameters. Old, Intermediate and Modern are TLS security - profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations - \n The profiles are intent based, so they may change over time - as new ciphers are developed and existing ciphers are found - to be insecure. Depending on precisely which ciphers are available - to a process, the list may be reduced. \n Note that the Modern - profile is currently not supported because it is not yet well - adopted by common software libraries." - enum: - - Old - - Intermediate - - Modern - - Custom - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml deleted file mode 100644 index f4445e768..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml +++ /dev/null @@ -1,315 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: apiservers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: APIServer - listKind: APIServerList - plural: apiservers - singular: apiserver - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "APIServer holds configuration (like serving certificates, client - CA and CORS domains) shared by all API servers in the system, among them - especially kube-apiserver and openshift-apiserver. The canonical name of - an instance is 'cluster'. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - additionalCORSAllowedOrigins: - description: additionalCORSAllowedOrigins lists additional, user-defined - regular expressions describing hosts for which the API server allows - access using the CORS headers. This may be needed to access the - API and the integrated OAuth server from JavaScript applications. - The values are regular expressions that correspond to the Golang - regular expression language. - items: - type: string - type: array - audit: - default: - profile: Default - description: audit specifies the settings for audit configuration - to be applied to all OpenShift-provided API servers in the cluster. - properties: - customRules: - description: customRules specify profiles per group. These profile - take precedence over the top-level profile field if they apply. - They are evaluation from top to bottom and the first one that - matches, applies. - items: - description: AuditCustomRule describes a custom rule for an - audit profile that takes precedence over the top-level profile. - properties: - group: - description: group is a name of group a request user must - be member of in order to this profile to apply. - minLength: 1 - type: string - profile: - description: "profile specifies the name of the desired - audit policy configuration to be deployed to all OpenShift-provided - API servers in the cluster. \n The following profiles - are provided: - Default: the existing default policy. - - WriteRequestBodies: like 'Default', but logs request - and response HTTP payloads for write requests (create, - update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read - requests (get, list). - None: no requests are logged at - all, not even oauthaccesstokens and oauthauthorizetokens. - \n If unset, the 'Default' profile is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - required: - - group - - profile - type: object - type: array - x-kubernetes-list-map-keys: - - group - x-kubernetes-list-type: map - profile: - default: Default - description: "profile specifies the name of the desired top-level - audit profile to be applied to all requests sent to any of the - OpenShift-provided API servers in the cluster (kube-apiserver, - openshift-apiserver and oauth-apiserver), with the exception - of those requests that match one or more of the customRules. - \n The following profiles are provided: - Default: default policy - which means MetaData level logging with the exception of events - (not logged at all), oauthaccesstokens and oauthauthorizetokens - (both logged at RequestBody level). - WriteRequestBodies: like - 'Default', but logs request and response HTTP payloads for write - requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read requests - (get, list). - None: no requests are logged at all, not even - oauthaccesstokens and oauthauthorizetokens. \n Warning: It is - not recommended to disable audit logging by using the `None` - profile unless you are fully aware of the risks of not logging - data that can be beneficial when troubleshooting issues. 
If - you disable audit logging and a support situation arises, you - might need to enable audit logging and reproduce the issue in - order to troubleshoot properly. \n If unset, the 'Default' profile - is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - type: object - clientCA: - description: 'clientCA references a ConfigMap containing a certificate - bundle for the signers that will be recognized for incoming client - certificates in addition to the operator managed signers. If this - is empty, then only operator managed signers are valid. You usually - only have to set this if you have your own PKI you wish to honor - client certificates from. The ConfigMap must exist in the openshift-config - namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - - CA bundle.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - encryption: - description: encryption allows the configuration of encryption of - resources at the datastore layer. - properties: - type: - description: "type defines what encryption type should be used - to encrypt resources at the datastore layer. When this field - is unset (i.e. when it is set to the empty string), identity - is implied. The behavior of unset can and will change over time. - \ Even if encryption is enabled by default, the meaning of unset - may change to a different encryption type based on changes in - best practices. \n When encryption is enabled, all sensitive - resources shipped with the platform are encrypted. This list - of sensitive resources can and will change over time. The current - authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io - 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" - enum: - - "" - - identity - - aescbc - - aesgcm - type: string - type: object - servingCerts: - description: servingCert is the TLS cert info for serving secure traffic. - If not specified, operator managed certificates will be used for - serving secure traffic. - properties: - namedCertificates: - description: namedCertificates references secrets containing the - TLS cert info for serving secure traffic to specific hostnames. - If no named certificates are provided, or no named certificates - match the server name as understood by a client, the defaultServingCertificate - will be used. - items: - description: APIServerNamedServingCert maps a server DNS name, - as understood by a client, to a certificate. - properties: - names: - description: names is a optional list of explicit DNS names - (leading wildcards allowed) that should use this certificate - to serve secure traffic. If no names are provided, the - implicit names will be extracted from the certificates. - Exact names trump over wildcard names. Explicit names - defined here trump over extracted implicit names. - items: - type: string - type: array - servingCertificate: - description: 'servingCertificate references a kubernetes.io/tls - type secret containing the TLS cert info for serving secure - traffic. The secret must exist in the openshift-config - namespace and contain the following required fields: - - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - - TLS certificate.' 
- properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - type: object - tlsSecurityProfile: - description: "tlsSecurityProfile specifies settings for TLS connections - for externally exposed servers. \n If unset, a default (which may - change between releases) is chosen. Note that only Old, Intermediate - and Custom profiles are currently supported, and the maximum available - minTLSVersion is VersionTLS12." - properties: - custom: - description: "custom is a user-defined TLS security profile. Be - extremely careful using a custom profile as invalid configurations - can be catastrophic. An example custom profile looks like this: - \n ciphers: \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - ECDHE-RSA-AES128-GCM-SHA256 \n - ECDHE-ECDSA-AES128-GCM-SHA256 - \n minTLSVersion: VersionTLS11" - nullable: true - properties: - ciphers: - description: "ciphers is used to specify the cipher algorithms - that are negotiated during the TLS handshake. Operators - may remove entries their operands do not support. For example, - to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" - items: - type: string - type: array - minTLSVersion: - description: "minTLSVersion is used to specify the minimal - version of the TLS protocol that is negotiated during the - TLS handshake. For example, to use TLS versions 1.1, 1.2 - and 1.3 (yaml): \n minTLSVersion: VersionTLS11 \n NOTE: - currently the highest minTLSVersion allowed is VersionTLS12" - enum: - - VersionTLS10 - - VersionTLS11 - - VersionTLS12 - - VersionTLS13 - type: string - type: object - intermediate: - description: "intermediate is a TLS security profile based on: - \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n minTLSVersion: VersionTLS12" - nullable: true - type: object - modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n minTLSVersion: VersionTLS13" - nullable: true - type: object - old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n - DHE-RSA-CHACHA20-POLY1305 \n - ECDHE-ECDSA-AES128-SHA256 - \n - ECDHE-RSA-AES128-SHA256 \n - ECDHE-ECDSA-AES128-SHA \n - - ECDHE-RSA-AES128-SHA \n - ECDHE-ECDSA-AES256-SHA384 \n - ECDHE-RSA-AES256-SHA384 - \n - ECDHE-ECDSA-AES256-SHA \n - ECDHE-RSA-AES256-SHA \n - DHE-RSA-AES128-SHA256 - \n - 
DHE-RSA-AES256-SHA256 \n - AES128-GCM-SHA256 \n - AES256-GCM-SHA384 - \n - AES128-SHA256 \n - AES256-SHA256 \n - AES128-SHA \n - AES256-SHA - \n - DES-CBC3-SHA \n minTLSVersion: VersionTLS10" - nullable: true - type: object - type: - description: "type is one of Old, Intermediate, Modern or Custom. - Custom provides the ability to specify individual TLS security - profile parameters. Old, Intermediate and Modern are TLS security - profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations - \n The profiles are intent based, so they may change over time - as new ciphers are developed and existing ciphers are found - to be insecure. Depending on precisely which ciphers are available - to a process, the list may be reduced. \n Note that the Modern - profile is currently not supported because it is not yet well - adopted by common software libraries." - enum: - - Old - - Intermediate - - Modern - - Custom - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 37b054d19..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,315 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: apiservers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: APIServer - listKind: APIServerList - plural: apiservers - singular: apiserver - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "APIServer holds configuration (like serving certificates, client - CA and CORS domains) shared by all API servers in the system, among them - especially kube-apiserver and openshift-apiserver. The canonical name of - an instance is 'cluster'. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - additionalCORSAllowedOrigins: - description: additionalCORSAllowedOrigins lists additional, user-defined - regular expressions describing hosts for which the API server allows - access using the CORS headers. This may be needed to access the - API and the integrated OAuth server from JavaScript applications. - The values are regular expressions that correspond to the Golang - regular expression language. - items: - type: string - type: array - audit: - default: - profile: Default - description: audit specifies the settings for audit configuration - to be applied to all OpenShift-provided API servers in the cluster. - properties: - customRules: - description: customRules specify profiles per group. These profile - take precedence over the top-level profile field if they apply. - They are evaluation from top to bottom and the first one that - matches, applies. - items: - description: AuditCustomRule describes a custom rule for an - audit profile that takes precedence over the top-level profile. - properties: - group: - description: group is a name of group a request user must - be member of in order to this profile to apply. - minLength: 1 - type: string - profile: - description: "profile specifies the name of the desired - audit policy configuration to be deployed to all OpenShift-provided - API servers in the cluster. \n The following profiles - are provided: - Default: the existing default policy. - - WriteRequestBodies: like 'Default', but logs request - and response HTTP payloads for write requests (create, - update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read - requests (get, list). - None: no requests are logged at - all, not even oauthaccesstokens and oauthauthorizetokens. - \n If unset, the 'Default' profile is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - required: - - group - - profile - type: object - type: array - x-kubernetes-list-map-keys: - - group - x-kubernetes-list-type: map - profile: - default: Default - description: "profile specifies the name of the desired top-level - audit profile to be applied to all requests sent to any of the - OpenShift-provided API servers in the cluster (kube-apiserver, - openshift-apiserver and oauth-apiserver), with the exception - of those requests that match one or more of the customRules. - \n The following profiles are provided: - Default: default policy - which means MetaData level logging with the exception of events - (not logged at all), oauthaccesstokens and oauthauthorizetokens - (both logged at RequestBody level). - WriteRequestBodies: like - 'Default', but logs request and response HTTP payloads for write - requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read requests - (get, list). - None: no requests are logged at all, not even - oauthaccesstokens and oauthauthorizetokens. \n Warning: It is - not recommended to disable audit logging by using the `None` - profile unless you are fully aware of the risks of not logging - data that can be beneficial when troubleshooting issues. 
If - you disable audit logging and a support situation arises, you - might need to enable audit logging and reproduce the issue in - order to troubleshoot properly. \n If unset, the 'Default' profile - is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - type: object - clientCA: - description: 'clientCA references a ConfigMap containing a certificate - bundle for the signers that will be recognized for incoming client - certificates in addition to the operator managed signers. If this - is empty, then only operator managed signers are valid. You usually - only have to set this if you have your own PKI you wish to honor - client certificates from. The ConfigMap must exist in the openshift-config - namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - - CA bundle.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - encryption: - description: encryption allows the configuration of encryption of - resources at the datastore layer. - properties: - type: - description: "type defines what encryption type should be used - to encrypt resources at the datastore layer. When this field - is unset (i.e. when it is set to the empty string), identity - is implied. The behavior of unset can and will change over time. - \ Even if encryption is enabled by default, the meaning of unset - may change to a different encryption type based on changes in - best practices. \n When encryption is enabled, all sensitive - resources shipped with the platform are encrypted. This list - of sensitive resources can and will change over time. The current - authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io - 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" - enum: - - "" - - identity - - aescbc - - aesgcm - type: string - type: object - servingCerts: - description: servingCert is the TLS cert info for serving secure traffic. - If not specified, operator managed certificates will be used for - serving secure traffic. - properties: - namedCertificates: - description: namedCertificates references secrets containing the - TLS cert info for serving secure traffic to specific hostnames. - If no named certificates are provided, or no named certificates - match the server name as understood by a client, the defaultServingCertificate - will be used. - items: - description: APIServerNamedServingCert maps a server DNS name, - as understood by a client, to a certificate. - properties: - names: - description: names is a optional list of explicit DNS names - (leading wildcards allowed) that should use this certificate - to serve secure traffic. If no names are provided, the - implicit names will be extracted from the certificates. - Exact names trump over wildcard names. Explicit names - defined here trump over extracted implicit names. - items: - type: string - type: array - servingCertificate: - description: 'servingCertificate references a kubernetes.io/tls - type secret containing the TLS cert info for serving secure - traffic. The secret must exist in the openshift-config - namespace and contain the following required fields: - - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - - TLS certificate.' 
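The schema fragments above describe the `APIServer` singleton's audit, encryption, serving-certificate, and TLS-profile knobs in the abstract. As a minimal sketch of how they compose into one resource (the group name, hostname, and secret name below are illustrative placeholders, not values from this diff):

```
apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  # The canonical name of the singleton instance is "cluster".
  name: cluster
spec:
  audit:
    # Top-level profile; customRules take precedence where their group matches.
    profile: Default
    customRules:
      - group: system:serviceaccounts        # illustrative group
        profile: WriteRequestBodies
  encryption:
    # Encrypt sensitive resources (secrets, configmaps, routes, OAuth tokens)
    # at the datastore layer.
    type: aescbc
  servingCerts:
    namedCertificates:
      - names:
          - api.example.com                  # placeholder hostname
        servingCertificate:
          # kubernetes.io/tls secret in the openshift-config namespace.
          name: custom-api-cert
  tlsSecurityProfile:
    type: Custom
    custom:
      ciphers:
        - ECDHE-ECDSA-AES128-GCM-SHA256
        - ECDHE-RSA-AES128-GCM-SHA256
      minTLSVersion: VersionTLS12            # highest minTLSVersion currently allowed
```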
- properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - type: object - tlsSecurityProfile: - description: "tlsSecurityProfile specifies settings for TLS connections - for externally exposed servers. \n If unset, a default (which may - change between releases) is chosen. Note that only Old, Intermediate - and Custom profiles are currently supported, and the maximum available - minTLSVersion is VersionTLS12." - properties: - custom: - description: "custom is a user-defined TLS security profile. Be - extremely careful using a custom profile as invalid configurations - can be catastrophic. An example custom profile looks like this: - \n ciphers: \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - ECDHE-RSA-AES128-GCM-SHA256 \n - ECDHE-ECDSA-AES128-GCM-SHA256 - \n minTLSVersion: VersionTLS11" - nullable: true - properties: - ciphers: - description: "ciphers is used to specify the cipher algorithms - that are negotiated during the TLS handshake. Operators - may remove entries their operands do not support. For example, - to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" - items: - type: string - type: array - minTLSVersion: - description: "minTLSVersion is used to specify the minimal - version of the TLS protocol that is negotiated during the - TLS handshake. For example, to use TLS versions 1.1, 1.2 - and 1.3 (yaml): \n minTLSVersion: VersionTLS11 \n NOTE: - currently the highest minTLSVersion allowed is VersionTLS12" - enum: - - VersionTLS10 - - VersionTLS11 - - VersionTLS12 - - VersionTLS13 - type: string - type: object - intermediate: - description: "intermediate is a TLS security profile based on: - \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n minTLSVersion: VersionTLS12" - nullable: true - type: object - modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n minTLSVersion: VersionTLS13" - nullable: true - type: object - old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n - DHE-RSA-CHACHA20-POLY1305 \n - ECDHE-ECDSA-AES128-SHA256 - \n - ECDHE-RSA-AES128-SHA256 \n - ECDHE-ECDSA-AES128-SHA \n - - ECDHE-RSA-AES128-SHA \n - ECDHE-ECDSA-AES256-SHA384 \n - ECDHE-RSA-AES256-SHA384 - \n - ECDHE-ECDSA-AES256-SHA \n - ECDHE-RSA-AES256-SHA \n - DHE-RSA-AES128-SHA256 - \n - 
DHE-RSA-AES256-SHA256 \n - AES128-GCM-SHA256 \n - AES256-GCM-SHA384 - \n - AES128-SHA256 \n - AES256-SHA256 \n - AES128-SHA \n - AES256-SHA - \n - DES-CBC3-SHA \n minTLSVersion: VersionTLS10" - nullable: true - type: object - type: - description: "type is one of Old, Intermediate, Modern or Custom. - Custom provides the ability to specify individual TLS security - profile parameters. Old, Intermediate and Modern are TLS security - profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations - \n The profiles are intent based, so they may change over time - as new ciphers are developed and existing ciphers are found - to be insecure. Depending on precisely which ciphers are available - to a process, the list may be reduced. \n Note that the Modern - profile is currently not supported because it is not yet well - adopted by common software libraries." - enum: - - Old - - Intermediate - - Modern - - Custom - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml deleted file mode 100644 index 6d8a5da7e..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml +++ /dev/null @@ -1,554 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Authentication specifies cluster-wide settings for authentication - (like OAuth and webhook token authenticators). The canonical name of an - instance is `cluster`. \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: 'oauthMetadata contains the discovery endpoint data for - OAuth 2.0 Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw ''/.well-known/oauth-authorization-server'' For further - details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence over - any metadata reference stored in status. The key "oauthMetadata" - is used to locate the data. If specified and the config map or expected - key is not found, no metadata is served. If the specified metadata - is not valid, no metadata is served. The namespace for this config - map is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: "OIDCProviders are OIDC identity providers that can issue - tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\". - \n At most one provider can be configured." - items: - properties: - claimMappings: - description: ClaimMappings describes rules on how to transform - information from an ID token into a cluster identity - properties: - groups: - description: Groups is a name of the claim that should be - used to construct groups for the cluster identity. The - referenced claim must use array of strings values. - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - description: "Prefix is a string to prefix the value - from the token in the result of the claim mapping. - \n By default, no prefixing occurs. \n Example: if - `prefix` is set to \"myoidc:\"\" and the `claim` in - JWT contains an array of strings \"a\", \"b\" and - \ \"c\", the mapping will result in an array of string - \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." - type: string - required: - - claim - type: object - username: - description: "Username is a name of the claim that should - be used to construct usernames for the cluster identity. - \n Default value: \"sub\"" - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - properties: - prefixString: - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: "PrefixPolicy specifies how a prefix should - apply. \n By default, claims other than `email` will - be prefixed with the issuer URL to prevent naming - clashes with other plugins. \n Set to \"NoPrefix\" - to disable prefixing. \n Example: (1) `prefix` is - set to \"myoidc:\" and `claim` is set to \"username\". - If the JWT claim `username` contains value `userA`, - the resulting mapped value will be \"myoidc:userA\". - (2) `prefix` is set to \"myoidc:\" and `claim` is - set to \"email\". If the JWT `email` claim contains - value \"userA@myoidc.tld\", the resulting mapped value - will be \"myoidc:userA@myoidc.tld\". 
(3) `prefix` - is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include \"username\":\"userA\" and - \"email\":\"userA@myoidc.tld\", and `claim` is set - to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" - (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - type: object - claimValidationRules: - description: ClaimValidationRules are rules that are applied - to validate token claims to authenticate users. - items: - properties: - requiredClaim: - description: RequiredClaim allows configuring a required - claim name and its expected value - properties: - claim: - description: Claim is a name of a required claim. - Only claims with string values are supported. - minLength: 1 - type: string - requiredValue: - description: RequiredValue is the required value for - the claim. - minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: Type sets the type of the validation rule - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: Issuer describes atributes of the OIDC token issuer - properties: - audiences: - description: Audiences is an array of audiences that the - token was issued for. Valid tokens must include at least - one of these values in their "aud" claim. Must be set - to exactly one value. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: CertificateAuthority is a reference to a config - map in the configuration namespace. The .data of the configMap - must contain the "ca-bundle.crt" key. If unset, system - trust is used instead. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme. 
- pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: Name of the OIDC provider - minLength: 1 - type: string - oidcClients: - description: OIDCClients contains configuration for the platform's - clients that need to request tokens from the issuer - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - clientSecret: - description: ClientSecret refers to a secret in the `openshift-config` - namespace that contains the client secret in the `clientSecret` - key of the `.data` field - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: ComponentName is the name of the component - that is supposed to consume this client configuration - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the - component that is supposed to consume this client configuration - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: ExtraScopes is an optional set of scopes - to request tokens with. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: 'serviceAccountIssuer is the identifier of the bound - service account token issuer. The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation - of all bound tokens with the previous issuer value. Instead, the - tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set - to 24h). This time period is subject to change over time. This allows - internal components to transition to use new service account issuer - without service distruption.' - type: string - type: - description: type identifies the cluster managed, user facing authentication - mode in use. Specifically, it manages the component that responds - to login attempts. The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: "webhookTokenAuthenticator configures a remote token - reviewer. These remote authentication webhooks can be used to verify - bearer tokens via the tokenreviews.authentication.k8s.io REST API. - This is required to honor bearer tokens that are provisioned by - an external authentication service. \n Can only be set if \"Type\" - is set to \"None\"." - properties: - kubeConfig: - description: "kubeConfig references a secret that contains kube - config file data which describes how to access the remote webhook - service. The namespace for the referenced secret is openshift-config. - \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - \n The key \"kubeConfig\" is used to locate the data. If the - secret or expected key is not found, the webhook is not honored. 
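Read together, the `oidcProviders` schema above implies a spec like the following. This is a sketch only: the provider name, issuer URL, audience, and config-map name are hypothetical, and `type: OIDC` is required for the provider to be honored.

```
apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: OIDC
  oidcProviders:
    # At most one provider can be configured.
    - name: my-oidc                          # hypothetical provider name
      issuer:
        issuerURL: https://myoidc.tld        # must use the https:// scheme
        audiences:
          - my-cluster-audience              # exactly one value must be set
        issuerCertificateAuthority:
          name: oidc-ca-bundle               # config map carrying "ca-bundle.crt"
      claimMappings:
        username:
          claim: email
          prefixPolicy: NoPrefix             # email claims are not issuer-prefixed by default anyway
        groups:
          claim: groups
          prefix: "myoidc:"
      claimValidationRules:
        - type: RequiredClaim
          requiredClaim:
            claim: email_verified            # hypothetical required claim
            requiredValue: "true"
```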
- If the specified kube config data is not valid, the webhook - is not honored." - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: deprecatedWebhookTokenAuthenticator holds the necessary - configuration options for a remote token authenticator. It's the - same as WebhookTokenAuthenticator but it's missing the 'required' - validation on KubeConfig field. - properties: - kubeConfig: - description: 'kubeConfig contains kube config file data which - describes how to access the remote webhook service. For further - details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. If the secret - or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored. The namespace for this secret is determined - by the point of use.' - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: 'integratedOAuthMetadata contains the discovery endpoint - data for OAuth 2.0 Authorization Server Metadata for the in-cluster - integrated OAuth server. This discovery document can be viewed from - its served location: oc get --raw ''/.well-known/oauth-authorization-server'' - For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. An explicitly - set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set - to IntegratedOAuth. The key "oauthMetadata" is used to locate the - data. If the config map or expected key is not found, no metadata - is served. If the specified metadata is not valid, no metadata is - served. The namespace for this config map is openshift-config-managed.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: OIDCClients is where participating operators place the - current OIDC client status for OIDC clients that can be customized - by the cluster-admin. - items: - properties: - componentName: - description: ComponentName is the name of the component that - will consume a client configuration. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the component - that will consume a client configuration. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: "Conditions are used to communicate the state of - the `oidcClients` entry. \n Supported conditions include Available, - Degraded and Progressing. \n If Available is true, the component - is successfully using the configured client. If Degraded is - true, that means something has gone wrong trying to handle - the client configuration. 
If Progressing is true, that means - the component is taking some action related to the `oidcClients` - entry." - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: ConsumingUsers is a slice of ServiceAccounts that - need to have read permission on the `clientSecret` secret. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. 
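For orientation, a populated `status.oidcClients` entry consistent with the condition and `consumingUsers` constraints above might look like this; the component and service-account names are invented, and status is reported by the consuming operator rather than written by hand:

```
status:
  oidcClients:
    - componentName: console                 # illustrative component
      componentNamespace: openshift-console
      conditions:
        - type: Available
          status: "True"
          reason: OIDCClientConfigured       # CamelCase, matches the reason pattern
          message: using the configured OIDC client
          lastTransitionTime: "2024-06-01T12:00:00Z"
          observedGeneration: 2
      consumingUsers:
        # Must match ^system:serviceaccount:<namespace>:<name>$
        - system:serviceaccount:openshift-console:console
```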
- maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the - component is currently using. - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: OIDCName refers to the `name` of the provider - from `oidcProviders` - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml deleted file mode 100644 index 2cabddacf..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml +++ /dev/null @@ -1,552 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - release.openshift.io/feature-set: Default - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Authentication specifies cluster-wide settings for authentication - (like OAuth and webhook token authenticators). The canonical name of an - instance is `cluster`. \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: 'oauthMetadata contains the discovery endpoint data for - OAuth 2.0 Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw ''/.well-known/oauth-authorization-server'' For further - details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence over - any metadata reference stored in status. The key "oauthMetadata" - is used to locate the data. If specified and the config map or expected - key is not found, no metadata is served. If the specified metadata - is not valid, no metadata is served. The namespace for this config - map is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: "OIDCProviders are OIDC identity providers that can issue - tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\". - \n At most one provider can be configured." - items: - properties: - claimMappings: - description: ClaimMappings describes rules on how to transform - information from an ID token into a cluster identity - properties: - groups: - description: Groups is a name of the claim that should be - used to construct groups for the cluster identity. The - referenced claim must use array of strings values. - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - description: "Prefix is a string to prefix the value - from the token in the result of the claim mapping. - \n By default, no prefixing occurs. \n Example: if - `prefix` is set to \"myoidc:\"\" and the `claim` in - JWT contains an array of strings \"a\", \"b\" and - \ \"c\", the mapping will result in an array of string - \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." - type: string - required: - - claim - type: object - username: - description: "Username is a name of the claim that should - be used to construct usernames for the cluster identity. - \n Default value: \"sub\"" - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - properties: - prefixString: - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: "PrefixPolicy specifies how a prefix should - apply. \n By default, claims other than `email` will - be prefixed with the issuer URL to prevent naming - clashes with other plugins. \n Set to \"NoPrefix\" - to disable prefixing. \n Example: (1) `prefix` is - set to \"myoidc:\" and `claim` is set to \"username\". - If the JWT claim `username` contains value `userA`, - the resulting mapped value will be \"myoidc:userA\". - (2) `prefix` is set to \"myoidc:\" and `claim` is - set to \"email\". 
If the JWT `email` claim contains - value \"userA@myoidc.tld\", the resulting mapped value - will be \"myoidc:userA@myoidc.tld\". (3) `prefix` - is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include \"username\":\"userA\" and - \"email\":\"userA@myoidc.tld\", and `claim` is set - to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" - (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - type: object - claimValidationRules: - description: ClaimValidationRules are rules that are applied - to validate token claims to authenticate users. - items: - properties: - requiredClaim: - description: RequiredClaim allows configuring a required - claim name and its expected value - properties: - claim: - description: Claim is a name of a required claim. - Only claims with string values are supported. - minLength: 1 - type: string - requiredValue: - description: RequiredValue is the required value for - the claim. - minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: Type sets the type of the validation rule - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: Issuer describes atributes of the OIDC token issuer - properties: - audiences: - description: Audiences is an array of audiences that the - token was issued for. Valid tokens must include at least - one of these values in their "aud" claim. Must be set - to exactly one value. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: CertificateAuthority is a reference to a config - map in the configuration namespace. The .data of the configMap - must contain the "ca-bundle.crt" key. If unset, system - trust is used instead. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme. 
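The `prefixPolicy` validation quoted above couples the two fields: `prefix.prefixString` must be set exactly when `prefixPolicy` is `Prefix`, and must be unset otherwise. A conforming fragment (the claim and prefix values are arbitrary examples):

```
claimMappings:
  username:
    claim: username
    prefixPolicy: Prefix
    # Required (and only allowed) because prefixPolicy is Prefix.
    prefix:
      prefixString: "myoidc:"
```

With these values, a JWT `username` claim of `userA` maps to the cluster identity `myoidc:userA`, as in the documented example.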
- pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: Name of the OIDC provider - minLength: 1 - type: string - oidcClients: - description: OIDCClients contains configuration for the platform's - clients that need to request tokens from the issuer - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - clientSecret: - description: ClientSecret refers to a secret in the `openshift-config` - namespace that contains the client secret in the `clientSecret` - key of the `.data` field - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: ComponentName is the name of the component - that is supposed to consume this client configuration - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the - component that is supposed to consume this client configuration - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: ExtraScopes is an optional set of scopes - to request tokens with. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: 'serviceAccountIssuer is the identifier of the bound - service account token issuer. The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation - of all bound tokens with the previous issuer value. Instead, the - tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set - to 24h). This time period is subject to change over time. This allows - internal components to transition to use new service account issuer - without service distruption.' - type: string - type: - description: type identifies the cluster managed, user facing authentication - mode in use. Specifically, it manages the component that responds - to login attempts. The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: "webhookTokenAuthenticator configures a remote token - reviewer. These remote authentication webhooks can be used to verify - bearer tokens via the tokenreviews.authentication.k8s.io REST API. - This is required to honor bearer tokens that are provisioned by - an external authentication service. \n Can only be set if \"Type\" - is set to \"None\"." - properties: - kubeConfig: - description: "kubeConfig references a secret that contains kube - config file data which describes how to access the remote webhook - service. The namespace for the referenced secret is openshift-config. - \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - \n The key \"kubeConfig\" is used to locate the data. If the - secret or expected key is not found, the webhook is not honored. 
- If the specified kube config data is not valid, the webhook - is not honored." - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: deprecatedWebhookTokenAuthenticator holds the necessary - configuration options for a remote token authenticator. It's the - same as WebhookTokenAuthenticator but it's missing the 'required' - validation on KubeConfig field. - properties: - kubeConfig: - description: 'kubeConfig contains kube config file data which - describes how to access the remote webhook service. For further - details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. If the secret - or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored. The namespace for this secret is determined - by the point of use.' - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: 'integratedOAuthMetadata contains the discovery endpoint - data for OAuth 2.0 Authorization Server Metadata for the in-cluster - integrated OAuth server. This discovery document can be viewed from - its served location: oc get --raw ''/.well-known/oauth-authorization-server'' - For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. An explicitly - set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set - to IntegratedOAuth. The key "oauthMetadata" is used to locate the - data. If the config map or expected key is not found, no metadata - is served. If the specified metadata is not valid, no metadata is - served. The namespace for this config map is openshift-config-managed.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: OIDCClients is where participating operators place the - current OIDC client status for OIDC clients that can be customized - by the cluster-admin. - items: - properties: - componentName: - description: ComponentName is the name of the component that - will consume a client configuration. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the component - that will consume a client configuration. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: "Conditions are used to communicate the state of - the `oidcClients` entry. \n Supported conditions include Available, - Degraded and Progressing. \n If Available is true, the component - is successfully using the configured client. If Degraded is - true, that means something has gone wrong trying to handle - the client configuration. 
If Progressing is true, that means - the component is taking some action related to the `oidcClients` - entry." - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: ConsumingUsers is a slice of ServiceAccounts that - need to have read permission on the `clientSecret` secret. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. 
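A single platform client under `spec.oidcProviders[].oidcClients`, per the schema above, would be declared roughly as follows. All identifiers are placeholders; the referenced secret must live in `openshift-config` with the secret material under its `clientSecret` key:

```
oidcClients:
  - componentName: console                   # placeholder component
    componentNamespace: openshift-console
    clientID: openshift-console
    clientSecret:
      name: console-oidc-secret              # hypothetical secret name
    extraScopes:
      - email
      - profile
```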
- maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the - component is currently using. - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: OIDCName refers to the `name` of the provider - from `oidcProviders` - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml-patch b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml-patch deleted file mode 100644 index dcc254fbd..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml-patch +++ /dev/null @@ -1,285 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/oidcProviders - value: - description: "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\". \n At most one provider can be configured." - type: array - maxItems: 1 - items: - type: object - required: - - issuer - - name - properties: - claimMappings: - description: ClaimMappings describes rules on how to transform information from an ID token into a cluster identity - type: object - properties: - groups: - description: Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values. - type: object - required: - - claim - properties: - claim: - description: Claim is a JWT token claim to be used in the mapping - type: string - prefix: - description: "Prefix is a string to prefix the value from the token in the result of the claim mapping. \n By default, no prefixing occurs. 
\n Example: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." - type: string - username: - description: "Username is a name of the claim that should be used to construct usernames for the cluster identity. \n Default value: \"sub\"" - type: object - required: - - claim - properties: - claim: - description: Claim is a JWT token claim to be used in the mapping - type: string - prefix: - type: object - required: - - prefixString - properties: - prefixString: - type: string - minLength: 1 - prefixPolicy: - description: "PrefixPolicy specifies how a prefix should apply. \n By default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins. \n Set to \"NoPrefix\" to disable prefixing. \n Example: (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\". If the JWT claim `username` contains value `userA`, the resulting mapped value will be \"myoidc:userA\". (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the JWT `email` claim contains value \"userA@myoidc.tld\", the resulting mapped value will be \"myoidc:userA@myoidc.tld\". (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\", and `claim` is set to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" - type: string - enum: - - "" - - NoPrefix - - Prefix - x-kubernetes-validations: - - rule: 'has(self.prefixPolicy) && self.prefixPolicy == ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)' - message: prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise - claimValidationRules: - description: ClaimValidationRules are rules that are applied to validate token claims to authenticate users. - type: array - items: - type: object - properties: - requiredClaim: - description: RequiredClaim allows configuring a required claim name and its expected value - type: object - required: - - claim - - requiredValue - properties: - claim: - description: Claim is a name of a required claim. Only claims with string values are supported. - type: string - minLength: 1 - requiredValue: - description: RequiredValue is the required value for the claim. - type: string - minLength: 1 - type: - description: Type sets the type of the validation rule - type: string - default: RequiredClaim - enum: - - RequiredClaim - x-kubernetes-list-type: atomic - issuer: - description: Issuer describes atributes of the OIDC token issuer - type: object - required: - - audiences - - issuerURL - properties: - audiences: - description: Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their "aud" claim. Must be set to exactly one value. - type: array - maxItems: 10 - minItems: 1 - items: - type: string - minLength: 1 - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the "ca-bundle.crt" key. If unset, system trust is used instead. 
- type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced config map - type: string - issuerURL: - description: URL is the serving URL of the token issuer. Must use the https:// scheme. - type: string - pattern: ^https:\/\/[^\s] - name: - description: Name of the OIDC provider - type: string - minLength: 1 - oidcClients: - description: OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer - type: array - maxItems: 20 - items: - type: object - required: - - clientID - - componentName - - componentNamespace - properties: - clientID: - description: ClientID is the identifier of the OIDC client from the OIDC provider - type: string - minLength: 1 - clientSecret: - description: ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - componentName: - description: ComponentName is the name of the component that is supposed to consume this client configuration - type: string - maxLength: 256 - minLength: 1 - componentNamespace: - description: ComponentNamespace is the namespace of the component that is supposed to consume this client configuration - type: string - maxLength: 63 - minLength: 1 - extraScopes: - description: ExtraScopes is an optional set of scopes to request tokens with. - type: array - items: - type: string - x-kubernetes-list-type: set - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/status/properties/oidcClients - value: - description: OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin. - items: - properties: - componentName: - description: ComponentName is the name of the component that will consume a client configuration. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the component that will consume a client configuration. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: "Conditions are used to communicate the state of the `oidcClients` entry. \n Supported conditions include Available, Degraded and Progressing. \n If Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry." - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: ConsumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret. - items: - description: ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. - maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the component is currently using. - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client from the OIDC provider - minLength: 1 - type: string - issuerURL: - description: URL is the serving URL of the token issuer. Must use the https:// scheme. 
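The CEL rule this patch installs (see the `x-kubernetes-validations` op below) requires every client in `spec.oidcProviders[].oidcClients` to be matched, by `componentName` and `componentNamespace`, either in `status.oidcClients` or in the previously configured spec. An abbreviated, hypothetical pair that satisfies it:

```
spec:
  oidcProviders:
    - name: my-oidc                          # hypothetical provider
      issuer:
        issuerURL: https://myoidc.tld
        audiences:
          - my-cluster-audience
      oidcClients:
        - componentName: console             # must be matched in status...
          componentNamespace: openshift-console
          clientID: openshift-console
status:
  oidcClients:
    - componentName: console                 # ...by this entry
      componentNamespace: openshift-console
```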
- pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: OIDCName refers to the `name` of the provider from `oidcProviders` - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/x-kubernetes-validations - value: - - message: all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))' -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/type/enum - value: - - "" - - None - - IntegratedOAuth - - OIDC diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default.yaml deleted file mode 100644 index 87e2434db..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default.yaml +++ /dev/null @@ -1,171 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Authentication specifies cluster-wide settings for authentication - (like OAuth and webhook token authenticators). The canonical name of an - instance is `cluster`. \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
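Taken together, the patch above extends the Authentication CRD for external OIDC providers. As a rough sketch only, a resource exercising those fields might look like the following; the provider name, issuer URL, audience, and client identifiers are illustrative placeholders, not values taken from this change:

```yaml
apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: OIDC
  oidcProviders:
  - name: example-oidc                     # illustrative provider name
    issuer:
      issuerURL: https://oidc.example.com  # must match ^https:\/\/[^\s]
      audiences:
      - example-audience
    oidcClients:
    - componentName: console               # illustrative; entries are keyed by componentNamespace/componentName
      componentNamespace: openshift-console
      clientID: example-client-id
```

Per the CEL validation added above, such a spec entry is only accepted once a matching entry exists in status.oidcClients, or a matching client was already present in the previous spec.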
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: 'oauthMetadata contains the discovery endpoint data for - OAuth 2.0 Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw ''/.well-known/oauth-authorization-server'' For further - details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence over - any metadata reference stored in status. The key "oauthMetadata" - is used to locate the data. If specified and the config map or expected - key is not found, no metadata is served. If the specified metadata - is not valid, no metadata is served. The namespace for this config - map is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - serviceAccountIssuer: - description: 'serviceAccountIssuer is the identifier of the bound - service account token issuer. The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation - of all bound tokens with the previous issuer value. Instead, the - tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set - to 24h). This time period is subject to change over time. This allows - internal components to transition to use new service account issuer - without service disruption.' - type: string - type: - description: type identifies the cluster managed, user facing authentication - mode in use. Specifically, it manages the component that responds - to login attempts. The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - type: string - webhookTokenAuthenticator: - description: "webhookTokenAuthenticator configures a remote token - reviewer. These remote authentication webhooks can be used to verify - bearer tokens via the tokenreviews.authentication.k8s.io REST API. - This is required to honor bearer tokens that are provisioned by - an external authentication service. \n Can only be set if \"Type\" - is set to \"None\"." - properties: - kubeConfig: - description: "kubeConfig references a secret that contains kube - config file data which describes how to access the remote webhook - service. The namespace for the referenced secret is openshift-config. - \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - \n The key \"kubeConfig\" is used to locate the data. If the - secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored." - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: deprecatedWebhookTokenAuthenticator holds the necessary - configuration options for a remote token authenticator.
It's the - same as WebhookTokenAuthenticator but it's missing the 'required' - validation on KubeConfig field. - properties: - kubeConfig: - description: 'kubeConfig contains kube config file data which - describes how to access the remote webhook service. For further - details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. If the secret - or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored. The namespace for this secret is determined - by the point of use.' - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: 'integratedOAuthMetadata contains the discovery endpoint - data for OAuth 2.0 Authorization Server Metadata for the in-cluster - integrated OAuth server. This discovery document can be viewed from - its served location: oc get --raw ''/.well-known/oauth-authorization-server'' - For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. An explicitly - set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set - to IntegratedOAuth. The key "oauthMetadata" is used to locate the - data. If the config map or expected key is not found, no metadata - is served. If the specified metadata is not valid, no metadata is - served. The namespace for this config map is openshift-config-managed.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml deleted file mode 100644 index cf6913a8f..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml +++ /dev/null @@ -1,555 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - formatted: "true" - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Authentication specifies cluster-wide settings for authentication - (like OAuth and webhook token authenticators). The canonical name of an - instance is `cluster`. 
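As a sketch of the webhookTokenAuthenticator shape described in the schema above, under the assumption that the secret name is a placeholder (the schema only requires that `kubeConfig.name` reference a secret in openshift-config):

```yaml
apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: None                             # webhookTokenAuthenticator may only be set when type is None
  webhookTokenAuthenticator:
    kubeConfig:
      name: example-webhook-kubeconfig   # hypothetical secret name; the "kubeConfig" key holds the data
```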
\n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: 'oauthMetadata contains the discovery endpoint data for - OAuth 2.0 Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw ''/.well-known/oauth-authorization-server'' For further - details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence over - any metadata reference stored in status. The key "oauthMetadata" - is used to locate the data. If specified and the config map or expected - key is not found, no metadata is served. If the specified metadata - is not valid, no metadata is served. The namespace for this config - map is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: "OIDCProviders are OIDC identity providers that can issue - tokens for this cluster. Can only be set if \"Type\" is set to \"OIDC\". - \n At most one provider can be configured." - items: - properties: - claimMappings: - description: ClaimMappings describes rules on how to transform - information from an ID token into a cluster identity - properties: - groups: - description: Groups is a name of the claim that should be - used to construct groups for the cluster identity. The - referenced claim must use array of strings values. - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - description: "Prefix is a string to prefix the value - from the token in the result of the claim mapping. - \n By default, no prefixing occurs. \n Example: if - `prefix` is set to \"myoidc:\" and the `claim` in - JWT contains an array of strings \"a\", \"b\" and - \ \"c\", the mapping will result in an array of strings - \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." - type: string - required: - - claim - type: object - username: - description: "Username is a name of the claim that should - be used to construct usernames for the cluster identity. - \n Default value: \"sub\"" - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - properties: - prefixString: - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: "PrefixPolicy specifies how a prefix should - apply. \n By default, claims other than `email` will - be prefixed with the issuer URL to prevent naming - clashes with other plugins.
\n Set to \"NoPrefix\" - to disable prefixing. \n Example: (1) `prefix` is - set to \"myoidc:\" and `claim` is set to \"username\". - If the JWT claim `username` contains value `userA`, - the resulting mapped value will be \"myoidc:userA\". - (2) `prefix` is set to \"myoidc:\" and `claim` is - set to \"email\". If the JWT `email` claim contains - value \"userA@myoidc.tld\", the resulting mapped value - will be \"myoidc:userA@myoidc.tld\". (3) `prefix` - is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include \"username\":\"userA\" and - \"email\":\"userA@myoidc.tld\", and `claim` is set - to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" - (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - type: object - claimValidationRules: - description: ClaimValidationRules are rules that are applied - to validate token claims to authenticate users. - items: - properties: - requiredClaim: - description: RequiredClaim allows configuring a required - claim name and its expected value - properties: - claim: - description: Claim is a name of a required claim. - Only claims with string values are supported. - minLength: 1 - type: string - requiredValue: - description: RequiredValue is the required value for - the claim. - minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: Type sets the type of the validation rule - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: Issuer describes attributes of the OIDC token issuer - properties: - audiences: - description: Audiences is an array of audiences that the - token was issued for. Valid tokens must include at least - one of these values in their "aud" claim. Must be set - to exactly one value. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: CertificateAuthority is a reference to a config - map in the configuration namespace. The .data of the configMap - must contain the "ca-bundle.crt" key. If unset, system - trust is used instead. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme.
- pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: Name of the OIDC provider - minLength: 1 - type: string - oidcClients: - description: OIDCClients contains configuration for the platform's - clients that need to request tokens from the issuer - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - clientSecret: - description: ClientSecret refers to a secret in the `openshift-config` - namespace that contains the client secret in the `clientSecret` - key of the `.data` field - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: ComponentName is the name of the component - that is supposed to consume this client configuration - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the - component that is supposed to consume this client configuration - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: ExtraScopes is an optional set of scopes - to request tokens with. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: 'serviceAccountIssuer is the identifier of the bound - service account token issuer. The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation - of all bound tokens with the previous issuer value. Instead, the - tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set - to 24h). This time period is subject to change over time. This allows - internal components to transition to use new service account issuer - without service disruption.' - type: string - type: - description: type identifies the cluster managed, user facing authentication - mode in use. Specifically, it manages the component that responds - to login attempts. The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: "webhookTokenAuthenticator configures a remote token - reviewer. These remote authentication webhooks can be used to verify - bearer tokens via the tokenreviews.authentication.k8s.io REST API. - This is required to honor bearer tokens that are provisioned by - an external authentication service. \n Can only be set if \"Type\" - is set to \"None\"." - properties: - kubeConfig: - description: "kubeConfig references a secret that contains kube - config file data which describes how to access the remote webhook - service. The namespace for the referenced secret is openshift-config. - \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - \n The key \"kubeConfig\" is used to locate the data. If the - secret or expected key is not found, the webhook is not honored.
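To illustrate the claimMappings schema above, a hedged example of a provider entry with an explicit username prefix; the claim names and prefix string are illustrative, and per the CEL rule above `prefix` must be set exactly when `prefixPolicy` is `Prefix`:

```yaml
oidcProviders:
- name: example-oidc
  issuer:
    issuerURL: https://oidc.example.com
    audiences:
    - example-audience
  claimMappings:
    username:
      claim: email
      prefixPolicy: Prefix
      prefix:
        prefixString: "myoidc:"   # required here because prefixPolicy is Prefix
    groups:
      claim: groups
      prefix: "myoidc:"           # optional plain-string prefix for group names
  claimValidationRules:
  - type: RequiredClaim
    requiredClaim:
      claim: email_verified
      requiredValue: "true"
```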
- If the specified kube config data is not valid, the webhook - is not honored." - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: deprecatedWebhookTokenAuthenticator holds the necessary - configuration options for a remote token authenticator. It's the - same as WebhookTokenAuthenticator but it's missing the 'required' - validation on KubeConfig field. - properties: - kubeConfig: - description: 'kubeConfig contains kube config file data which - describes how to access the remote webhook service. For further - details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. If the secret - or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored. The namespace for this secret is determined - by the point of use.' - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: 'integratedOAuthMetadata contains the discovery endpoint - data for OAuth 2.0 Authorization Server Metadata for the in-cluster - integrated OAuth server. This discovery document can be viewed from - its served location: oc get --raw ''/.well-known/oauth-authorization-server'' - For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. An explicitly - set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set - to IntegratedOAuth. The key "oauthMetadata" is used to locate the - data. If the config map or expected key is not found, no metadata - is served. If the specified metadata is not valid, no metadata is - served. The namespace for this config map is openshift-config-managed.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: OIDCClients is where participating operators place the - current OIDC client status for OIDC clients that can be customized - by the cluster-admin. - items: - properties: - componentName: - description: ComponentName is the name of the component that - will consume a client configuration. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the component - that will consume a client configuration. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: "Conditions are used to communicate the state of - the `oidcClients` entry. \n Supported conditions include Available, - Degraded and Progressing. \n If Available is true, the component - is successfully using the configured client. If Degraded is - true, that means something has gone wrong trying to handle - the client configuration. 
If Progressing is true, that means - the component is taking some action related to the `oidcClients` - entry." - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: ConsumingUsers is a slice of ServiceAccounts that - need to have read permission on the `clientSecret` secret. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. 
- maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the - component is currently using. - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: OIDCName refers to the `name` of the provider - from `oidcProviders` - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml deleted file mode 100644 index ce7f789da..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml +++ /dev/null @@ -1,75 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: consoles.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Console - listKind: ConsoleList - plural: consoles - singular: console - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Console holds cluster-wide configuration for the web console, - including the logout URL, and reports the public URL of the console. The - canonical name is `cluster`. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
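The status side mirrors the spec: a plausible, purely illustrative status.oidcClients entry, showing the consumingUsers service-account pattern and the currentOIDCClients keys that the deleted schema above validates (component and client names are placeholders):

```yaml
status:
  oidcClients:
  - componentName: console
    componentNamespace: openshift-console
    consumingUsers:
    - system:serviceaccount:openshift-console:console   # must match the system:serviceaccount:<ns>:<name> pattern
    currentOIDCClients:
    - clientID: example-client-id
      issuerURL: https://oidc.example.com
      oidcProviderName: example-oidc
```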
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - authentication: - description: ConsoleAuthentication defines a list of optional configuration - for console authentication. - properties: - logoutRedirect: - description: 'An optional, absolute URL to redirect web browsers - to after logging out of the console. If not specified, it will - redirect to the default login page. This is required when using - an identity provider that supports single sign-on (SSO) such - as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, - SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console - will destroy the user''s token. The logoutRedirect provides - the user the option to perform single logout (SLO) through the - identity provider to destroy their single sign-on session.' - pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$ - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - consoleURL: - description: The URL for the console. This will be derived from the - host for the route that is created for the console. - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml deleted file mode 100644 index 7b1bee406..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,159 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: dnses.config.openshift.io -spec: - group: config.openshift.io - names: - kind: DNS - listKind: DNSList - plural: dnses - singular: dns - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "DNS holds cluster-wide information about DNS. The canonical - name is `cluster` \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. 
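For the Console schema whose removal from vendoring begins above, a minimal example resource; the logout URL is a placeholder chosen to satisfy the https pattern in the schema:

```yaml
apiVersion: config.openshift.io/v1
kind: Console
metadata:
  name: cluster
spec:
  authentication:
    logoutRedirect: https://idp.example.com/logout   # optional; the pattern also allows an empty string
```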
Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - baseDomain: - description: "baseDomain is the base domain of the cluster. All managed - DNS records will be sub-domains of this base. \n For example, given - the base domain `openshift.example.com`, an API server DNS record - may be created for `cluster-api.openshift.example.com`. \n Once - set, this field cannot be changed." - type: string - platform: - description: platform holds configuration specific to the underlying - infrastructure provider for DNS. When omitted, this means the user - has no opinion and the platform is left to choose reasonable defaults. - These defaults are subject to change over time. - properties: - aws: - description: aws contains DNS configuration specific to the Amazon - Web Services cloud provider. - properties: - privateZoneIAMRole: - description: privateZoneIAMRole contains the ARN of an IAM - role that should be assumed when performing operations on - the cluster's private hosted zone specified in the cluster - DNS config. When left empty, no role should be assumed. - pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ - type: string - type: object - type: - description: "type is the underlying infrastructure provider for - the cluster. Allowed values: \"\", \"AWS\". \n Individual components - may not support all platforms, and must handle unrecognized - platforms with best-effort defaults." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - x-kubernetes-validations: - - message: allowed values are '' and 'AWS' - rule: self in ['','AWS'] - required: - - type - type: object - x-kubernetes-validations: - - message: aws configuration is required when platform is AWS, and - forbidden otherwise - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : - !has(self.aws)' - privateZone: - description: "privateZone is the location where all the DNS records - that are only available internally to the cluster exist. \n If this - field is nil, no private records should be created. \n Once set, - this field cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. 
\n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - publicZone: - description: "publicZone is the location where all the DNS records - that are publicly accessible to the internet exist. \n If this field - is nil, no public records should be created. \n Once set, this field - cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-Default.crd.yaml deleted file mode 100644 index d2a3e7dc4..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-Default.crd.yaml +++ /dev/null @@ -1,159 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: dnses.config.openshift.io -spec: - group: config.openshift.io - names: - kind: DNS - listKind: DNSList - plural: dnses - singular: dns - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "DNS holds cluster-wide information about DNS. The canonical - name is `cluster` \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
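A sketch of a DNS resource consistent with the schema above, assuming an AWS platform; the ARN, zone ID, and tags are placeholders. Note that the CEL rule requires the aws block exactly when platform.type is AWS:

```yaml
apiVersion: config.openshift.io/v1
kind: DNS
metadata:
  name: cluster
spec:
  baseDomain: openshift.example.com
  platform:
    type: AWS
    aws:
      privateZoneIAMRole: arn:aws:iam::123456789012:role/example-dns   # placeholder ARN matching the pattern
  privateZone:
    id: Z00000000EXAMPLE           # placeholder hosted-zone ID
  publicZone:
    tags:
      Name: example-public-zone    # placeholder tag filter
```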
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - baseDomain: - description: "baseDomain is the base domain of the cluster. All managed - DNS records will be sub-domains of this base. \n For example, given - the base domain `openshift.example.com`, an API server DNS record - may be created for `cluster-api.openshift.example.com`. \n Once - set, this field cannot be changed." - type: string - platform: - description: platform holds configuration specific to the underlying - infrastructure provider for DNS. When omitted, this means the user - has no opinion and the platform is left to choose reasonable defaults. - These defaults are subject to change over time. - properties: - aws: - description: aws contains DNS configuration specific to the Amazon - Web Services cloud provider. - properties: - privateZoneIAMRole: - description: privateZoneIAMRole contains the ARN of an IAM - role that should be assumed when performing operations on - the cluster's private hosted zone specified in the cluster - DNS config. When left empty, no role should be assumed. - pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ - type: string - type: object - type: - description: "type is the underlying infrastructure provider for - the cluster. Allowed values: \"\", \"AWS\". \n Individual components - may not support all platforms, and must handle unrecognized - platforms with best-effort defaults." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - x-kubernetes-validations: - - message: allowed values are '' and 'AWS' - rule: self in ['','AWS'] - required: - - type - type: object - x-kubernetes-validations: - - message: aws configuration is required when platform is AWS, and - forbidden otherwise - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : - !has(self.aws)' - privateZone: - description: "privateZone is the location where all the DNS records - that are only available internally to the cluster exist. \n If this - field is nil, no private records should be created. \n Once set, - this field cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - publicZone: - description: "publicZone is the location where all the DNS records - that are publicly accessible to the internet exist. 
\n If this field - is nil, no public records should be created. \n Once set, this field - cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index b5fe24073..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,159 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: dnses.config.openshift.io -spec: - group: config.openshift.io - names: - kind: DNS - listKind: DNSList - plural: dnses - singular: dns - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "DNS holds cluster-wide information about DNS. The canonical - name is `cluster` \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - baseDomain: - description: "baseDomain is the base domain of the cluster. All managed - DNS records will be sub-domains of this base. 
\n For example, given - the base domain `openshift.example.com`, an API server DNS record - may be created for `cluster-api.openshift.example.com`. \n Once - set, this field cannot be changed." - type: string - platform: - description: platform holds configuration specific to the underlying - infrastructure provider for DNS. When omitted, this means the user - has no opinion and the platform is left to choose reasonable defaults. - These defaults are subject to change over time. - properties: - aws: - description: aws contains DNS configuration specific to the Amazon - Web Services cloud provider. - properties: - privateZoneIAMRole: - description: privateZoneIAMRole contains the ARN of an IAM - role that should be assumed when performing operations on - the cluster's private hosted zone specified in the cluster - DNS config. When left empty, no role should be assumed. - pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ - type: string - type: object - type: - description: "type is the underlying infrastructure provider for - the cluster. Allowed values: \"\", \"AWS\". \n Individual components - may not support all platforms, and must handle unrecognized - platforms with best-effort defaults." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - x-kubernetes-validations: - - message: allowed values are '' and 'AWS' - rule: self in ['','AWS'] - required: - - type - type: object - x-kubernetes-validations: - - message: aws configuration is required when platform is AWS, and - forbidden otherwise - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : - !has(self.aws)' - privateZone: - description: "privateZone is the location where all the DNS records - that are only available internally to the cluster exist. \n If this - field is nil, no private records should be created. \n Once set, - this field cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - publicZone: - description: "publicZone is the location where all the DNS records - that are publicly accessible to the internet exist. \n If this field - is nil, no public records should be created. \n Once set, this field - cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. 
\n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml deleted file mode 100644 index 159260e60..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml +++ /dev/null @@ -1,213 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: featuregates.config.openshift.io -spec: - group: config.openshift.io - names: - kind: FeatureGate - listKind: FeatureGateList - plural: featuregates - singular: featuregate - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Feature holds cluster-wide information about feature gates. - \ The canonical name is `cluster` \n Compatibility level 1: Stable within - a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - customNoUpgrade: - description: customNoUpgrade allows the enabling or disabling of any - feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE - UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting - cannot be validated. If you have any typos or accidentally apply - invalid combinations your cluster may fail in an unrecoverable way. featureSet - must equal "CustomNoUpgrade" to use this field.
- nullable: true - properties: - disabled: - description: disabled is a list of all feature gates that you - want to force off - items: - description: FeatureGateName is a string to enforce patterns - on the name of a FeatureGate - pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ - type: string - type: array - enabled: - description: enabled is a list of all feature gates that you want - to force on - items: - description: FeatureGateName is a string to enforce patterns - on the name of a FeatureGate - pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ - type: string - type: array - type: object - featureSet: - description: featureSet changes the list of features in the cluster. The - default is empty. Be very careful adjusting this setting. Turning - on or off features may cause irreversible changes in your cluster - which cannot be undone. - type: string - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - conditions: - description: 'conditions represent the observations of the current - state. Known .status.conditions.type are: "DeterminationDegraded"' - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - featureGates: - description: featureGates contains a list of enabled and disabled - featureGates that are keyed by payloadVersion. Operators other than - the CVO and cluster-config-operator, must read the .status.featureGates, - locate the version they are managing, find the enabled/disabled - featuregates and make the operand and operator match. The enabled/disabled - values for a particular version may change during the life of the - cluster as various .spec.featureSet values are selected. Operators - may choose to restart their processes to pick up these changes, - but remembering past enable/disable lists is beyond the scope of - this API and is the responsibility of individual operators. Only - featureGates with .version in the ClusterVersion.status will be - present in this list. - items: - properties: - disabled: - description: disabled is a list of all feature gates that are - disabled in the cluster for the named version. - items: - properties: - name: - description: name is the name of the FeatureGate. - pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ - type: string - required: - - name - type: object - type: array - enabled: - description: enabled is a list of all feature gates that are - enabled in the cluster for the named version. - items: - properties: - name: - description: name is the name of the FeatureGate. - pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ - type: string - required: - - name - type: object - type: array - version: - description: version matches the version provided by the ClusterVersion - and in the ClusterOperator.Status.Versions field. - type: string - required: - - version - type: object - type: array - x-kubernetes-list-map-keys: - - version - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml deleted file mode 100644 index f53396aec..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml +++ /dev/null @@ -1,162 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: images.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Image - listKind: ImageList - plural: images - singular: image - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Image governs policies related to imagestream imports and runtime - configuration for external registries. It allows cluster admins to configure - which registries OpenShift is allowed to import images from, extra CA trust - bundles for external registries, and policies to block or allow registry - hostnames. 
When exposing OpenShift's image registry to the public, this - also lets cluster admins specify the external hostname. \n Compatibility - level 1: Stable within a major release for a minimum of 12 months or 3 minor - releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - additionalTrustedCA: - description: additionalTrustedCA is a reference to a ConfigMap containing - additional CAs that should be trusted during imagestream import, - pod image pull, build image pull, and imageregistry pullthrough. - The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - allowedRegistriesForImport: - description: allowedRegistriesForImport limits the container image - registries that normal users may import images from. Set this list - to the registries that you trust to contain valid Docker images - and that you want applications to be able to import from. Users - with permission to create Images or ImageStreamMappings via the - API are not affected by this policy - typically only administrators - or system integrations will have those permissions. - items: - description: RegistryLocation contains a location of the registry - specified by the registry domain name. The domain name might include - wildcards, like '*' or '??'. - properties: - domainName: - description: domainName specifies a domain name for the registry. - If the registry uses a non-standard port (other than 80 or 443), the - port should be included in the domain name as well. - type: string - insecure: - description: insecure indicates whether the registry is secure - (https) or insecure (http). By default (if not specified) the - registry is assumed to be secure. - type: boolean - type: object - type: array - externalRegistryHostnames: - description: externalRegistryHostnames provides the hostnames for - the default external image registry. The external hostname should - be set only when the image registry is exposed externally. The first - value is used in 'publicDockerImageRepository' field in ImageStreams. - The value must be in "hostname[:port]" format. - items: - type: string - type: array - registrySources: - description: registrySources contains configuration that determines - how the container runtime should treat individual registries when - accessing images for builds+pods (e.g. whether or not to allow - insecure access). It does not contain configuration for the internal - cluster registry. - properties: - allowedRegistries: - description: "allowedRegistries are the only registries permitted - for image pull and push actions. All other registries are denied. - \n Only one of BlockedRegistries or AllowedRegistries may be - set."
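As an illustration of the `registrySources.allowedRegistries` field just described, a sketch of the cluster-scoped `Image` config follows; the registry names are assumptions, and including the in-cluster registry hostname is a common precaution (so builds and pods can still pull from it), not something this schema mandates.

```
apiVersion: config.openshift.io/v1
kind: Image
metadata:
  name: cluster                    # the config is a singleton named `cluster`
spec:
  registrySources:
    allowedRegistries:             # every registry not listed here is denied
      - quay.io
      - registry.redhat.io
      - image-registry.openshift-image-registry.svc:5000   # internal registry, commonly required
```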
- items: - type: string - type: array - blockedRegistries: - description: "blockedRegistries cannot be used for image pull - and push actions. All other registries are permitted. \n Only - one of BlockedRegistries or AllowedRegistries may be set." - items: - type: string - type: array - containerRuntimeSearchRegistries: - description: 'containerRuntimeSearchRegistries are registries - that will be searched when pulling images that do not have fully - qualified domains in their pull specs. Registries will be searched - in the order provided in the list. Note: this search list only - works with the container runtime, i.e. CRI-O. Will NOT work with - builds or imagestream imports.' - format: hostname - items: - type: string - minItems: 1 - type: array - x-kubernetes-list-type: set - insecureRegistries: - description: insecureRegistries are registries that do not have - valid TLS certificates or that only support HTTP connections. - items: - type: string - type: array - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - externalRegistryHostnames: - description: externalRegistryHostnames provides the hostnames for - the default external image registry. The external hostname should - be set only when the image registry is exposed externally. The first - value is used in 'publicDockerImageRepository' field in ImageStreams. - The value must be in "hostname[:port]" format. - items: - type: string - type: array - internalRegistryHostname: - description: internalRegistryHostname sets the hostname for the default - internal image registry. The value must be in "hostname[:port]" - format. This value is set by the image registry operator which controls - the internal registry hostname. - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml deleted file mode 100644 index 2e30bc552..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml +++ /dev/null @@ -1,112 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/874 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: imagecontentpolicies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImageContentPolicy - listKind: ImageContentPolicyList - plural: imagecontentpolicies - singular: imagecontentpolicy - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "ImageContentPolicy holds cluster-wide information about how - to handle registry mirror rules. When multiple policies are defined, the - outcome of the behavior is defined on each field. \n Compatibility level - 1: Stable within a major release for a minimum of 12 months or 3 minor releases - (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - repositoryDigestMirrors: - description: "repositoryDigestMirrors allows images referenced by - image digests in pods to be pulled from alternative mirrored repository - locations. The image pull specification provided to the pod will - be compared to the source locations described in RepositoryDigestMirrors - and the image may be pulled down from any of the mirrors in the - list instead of the specified repository, allowing administrators - to choose a potentially faster mirror. To pull images from mirrors - by tag, set \"allowMirrorByTags\". \n Each “source” - repository is treated independently; configurations for different - “source” repositories don’t interact. \n If the \"mirrors\" is not - specified, the image will continue to be pulled from the specified - repository in the pull spec. \n When multiple policies are defined - for the same “source” repository, the sets of defined mirrors will - be merged together, preserving the relative order of the mirrors, - if possible. For example, if policy A has mirrors `a, b, c` and - policy B has mirrors `c, d, e`, the mirrors will be used in the - order `a, b, c, d, e`. If the orders of mirror entries conflict - (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the - resulting order is unspecified." - items: - description: RepositoryDigestMirrors holds cluster-wide information - about how to handle mirrors in the registries config. - properties: - allowMirrorByTags: - description: allowMirrorByTags if true, the mirrors can be used - to pull the images that are referenced by their tags. Default - is false; the mirrors only work when pulling the images that - are referenced by their digests. Pulling images by tag can - potentially yield different images, depending on which endpoint - we pull from. Forcing digest-pulls for mirrors avoids that - issue. - type: boolean - mirrors: - description: mirrors is zero or more repositories that may also - contain the same images. If the "mirrors" is not specified, - the image will continue to be pulled from the specified repository - in the pull spec. No mirror will be configured. The order - of mirrors in this list is treated as the user's desired priority, - while source is by default considered lower priority than - all mirrors. Other cluster configuration, including (but not - limited to) other repositoryDigestMirrors objects, may impact - the exact order mirrors are contacted in, or some mirrors - may be contacted in parallel, so this should be considered - a preference rather than a guarantee of ordering. - items: - pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ - type: string - type: array - x-kubernetes-list-type: set - source: - description: source is the repository that users refer to, e.g. - in image pull specifications.
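A minimal `ImageContentPolicy` of the shape described above might look like this sketch (all repository names are hypothetical; the schema continues after the block):

```
apiVersion: config.openshift.io/v1
kind: ImageContentPolicy
metadata:
  name: example-icp                            # hypothetical name
spec:
  repositoryDigestMirrors:
    - source: registry.example.com/team/app    # hypothetical source repository
      allowMirrorByTags: false                 # digest-only pulls (the default)
      mirrors:
        - mirror.internal.example.com/team/app # hypothetical mirror
```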
- pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ - type: string - required: - - source - type: object - type: array - x-kubernetes-list-map-keys: - - source - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagedigestmirrorset.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagedigestmirrorset.crd.yaml deleted file mode 100644 index 422e46d43..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagedigestmirrorset.crd.yaml +++ /dev/null @@ -1,141 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1126 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: imagedigestmirrorsets.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImageDigestMirrorSet - listKind: ImageDigestMirrorSetList - plural: imagedigestmirrorsets - shortNames: - - idms - singular: imagedigestmirrorset - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "ImageDigestMirrorSet holds cluster-wide information about how - to handle registry mirror rules on using digest pull specification. When - multiple policies are defined, the outcome of the behavior is defined on - each field. \n Compatibility level 1: Stable within a major release for - a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - imageDigestMirrors: - description: "imageDigestMirrors allows images referenced by image - digests in pods to be pulled from alternative mirrored repository - locations. The image pull specification provided to the pod will - be compared to the source locations described in imageDigestMirrors - and the image may be pulled down from any of the mirrors in the - list instead of the specified repository allowing administrators - to choose a potentially faster mirror. To use mirrors to pull images - using tag specification, users should configure a list of mirrors - using \"ImageTagMirrorSet\" CRD. \n If the image pull specification - matches the repository of \"source\" in multiple imagedigestmirrorset - objects, only the objects which define the most specific namespace - match will be used. 
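Before the `imageDigestMirrors` description continues below, here is a minimal sketch of an `ImageDigestMirrorSet` of the shape this schema validates; the repository names are hypothetical.

```
apiVersion: config.openshift.io/v1
kind: ImageDigestMirrorSet
metadata:
  name: example-idms                             # hypothetical name
spec:
  imageDigestMirrors:
    - source: registry.example.com/product/repo  # hypothetical source repository
      mirrors:
        - mirror.corp.example.com/product/repo   # hypothetical mirror
      mirrorSourcePolicy: AllowContactingSource  # fall back to the source if mirrors fail
```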
For example, if there are objects using quay.io/libpod - and quay.io/libpod/busybox as the \"source\", only the objects using - quay.io/libpod/busybox are going to apply for pull specification - quay.io/libpod/busybox. Each “source” repository is treated independently; - configurations for different “source” repositories don’t interact. - \n If the \"mirrors\" is not specified, the image will continue - to be pulled from the specified repository in the pull spec. \n - When multiple policies are defined for the same “source” repository, - the sets of defined mirrors will be merged together, preserving - the relative order of the mirrors, if possible. For example, if - policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, - the mirrors will be used in the order `a, b, c, d, e`. If the orders - of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration - is not rejected but the resulting order is unspecified. Users who - want to use a specific order of mirrors should configure them into - one list of mirrors using the expected order." - items: - description: ImageDigestMirrors holds cluster-wide information about - how to handle mirrors in the registries config. - properties: - mirrorSourcePolicy: - description: mirrorSourcePolicy defines the fallback policy - if the image pull from the mirrors fails. If unset, the image - will continue to be pulled from the repository in the - pull spec. mirrorSourcePolicy is valid configuration only when one - or more mirrors are in the mirror list. - enum: - - NeverContactSource - - AllowContactingSource - type: string - mirrors: - description: 'mirrors is zero or more locations that may also - contain the same images. No mirror will be configured if not - specified. Images can be pulled from these mirrors only if - they are referenced by their digests. The mirrored location - is obtained by replacing the part of the input reference that - matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo - reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat - causes a mirror.local/redhat/product/repo repository to be - used. The order of mirrors in this list is treated as the - user''s desired priority, while source is by default considered - lower priority than all mirrors. If no mirror is specified - or all image pulls from the mirror list fail, the image will - continue to be pulled from the repository in the pull spec - unless explicitly prohibited by "mirrorSourcePolicy". Other - cluster configuration, including (but not limited to) other - imageDigestMirrors objects, may impact the exact order mirrors - are contacted in, or some mirrors may be contacted in parallel, - so this should be considered a preference rather than a guarantee - of ordering. "mirrors" uses one of the following formats: - host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo - for more information about the format, see the document about - the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - items: - pattern: ^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - type: string - type: array - x-kubernetes-list-type: set - source: - description: 'source matches the repository that users refer - to, e.g. in image pull specifications. Setting source to a - registry hostname e.g. docker.io,
quay.io, or registry.redhat.io, - will match the image pull specification of the corresponding - registry. "source" uses one of the following formats: host[:port] - host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo - [*.]host for more information about the format, see the document - about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - pattern: ^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - type: string - required: - - source - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status contains the observed state of the resource. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagetagmirrorset.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagetagmirrorset.crd.yaml deleted file mode 100644 index abcab0166..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagetagmirrorset.crd.yaml +++ /dev/null @@ -1,144 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1126 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: imagetagmirrorsets.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImageTagMirrorSet - listKind: ImageTagMirrorSetList - plural: imagetagmirrorsets - shortNames: - - itms - singular: imagetagmirrorset - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "ImageTagMirrorSet holds cluster-wide information about how to - handle registry mirror rules on using tag pull specification. When multiple - policies are defined, the outcome of the behavior is defined on each field. - \n Compatibility level 1: Stable within a major release for a minimum of - 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - imageTagMirrors: - description: "imageTagMirrors allows images referenced by image tags - in pods to be pulled from alternative mirrored repository locations.
The image pull specification provided to the pod will be compared - to the source locations described in imageTagMirrors and the image - may be pulled down from any of the mirrors in the list instead of - the specified repository allowing administrators to choose a potentially - faster mirror. To use mirrors to pull images using digest specification - only, users should configure a list of mirrors using the \"ImageDigestMirrorSet\" - CRD. \n If the image pull specification matches the repository of - \"source\" in multiple imagetagmirrorset objects, only the objects - which define the most specific namespace match will be used. For - example, if there are objects using quay.io/libpod and quay.io/libpod/busybox - as the \"source\", only the objects using quay.io/libpod/busybox - are going to apply for pull specification quay.io/libpod/busybox. - Each “source” repository is treated independently; configurations - for different “source” repositories don’t interact. \n If the \"mirrors\" - is not specified, the image will continue to be pulled from the - specified repository in the pull spec. \n When multiple policies - are defined for the same “source” repository, the sets of defined - mirrors will be merged together, preserving the relative order of - the mirrors, if possible. For example, if policy A has mirrors `a, - b, c` and policy B has mirrors `c, d, e`, the mirrors will be used - in the order `a, b, c, d, e`. If the orders of mirror entries conflict - (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the - resulting order is unspecified. Users who want to use a deterministic - order of mirrors should configure them into one list of mirrors - using the expected order." - items: - description: ImageTagMirrors holds cluster-wide information about - how to handle mirrors in the registries config. - properties: - mirrorSourcePolicy: - description: mirrorSourcePolicy defines the fallback policy - if the image pull from the mirrors fails. If unset, the image - will continue to be pulled from the repository in the pull - spec. mirrorSourcePolicy is valid configuration only when one or - more mirrors are in the mirror list. - enum: - - NeverContactSource - - AllowContactingSource - type: string - mirrors: - description: 'mirrors is zero or more locations that may also - contain the same images. No mirror will be configured if not - specified. Images can be pulled from these mirrors only if - they are referenced by their tags. The mirrored location is - obtained by replacing the part of the input reference that - matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo - reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat - causes a mirror.local/redhat/product/repo repository to be - used. Pulling images by tag can potentially yield different - images, depending on which endpoint we pull from. Configuring - a list of mirrors using "ImageDigestMirrorSet" CRD and forcing - digest-pulls for mirrors avoids that issue. The order of mirrors - in this list is treated as the user''s desired priority, while - source is by default considered lower priority than all mirrors. - If no mirror is specified or all image pulls from the mirror - list fail, the image will continue to be pulled from the repository - in the pull spec unless explicitly prohibited by "mirrorSourcePolicy".
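For contrast with the digest-based example earlier, a minimal `ImageTagMirrorSet` might look like this sketch (hypothetical names; the `mirrors` description continues after the block):

```
apiVersion: config.openshift.io/v1
kind: ImageTagMirrorSet
metadata:
  name: example-itms                             # hypothetical name
spec:
  imageTagMirrors:
    - source: registry.example.com/product/repo  # hypothetical source repository
      mirrors:
        - mirror.corp.example.com/product/repo   # hypothetical mirror
      mirrorSourcePolicy: NeverContactSource     # never fall back to the source
```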
- Other cluster configuration, including (but not limited to) - other imageTagMirrors objects, may impact the exact order - mirrors are contacted in, or some mirrors may be contacted - in parallel, so this should be considered a preference rather - than a guarantee of ordering. "mirrors" uses one of the following - formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo - for more information about the format, see the document about - the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - items: - pattern: ^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - type: string - type: array - x-kubernetes-list-type: set - source: - description: 'source matches the repository that users refer - to, e.g. in image pull specifications. Setting source to a - registry hostname e.g. docker.io, quay.io, or registry.redhat.io, - will match the image pull specification of the corresponding - registry. "source" uses one of the following formats: host[:port] - host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo - [*.]host for more information about the format, see the document - about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - pattern: ^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - type: string - required: - - source - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status contains the observed state of the resource. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml deleted file mode 100644 index 975def7c1..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,2089 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: infrastructures.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Infrastructure - listKind: InfrastructureList - plural: infrastructures - singular: infrastructure - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Infrastructure holds cluster-wide information about Infrastructure. - \ The canonical name is `cluster` \n Compatibility level 1: Stable within - a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object.
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - cloudConfig: - description: "cloudConfig is a reference to a ConfigMap containing - the cloud provider configuration file. This configuration file is - used to configure the Kubernetes cloud provider integration when - using the built-in cloud provider integration or the external cloud - controller manager. The namespace for this config map is openshift-config. - \n cloudConfig should only be consumed by the kube_cloud_config - controller. The controller is responsible for using the user configuration - in the spec for various platforms and combining that with the user - provided ConfigMap in this field to create a stitched kube cloud - config. The controller generates a ConfigMap `kube-cloud-config` - in `openshift-config-managed` namespace with the kube cloud config - is stored in `cloud.conf` key. All the clients are expected to use - the generated ConfigMap only." - properties: - key: - description: Key allows pointing to a specific key/value inside - of the configmap. This is useful for logical file references. - type: string - name: - type: string - type: object - platformSpec: - description: platformSpec holds desired information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint store the configuration - of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service. The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - type: object - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. 
These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - type: object - external: - description: ExternalPlatformType represents generic infrastructure - provider. 
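The `baremetal` stanza whose fields were just listed plugs into `spec.platformSpec` roughly as in the sketch below; the addresses use documentation ranges (192.0.2.0/24, fd00::/8) and are placeholders, with at most one IPv4 and one IPv6 entry per list as the schema requires.

```
apiVersion: config.openshift.io/v1
kind: Infrastructure
metadata:
  name: cluster
spec:
  platformSpec:
    type: BareMetal
    baremetal:
      apiServerInternalIPs:    # at most one IPv4 and one IPv6 address
        - 192.0.2.10
        - fd00::10
      ingressIPs:
        - 192.0.2.20
        - fd00::20
      machineNetworks:         # CIDR notation, IPv4 or IPv6
        - 192.0.2.0/24
        - fd00::/64
```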
Platform-specific components should be supplemented - separately. - properties: - platformName: - default: Unknown - description: PlatformName holds the arbitrary string representing - the infrastructure provider name, expected to be set at - the installation time. This field is solely for informational - and reporting purposes and is not expected to be used for - decision-making. - type: string - x-kubernetes-validations: - - message: platform name cannot be changed once set - rule: oldSelf == 'Unknown' || self == oldSelf - type: object - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - type: object - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - failureDomains: - description: failureDomains configures failure domains information - for the Nutanix platform. When set, the failure domains - defined here may be used to spread Machines across prism - element clusters to improve fault tolerance of the cluster. - items: - description: NutanixFailureDomain configures failure domain - information for the Nutanix platform. - properties: - cluster: - description: cluster is to identify the cluster (the - Prism Element under management of the Prism Central), - in which the Machine's VM will be created. The cluster - identifier (uuid or name) can be obtained from the - Prism Central console or using the prism_central API. - properties: - name: - description: name is the resource name in the PC. - It cannot be empty if the type is Name. - type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource in - the PC. It cannot be empty if the type is UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - name: - description: name defines the unique name of a failure - domain. Name is required and must be at most 64 characters - in length. It must consist of only lower case alphanumeric - characters and hyphens (-). It must start and end - with an alphanumeric character. This value is arbitrary - and is used to identify the failure domain within - the platform. - maxLength: 64 - minLength: 1 - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' - type: string - subnets: - description: subnets holds a list of identifiers (one - or more) of the cluster's network subnets for the - Machine's VM to connect to. The subnet identifiers - (uuid or name) can be obtained from the Prism Central - console or using the prism_central API. - items: - description: NutanixResourceIdentifier holds the identity - of a Nutanix PC resource (cluster, image, subnet, - etc.) - properties: - name: - description: name is the resource name in the - PC. It cannot be empty if the type is Name. 
- type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource - in the PC. It cannot be empty if the type is - UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - required: - - cluster - - name - - subnets - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - prismCentral: - description: prismCentral holds the endpoint address and port - to access the Nutanix Prism Central. When a cluster-wide - proxy is installed, by default, this endpoint will be accessed - via the proxy. Should you wish for communication with this - endpoint not to be proxied, please add the endpoint to the - proxy spec.noProxy list. - properties: - address: - description: address is the endpoint address (DNS name - or IP address) of the Nutanix Prism Central or Element - (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the Nutanix - Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - prismElements: - description: prismElements holds one or more endpoint address - and port data to access the Nutanix Prism Elements (clusters) - of the Nutanix Prism Central. Currently we only support - one Prism Element (cluster) for an OpenShift cluster, where - all the Nutanix resources (VMs, subnets, volumes, etc.) - used in the OpenShift cluster are located. In the future, - we may support Nutanix resources (VMs, etc.) spread over - multiple Prism Elements (clusters) of the Prism Central. - items: - description: NutanixPrismElementEndpoint holds the name - and endpoint data for a Prism Element (cluster) - properties: - endpoint: - description: endpoint holds the endpoint address and - port data of the Prism Element (cluster). When a cluster-wide - proxy is installed, by default, this endpoint will - be accessed via the proxy. Should you wish for communication - with this endpoint not to be proxied, please add the - endpoint to the proxy spec.noProxy list. - properties: - address: - description: address is the endpoint address (DNS - name or IP address) of the Nutanix Prism Central - or Element (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the - Nutanix Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - name: - description: name is the name of the Prism Element (cluster). - This value will correspond with the cluster field - configured on other resources (eg Machines, PVCs, - etc). 
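Pulling together the Nutanix fields described above, a sketch of a `platformSpec` follows; the endpoint addresses and cluster names are hypothetical, and port 9440 is an assumption (the customary Prism port), not something this schema fixes.

```
apiVersion: config.openshift.io/v1
kind: Infrastructure
metadata:
  name: cluster
spec:
  platformSpec:
    type: Nutanix
    nutanix:
      prismCentral:
        address: prismcentral.example.com   # hypothetical endpoint
        port: 9440                          # assumed Prism port
      prismElements:
        - name: pe-cluster-1                # hypothetical Prism Element
          endpoint:
            address: pe1.example.com
            port: 9440
      failureDomains:
        - name: fd-1
          cluster:
            type: Name
            name: pe-cluster-1
          subnets:                          # exactly one subnet is allowed
            - type: Name
              name: vm-network
```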
- maxLength: 256 - type: string - required: - - endpoint - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - prismCentral - - prismElements - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. 
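The OpenStack stanza above mirrors the BareMetal one; a minimal sketch with placeholder documentation addresses:

```
apiVersion: config.openshift.io/v1
kind: Infrastructure
metadata:
  name: cluster
spec:
  platformSpec:
    type: OpenStack
    openstack:
      apiServerInternalIPs:
        - 192.0.2.5
      ingressIPs:
        - 192.0.2.6
      machineNetworks:
        - 192.0.2.0/24
```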
- type: object - powervs: - description: PowerVS contains settings specific to the IBM Power - Systems Virtual Servers infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. - items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - type: - description: type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", - "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", - "Nutanix" and "None". Individual components may not support - all platforms, and must handle unrecognized platforms as None - if they do not support that platform. - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - failureDomains: - description: failureDomains contains the definition of region, - zone and the vCenter topology. If this is omitted failure - domains (regions and zones) will not be used. - items: - description: VSpherePlatformFailureDomainSpec holds the - region and zone failure domain and the vCenter topology - of that failure domain. - properties: - name: - description: name defines the arbitrary but unique name - of a failure domain. - maxLength: 256 - minLength: 1 - type: string - region: - description: region defines the name of a region tag - that will be attached to a vCenter datacenter. The - tag category in vCenter must be named openshift-region. - maxLength: 80 - minLength: 1 - type: string - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server. --- - maxLength: 255 - minLength: 1 - type: string - topology: - description: Topology describes a given failure domain - using vSphere constructs - properties: - computeCluster: - description: computeCluster the absolute path of - the vCenter cluster in which virtual machine will - be located. The absolute path is of the form //host/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*? - type: string - datacenter: - description: datacenter is the name of vCenter datacenter - in which virtual machines will be located. The - maximum length of the datacenter name is 80 characters. - maxLength: 80 - type: string - datastore: - description: datastore is the absolute path of the - datastore in which the virtual machine is located. - The absolute path is of the form //datastore/ - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/datastore/.*? - type: string - folder: - description: folder is the absolute path of the - folder where virtual machines are located. The - absolute path is of the form //vm/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/vm/.*? 
- type: string - networks: - description: networks is the list of port group - network names within this failure domain. Currently, - we only support a single interface per RHCOS virtual - machine. The available networks (port groups) - can be listed using `govc ls 'network/*'` The - single interface should be the absolute path of - the form //network/. - items: - type: string - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-type: atomic - resourcePool: - description: resourcePool is the absolute path of - the resource pool where virtual machines will - be created. The absolute path is of the form //host//Resources/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*?/Resources.* - type: string - template: - description: "template is the full inventory path - of the virtual machine or template that will be - cloned when creating new machines in this failure - domain. The maximum length of the path is 2048 - characters. \n When omitted, the template will - be calculated by the control plane machineset - operator based on the region and zone defined - in VSpherePlatformFailureDomainSpec. For example, - for zone=zonea, region=region1, and infrastructure - name=test, the template path would be calculated - as //vm/test-rhcos-region1-zonea." - maxLength: 2048 - minLength: 1 - pattern: ^/.*?/vm/.*? - type: string - required: - - computeCluster - - datacenter - - datastore - - networks - type: object - zone: - description: zone defines the name of a zone tag that - will be attached to a vCenter cluster. The tag category - in vCenter must be named openshift-zone. - maxLength: 80 - minLength: 1 - type: string - required: - - name - - region - - server - - topology - - zone - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
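Pulling the failureDomains schema above together, a hypothetical single-zone entry; every path, tag, and hostname is illustrative, and the paths follow the `^/.*?/host/`-style patterns required above:

```
failureDomains:
  - name: us-east-1a
    region: us-east            # vCenter tag in the openshift-region category
    zone: us-east-1a           # vCenter tag in the openshift-zone category
    server: vcenter.example.com
    topology:
      datacenter: dc1
      computeCluster: /dc1/host/cluster1
      datastore: /dc1/datastore/datastore1
      networks:
        - /dc1/network/VM Network   # exactly one port group is allowed
      resourcePool: /dc1/host/cluster1/Resources/gpu-pool
```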
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeNetworking: - description: nodeNetworking contains the definition of internal - and external network constraints for assigning the node's - networking. If this field is omitted, networking defaults - to the legacy address selection behavior which is to only - support a single address and return the first one found. - properties: - external: - description: external represents the network configuration - of the node that is externally routable. 
- properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used to when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - internal: - description: internal represents the network configuration - of the node that is routable only within the cluster. - properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used to when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - vcenters: - description: vcenters holds the connection details for services - to communicate with vCenter. Currently, only a single vCenter - is supported. --- - items: - description: VSpherePlatformVCenterSpec stores the vCenter - connection fields. This is used by the vSphere CCM. - properties: - datacenters: - description: The vCenter Datacenters in which the RHCOS - vm guests are located. This field will be used by - the Cloud Controller Manager. Each datacenter listed - here should be used within a topology. - items: - type: string - minItems: 1 - type: array - x-kubernetes-list-type: set - port: - description: port is the TCP port that will be used - to communicate to the vCenter endpoint. When omitted, - this means the user has no opinion and it is up to - the platform to choose a sensible default, which is - subject to change over time. - format: int32 - maximum: 32767 - minimum: 1 - type: integer - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server. 
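A hypothetical fragment exercising both blocks above: `nodeNetworking` to steer node address selection away from a management range, and `vcenters` for the (currently single) vCenter connection. All port-group names, CIDRs, and hosts are illustrative:

```
nodeNetworking:
  internal:
    network: VM Network          # port group searched for node addresses
    networkSubnetCidr:
      - 10.0.0.0/16              # only addresses in this range are used
    excludeNetworkSubnetCidr:
      - 10.0.250.0/24            # e.g. a management subnet to skip
  external:
    network: VM Network
vcenters:
  - server: vcenter.example.com
    port: 443
    datacenters:
      - dc1                      # must also be used within a topology
```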
--- - maxLength: 255 - type: string - required: - - datacenters - - server - type: object - maxItems: 1 - minItems: 0 - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - apiServerInternalURI: - description: apiServerInternalURL is a valid URI with scheme 'https', - address and optionally a port (defaulting to 443). apiServerInternalURL - can be used by components like kubelets, to contact the Kubernetes - API server using the infrastructure provider rather than Kubernetes - networking. - type: string - apiServerURL: - description: apiServerURL is a valid URI with scheme 'https', address - and optionally a port (defaulting to 443). apiServerURL can be - used by components like the web console to tell users where to find - the Kubernetes API. - type: string - controlPlaneTopology: - default: HighlyAvailable - description: controlPlaneTopology expresses the expectations for operands - that normally run on control nodes. The default is 'HighlyAvailable', - which represents the behavior operators have in a "normal" cluster. - The 'SingleReplica' mode will be used in single-node deployments - and the operators should not configure the operand for highly-available - operation The 'External' mode indicates that the control plane is - hosted externally to the cluster and that its components are not - visible within the cluster. - enum: - - HighlyAvailable - - SingleReplica - - External - type: string - cpuPartitioning: - default: None - description: cpuPartitioning expresses if CPU partitioning is a currently - enabled feature in the cluster. CPU Partitioning means that this - cluster can support partitioning workloads to specific CPU Sets. - Valid values are "None" and "AllNodes". When omitted, the default - value is "None". The default value of "None" indicates that no nodes - will be setup with CPU partitioning. The "AllNodes" value indicates - that all nodes have been setup with CPU partitioning, and can then - be further configured via the PerformanceProfile API. - enum: - - None - - AllNodes - type: string - etcdDiscoveryDomain: - description: 'etcdDiscoveryDomain is the domain used to fetch the - SRV records for discovering etcd servers and clients. For more info: - https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery - deprecated: as of 4.7, this field is no longer set or honored. It - will be removed in a future release.' - type: string - infrastructureName: - description: infrastructureName uniquely identifies a cluster with - a human friendly name. Once set it should not be changed. Must be - of max length 27 and must have only alphanumeric or hyphen characters. - type: string - infrastructureTopology: - default: HighlyAvailable - description: 'infrastructureTopology expresses the expectations for - infrastructure services that do not run on control plane nodes, - usually indicated by a node selector for a `role` value other than - `master`. The default is ''HighlyAvailable'', which represents the - behavior operators have in a "normal" cluster. 
The ''SingleReplica'' - mode will be used in single-node deployments and the operators should - not configure the operand for highly-available operation NOTE: External - topology mode is not applicable for this field.' - enum: - - HighlyAvailable - - SingleReplica - type: string - platform: - description: "platform is the underlying infrastructure provider for - the cluster. \n Deprecated: Use platformStatus.type instead." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - platformStatus: - description: platformStatus holds status information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - properties: - region: - description: region specifies the region for Alibaba Cloud - resources created for the cluster. - pattern: ^[0-9A-Za-z-]+$ - type: string - resourceGroupID: - description: resourceGroupID is the ID of the resource group - for the cluster. - pattern: ^(rg-[0-9A-Za-z]+)?$ - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to Alibaba Cloud resources created for the cluster. - items: - description: AlibabaCloudResourceTag is the set of tags - to add to apply to resources. - properties: - key: - description: key is the key of the tag. - maxLength: 128 - minLength: 1 - type: string - value: - description: value is the value of the tag. - maxLength: 128 - minLength: 1 - type: string - required: - - key - - value - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - required: - - region - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. - properties: - region: - description: region holds the default AWS region for new AWS - resources created by the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html - for information on tagging AWS resources. AWS supports a - maximum of 50 tags per resource. OpenShift reserves 25 tags - for its use, leaving 25 tags available for the user. - items: - description: AWSResourceTag is a tag to apply to AWS resources - created for the cluster. - properties: - key: - description: key is the key of the tag - maxLength: 128 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - value: - description: value is the value of the tag. Some AWS - service do not support empty values. Since tags are - added to resources in many services, the length of - the tag value must meet the requirements of all services. - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - required: - - key - - value - type: object - maxItems: 25 - type: array - x-kubernetes-list-type: atomic - serviceEndpoints: - description: ServiceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint store the configuration - of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service. 
The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - properties: - armEndpoint: - description: armEndpoint specifies a URL to use for resource - management in non-soverign clouds such as Azure Stack. - type: string - cloudName: - description: cloudName is the name of the Azure cloud environment - which can be used to configure the Azure SDK with the appropriate - Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. - enum: - - "" - - AzurePublicCloud - - AzureUSGovernmentCloud - - AzureChinaCloud - - AzureGermanCloud - - AzureStackCloud - type: string - networkResourceGroupName: - description: networkResourceGroupName is the Resource Group - for network resources like the Virtual Network and Subnets - used by the cluster. If empty, the value is same as ResourceGroupName. - type: string - resourceGroupName: - description: resourceGroupName is the Resource Group for new - Azure resources created for the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags - for information on tagging Azure resources. Due to limitations - on Automation, Content Delivery Network, DNS Azure resources, - a maximum of 15 tags may be applied. OpenShift reserves - 5 tags for internal use, allowing 10 tags for user configuration. - items: - description: AzureResourceTag is a tag to apply to Azure - resources created for the cluster. - properties: - key: - description: key is the key part of the tag. A tag key - can have a maximum of 128 characters and cannot be - empty. Key must begin with a letter, end with a letter, - number or underscore, and must contain only alphanumeric - characters and the following special characters `_ - . -`. - maxLength: 128 - minLength: 1 - pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ - type: string - value: - description: 'value is the value part of the tag. A - tag value can have a maximum of 256 characters and - cannot be empty. Value must contain only alphanumeric - characters and the following special characters `_ - + , - . / : ; < = > ? @`.' - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.=+-@]+$ - type: string - required: - - key - - value - type: object - maxItems: 10 - type: array - x-kubernetes-list-type: atomic - x-kubernetes-validations: - - message: resourceTags are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - type: object - x-kubernetes-validations: - - message: resourceTags may only be configured during installation - rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) - || has(oldSelf.resourceTags) && has(self.resourceTags)' - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. 
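As a reading aid for the AWS block above, a hypothetical `status.platformStatus.aws` fragment with custom service endpoints (e.g. for a GovCloud or private deployment) plus user tags; the region, endpoint URLs, and tag values are illustrative:

```
aws:
  region: us-gov-west-1
  resourceTags:
    - key: team
      value: gpu-operator
  serviceEndpoints:
    - name: ec2                                      # matches ^[a-z0-9-]+$
      url: https://ec2.us-gov-west-1.amazonaws.com   # must start https://
    - name: s3
      url: https://s3.us-gov-west-1.amazonaws.com
```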
- properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on BareMetal platform which can be a - user-managed or openshift-managed load balancer that - is to be used for the OpenShift API and Ingress endpoints. - When set to OpenShiftManagedDefault the static pods - in charge of API and Ingress traffic load-balancing - defined in the machine config operator will be deployed. - When set to UserManaged these static pods will not be - deployed and it is expected that the load balancer is - configured out of band by the deployer. When omitted, - this means no opinion and the platform is left to choose - a reasonable default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for BareMetal deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - external: - description: External contains settings specific to the generic - External infrastructure provider. - properties: - cloudControllerManager: - description: cloudControllerManager contains settings specific - to the external Cloud Controller Manager (a.k.a. CCM or - CPI). When omitted, new nodes will be not tainted and no - extra initialization from the cloud controller manager is - expected. - properties: - state: - description: "state determines whether or not an external - Cloud Controller Manager is expected to be installed - within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager - \n Valid values are \"External\", \"None\" and omitted. - When set to \"External\", new nodes will be tainted - as uninitialized when created, preventing them from - running workloads until they are initialized by the - cloud controller manager. 
When omitted or set to \"None\", - new nodes will be not tainted and no extra initialization - from the cloud controller manager is expected." - enum: - - "" - - External - - None - type: string - x-kubernetes-validations: - - message: state is immutable once set - rule: self == oldSelf - type: object - x-kubernetes-validations: - - message: state may not be added or removed once set - rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) - && self.state != "External") - type: object - x-kubernetes-validations: - - message: cloudControllerManager may not be added or removed - once set - rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - properties: - cloudLoadBalancerConfig: - default: - dnsType: PlatformDefault - description: cloudLoadBalancerConfig is a union that contains - the IP addresses of API, API-Int and Ingress Load Balancers - created on the cloud platform. These values would not be - populated on on-prem platforms. These Load Balancer IPs - are used to configure the in-cluster DNS instances for API, - API-Int and Ingress services. `dnsType` is expected to be - set to `ClusterHosted` when these Load Balancer IP addresses - are populated and used. - nullable: true - properties: - clusterHosted: - description: clusterHosted holds the IP addresses of API, - API-Int and Ingress Load Balancers on Cloud Platforms. - The DNS solution hosted within the cluster use these - IP addresses to provide resolution for API, API-Int - and Ingress services. - properties: - apiIntLoadBalancerIPs: - description: apiIntLoadBalancerIPs holds Load Balancer - IPs for the internal API service. These Load Balancer - IP addresses can be IPv4 and/or IPv6 addresses. - Entries in the apiIntLoadBalancerIPs must be unique. - A maximum of 16 IP addresses are permitted. - format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - apiLoadBalancerIPs: - description: apiLoadBalancerIPs holds Load Balancer - IPs for the API service. These Load Balancer IP - addresses can be IPv4 and/or IPv6 addresses. Could - be empty for private clusters. Entries in the apiLoadBalancerIPs - must be unique. A maximum of 16 IP addresses are - permitted. 
- format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - ingressLoadBalancerIPs: - description: ingressLoadBalancerIPs holds IPs for - Ingress Load Balancers. These Load Balancer IP addresses - can be IPv4 and/or IPv6 addresses. Entries in the - ingressLoadBalancerIPs must be unique. A maximum - of 16 IP addresses are permitted. - format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - type: object - dnsType: - default: PlatformDefault - description: dnsType indicates the type of DNS solution - in use within the cluster. Its default value of `PlatformDefault` - indicates that the cluster's DNS is the default provided - by the cloud platform. It can be set to `ClusterHosted` - to bypass the configuration of the cloud default DNS. - In this mode, the cluster needs to provide a self-hosted - DNS solution for the cluster's installation to succeed. - The cluster's use of the cloud's Load Balancers is unaffected - by this setting. The value is immutable after it has - been set at install time. 
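A hypothetical `ClusterHosted` fragment for the GCP block above; this shape satisfies the rule stated just below that `clusterHosted` is only permitted when `dnsType` is `ClusterHosted`. All IP addresses are illustrative:

```
gcp:
  cloudLoadBalancerConfig:
    dnsType: ClusterHosted       # bypass the cloud-default DNS
    clusterHosted:
      apiLoadBalancerIPs:
        - 203.0.113.10           # external API LB (may be empty if private)
      apiIntLoadBalancerIPs:
        - 10.0.0.10              # internal API LB
      ingressLoadBalancerIPs:
        - 203.0.113.11
```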
Currently, there is no way - for the customer to add additional DNS entries into - the cluster hosted DNS. Enabling this functionality - allows the user to start their own DNS solution outside - the cluster after installation is complete. The customer - would be responsible for configuring this custom DNS - solution, and it can be run in addition to the in-cluster - DNS solution. - enum: - - ClusterHosted - - PlatformDefault - type: string - x-kubernetes-validations: - - message: dnsType is immutable - rule: oldSelf == '' || self == oldSelf - type: object - x-kubernetes-validations: - - message: clusterHosted is permitted only when dnsType is - ClusterHosted - rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' - ? !has(self.clusterHosted) : true' - projectID: - description: resourceGroupName is the Project ID for new GCP - resources created for the cluster. - type: string - region: - description: region holds the region for new GCP resources - created for the cluster. - type: string - resourceLabels: - description: resourceLabels is a list of additional labels - to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources - for information on labeling GCP resources. GCP supports - a maximum of 64 labels per resource. OpenShift reserves - 32 labels for internal use, allowing 32 labels for user - configuration. - items: - description: GCPResourceLabel is a label to apply to GCP - resources created for the cluster. - properties: - key: - description: key is the key part of the label. A label - key can have a maximum of 63 characters and cannot - be empty. Label key must begin with a lowercase letter, - and must contain only lowercase letters, numeric characters, - and the following special characters `_-`. Label key - must not have the reserved prefixes `kubernetes-io` - and `openshift-io`. - maxLength: 63 - minLength: 1 - pattern: ^[a-z][0-9a-z_-]{0,62}$ - type: string - x-kubernetes-validations: - - message: label keys must not start with either `openshift-io` - or `kubernetes-io` - rule: '!self.startsWith(''openshift-io'') && !self.startsWith(''kubernetes-io'')' - value: - description: value is the value part of the label. A - label value can have a maximum of 63 characters and - cannot be empty. Value must contain only lowercase - letters, numeric characters, and the following special - characters `_-`. - maxLength: 63 - minLength: 1 - pattern: ^[0-9a-z_-]{1,63}$ - type: string - required: - - key - - value - type: object - maxItems: 32 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - x-kubernetes-validations: - - message: resourceLabels are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - resourceTags: - description: resourceTags is a list of additional tags to - apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview - for information on tagging GCP resources. GCP supports a - maximum of 50 tags per resource. - items: - description: GCPResourceTag is a tag to apply to GCP resources - created for the cluster. - properties: - key: - description: key is the key part of the tag. A tag key - can have a maximum of 63 characters and cannot be - empty. Tag key must begin and end with an alphanumeric - character, and must contain only uppercase, lowercase - alphanumeric characters, and the following special - characters `._-`. 
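For the GCP labels and tags constraints above, a hypothetical fragment; the `parentID` (an organization number or project ID) and all keys and values are illustrative and sized to the patterns given:

```
resourceLabels:
  - key: env                # lowercase, no kubernetes-io/openshift-io prefix
    value: prod
resourceTags:
  - parentID: "1234567890"  # org ID: decimal digits, no leading zero
    key: cost-center
    value: gpu-team
```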
- maxLength: 63 - minLength: 1 - pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$ - type: string - parentID: - description: 'parentID is the ID of the hierarchical - resource where the tags are defined, e.g. at the Organization - or the Project level. To find the Organization or - Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, - https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. - An OrganizationID must consist of decimal numbers, - and cannot have leading zeroes. A ProjectID must be - 6 to 30 characters in length, can only contain lowercase - letters, numbers, and hyphens, and must start with - a letter, and cannot end with a hyphen.' - maxLength: 32 - minLength: 1 - pattern: (^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$) - type: string - value: - description: value is the value part of the tag. A tag - value can have a maximum of 63 characters and cannot - be empty. Tag value must begin and end with an alphanumeric - character, and must contain only uppercase, lowercase - alphanumeric characters, and the following special - characters `_-.@%=+:,*#&(){}[]` and spaces. - maxLength: 63 - minLength: 1 - pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$ - type: string - required: - - key - - parentID - - value - type: object - maxItems: 50 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - x-kubernetes-validations: - - message: resourceTags are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - type: object - x-kubernetes-validations: - - message: resourceLabels may only be configured during installation - rule: '!has(oldSelf.resourceLabels) && !has(self.resourceLabels) - || has(oldSelf.resourceLabels) && has(self.resourceLabels)' - - message: resourceTags may only be configured during installation - rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) - || has(oldSelf.resourceTags) && has(self.resourceTags)' - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - location: - description: Location is where the cluster has been deployed - type: string - providerType: - description: ProviderType indicates the type of cluster that - was created - type: string - resourceGroupName: - description: ResourceGroupName is the Resource Group for new - IBMCloud resources created for the cluster. - type: string - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of an - IBM Cloud service. These endpoints are consumed by components - within the cluster to reach the respective IBM Cloud Services. - items: - description: IBMCloudServiceEndpoint stores the configuration - of a custom url to override existing defaults of IBM Cloud - Services. - properties: - name: - description: 'name is the name of the IBM Cloud service. 
- Possible values are: CIS, COS, DNSServices, GlobalSearch, - GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, - ResourceManager, or VPC. For example, the IBM Cloud - Private IAM service could be configured with the service - `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` - Whereas the IBM Cloud Private VPC service for US South - (Dallas) could be configured with the service `name` - of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`' - enum: - - CIS - - COS - - DNSServices - - GlobalSearch - - GlobalTagging - - HyperProtect - - IAM - - KeyProtect - - ResourceController - - ResourceManager - - VPC - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - type: string - x-kubernetes-validations: - - message: url must be a valid absolute URL - rule: isURL(self) - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. 
- format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on Nutanix platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - cloudName: - description: cloudName is the name of the desired OpenStack - cloud in the client configuration file (`clouds.yaml`). - type: string - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on OpenStack platform which can be a - user-managed or openshift-managed load balancer that - is to be used for the OpenShift API and Ingress endpoints. 
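The same `loadBalancer` union recurs across the on-prem platform blocks in this file (BareMetal, Nutanix, OpenStack, Ovirt, VSphere). A hypothetical fragment opting out of the openshift-managed static-pod balancers in favor of one configured out of band by the deployer:

```
loadBalancer:
  type: UserManaged   # MCO API/Ingress static pods will not be deployed
```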
- When set to OpenShiftManagedDefault the static pods - in charge of API and Ingress traffic load-balancing - defined in the machine config operator will be deployed. - When set to UserManaged these static pods will not be - deployed and it is expected that the load balancer is - configured out of band by the deployer. When omitted, - this means no opinion and the platform is left to choose - a reasonable default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for OpenStack deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. 
- In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on Ovirt platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - nodeDNSIP: - description: 'deprecated: as of 4.6, this field is no longer - set or honored. It will be removed in a future release.' - type: string - type: object - powervs: - description: PowerVS contains settings specific to the Power Systems - Virtual Servers infrastructure provider. - properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - region: - description: region holds the default Power VS region for - new Power VS resources created by the cluster. - type: string - resourceGroup: - description: 'resourceGroup is the resource group name for - new IBMCloud resources created for a cluster. The resource - group specified here will be used by cluster-image-registry-operator - to set up a COS Instance in IBMCloud for the cluster registry. - More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. - When omitted, the image registry operator won''t be able - to configure storage, which results in the image registry - cluster operator not being in an available state.' - maxLength: 40 - pattern: ^[a-zA-Z0-9-_ ]+$ - type: string - x-kubernetes-validations: - - message: resourceGroup is immutable once set - rule: oldSelf == '' || self == oldSelf - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. 
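A hypothetical `status.platformStatus.powervs` fragment tying the fields above together (the endpoint item schema is detailed just below); the region, zone, resource group, and endpoint URL are illustrative:

```
powervs:
  region: us-south
  zone: us-south-1                         # single-zone clusters only
  resourceGroup: gpu-cluster-rg            # immutable once set, per the rule above
  serviceEndpoints:
    - name: iam                            # matches ^[a-z0-9-]+$
      url: https://private.iam.cloud.ibm.com
```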
- items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - zone: - description: 'zone holds the default zone for the new Power - VS resources created by the cluster. Note: Currently only - single-zone OCP clusters are supported' - type: string - type: object - x-kubernetes-validations: - - message: cannot unset resourceGroup once set - rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' - type: - description: "type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", - \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", - \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components - may not support all platforms, and must handle unrecognized - platforms as None if they do not support that platform. \n This - value will be synced with to the `status.platform` and `status.platformStatus.type`. - Currently this value cannot be changed once set." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. 
\n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on VSphere platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for vSphere deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. 
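Taken together, the vSphere status fields above might be populated as in this sketch; the addresses are hypothetical, and in a dual-stack cluster each list carries one IPv4 and one IPv6 entry:

```
vsphere:
  ingressIPs:
    - 192.0.2.10
    - 2001:db8::10
  loadBalancer:
    type: OpenShiftManagedDefault   # also the default when the field is omitted
  machineNetworks:
    - 192.0.2.0/24
    - 2001:db8::/64
```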
- type: string - type: object - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml-patch b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml-patch deleted file mode 100644 index d127130ad..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml-patch +++ /dev/null @@ -1,24 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/vcenters/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/failureDomains/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/networkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/networkSubnetCidr/items/format - value: cidr diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml deleted file mode 100644 index a3eb6efe3..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml +++ /dev/null @@ -1,1761 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: infrastructures.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Infrastructure - listKind: InfrastructureList - plural: infrastructures - singular: infrastructure - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Infrastructure holds cluster-wide information about Infrastructure. - \ The canonical name is `cluster` \n Compatibility level 1: Stable within - a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - cloudConfig: - description: "cloudConfig is a reference to a ConfigMap containing - the cloud provider configuration file. This configuration file is - used to configure the Kubernetes cloud provider integration when - using the built-in cloud provider integration or the external cloud - controller manager. The namespace for this config map is openshift-config. - \n cloudConfig should only be consumed by the kube_cloud_config - controller. The controller is responsible for using the user configuration - in the spec for various platforms and combining that with the user - provided ConfigMap in this field to create a stitched kube cloud - config. The controller generates a ConfigMap `kube-cloud-config` - in `openshift-config-managed` namespace with the kube cloud config - is stored in `cloud.conf` key. All the clients are expected to use - the generated ConfigMap only." - properties: - key: - description: Key allows pointing to a specific key/value inside - of the configmap. This is useful for logical file references. - type: string - name: - type: string - type: object - platformSpec: - description: platformSpec holds desired information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint store the configuration - of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service. The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - type: object - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. 
These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
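The CEL rule above, `size(self) == 2 ? self.exists_one(x, x.contains(':')) : true`, only constrains a two-entry list to mix address families. A sketch of a BareMetal spec fragment that passes it, with placeholder addresses:

```
baremetal:
  apiServerInternalIPs:
    - 10.0.0.5     # IPv4 entry
    - fd00::5      # IPv6 entry; two IPv4 or two IPv6 entries would be rejected
  ingressIPs:
    - 10.0.0.6
    - fd00::6
```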
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - type: object - external: - description: ExternalPlatformType represents generic infrastructure - provider. 
Platform-specific components should be supplemented - separately. - properties: - platformName: - default: Unknown - description: PlatformName holds the arbitrary string representing - the infrastructure provider name, expected to be set at - the installation time. This field is solely for informational - and reporting purposes and is not expected to be used for - decision-making. - type: string - x-kubernetes-validations: - - message: platform name cannot be changed once set - rule: oldSelf == 'Unknown' || self == oldSelf - type: object - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - type: object - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - failureDomains: - description: failureDomains configures failure domains information - for the Nutanix platform. When set, the failure domains - defined here may be used to spread Machines across prism - element clusters to improve fault tolerance of the cluster. - items: - description: NutanixFailureDomain configures failure domain - information for the Nutanix platform. - properties: - cluster: - description: cluster is to identify the cluster (the - Prism Element under management of the Prism Central), - in which the Machine's VM will be created. The cluster - identifier (uuid or name) can be obtained from the - Prism Central console or using the prism_central API. - properties: - name: - description: name is the resource name in the PC. - It cannot be empty if the type is Name. - type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource in - the PC. It cannot be empty if the type is UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - name: - description: name defines the unique name of a failure - domain. Name is required and must be at most 64 characters - in length. It must consist of only lower case alphanumeric - characters and hyphens (-). It must start and end - with an alphanumeric character. This value is arbitrary - and is used to identify the failure domain within - the platform. - maxLength: 64 - minLength: 1 - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' - type: string - subnets: - description: subnets holds a list of identifiers (one - or more) of the cluster's network subnets for the - Machine's VM to connect to. The subnet identifiers - (uuid or name) can be obtained from the Prism Central - console or using the prism_central API. - items: - description: NutanixResourceIdentifier holds the identity - of a Nutanix PC resource (cluster, image, subnet, - etc.) - properties: - name: - description: name is the resource name in the - PC. It cannot be empty if the type is Name. 
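The paired CEL rules above enforce that a NutanixResourceIdentifier carries exactly the field matching its `type` discriminator. Two sketches with placeholder values:

```
# type: UUID requires uuid and forbids name ...
cluster:
  type: UUID
  uuid: 00000000-0000-0000-0000-000000000000   # placeholder UUID
# ... while type: Name requires name and forbids uuid:
# cluster:
#   type: Name
#   name: pe-cluster-1
```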
- type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource - in the PC. It cannot be empty if the type is - UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - required: - - cluster - - name - - subnets - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - prismCentral: - description: prismCentral holds the endpoint address and port - to access the Nutanix Prism Central. When a cluster-wide - proxy is installed, by default, this endpoint will be accessed - via the proxy. Should you wish for communication with this - endpoint not to be proxied, please add the endpoint to the - proxy spec.noProxy list. - properties: - address: - description: address is the endpoint address (DNS name - or IP address) of the Nutanix Prism Central or Element - (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the Nutanix - Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - prismElements: - description: prismElements holds one or more endpoint address - and port data to access the Nutanix Prism Elements (clusters) - of the Nutanix Prism Central. Currently we only support - one Prism Element (cluster) for an OpenShift cluster, where - all the Nutanix resources (VMs, subnets, volumes, etc.) - used in the OpenShift cluster are located. In the future, - we may support Nutanix resources (VMs, etc.) spread over - multiple Prism Elements (clusters) of the Prism Central. - items: - description: NutanixPrismElementEndpoint holds the name - and endpoint data for a Prism Element (cluster) - properties: - endpoint: - description: endpoint holds the endpoint address and - port data of the Prism Element (cluster). When a cluster-wide - proxy is installed, by default, this endpoint will - be accessed via the proxy. Should you wish for communication - with this endpoint not to be proxied, please add the - endpoint to the proxy spec.noProxy list. - properties: - address: - description: address is the endpoint address (DNS - name or IP address) of the Nutanix Prism Central - or Element (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the - Nutanix Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - name: - description: name is the name of the Prism Element (cluster). - This value will correspond with the cluster field - configured on other resources (eg Machines, PVCs, - etc). 
- maxLength: 256 - type: string - required: - - endpoint - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - prismCentral - - prismElements - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
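Pulling the Nutanix stanza above together, a conforming platformSpec fragment might look like this sketch; names, addresses, and the UUID are placeholders, and 9440 is the usual Prism port:

```
nutanix:
  prismCentral:
    address: prismcentral.example.com
    port: 9440
  prismElements:
    - name: pe-cluster-1
      endpoint:
        address: pe1.example.com
        port: 9440
  failureDomains:
    - name: fd-1
      cluster:
        type: Name
        name: pe-cluster-1      # Name requires name, forbids uuid
      subnets:                  # exactly one entry (minItems/maxItems: 1)
        - type: UUID
          uuid: 00000000-0000-0000-0000-000000000000
```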
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. 
- type: object - powervs: - description: PowerVS contains settings specific to the IBM Power - Systems Virtual Servers infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. - items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - type: - description: type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", - "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", - "Nutanix" and "None". Individual components may not support - all platforms, and must handle unrecognized platforms as None - if they do not support that platform. - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
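For the PowerVS serviceEndpoints list above, each override needs a lowercase name and an https URL. A sketch with hypothetical service names and endpoints:

```
powervs:
  serviceEndpoints:
    - name: iam                                       # must match ^[a-z0-9-]+$
      url: https://iam.test.cloud.ibm.com             # must match ^https://
    - name: power-iaas
      url: https://dal.power-iaas.test.cloud.ibm.com
```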
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - failureDomains: - description: failureDomains contains the definition of region, - zone and the vCenter topology. If this is omitted failure - domains (regions and zones) will not be used. - items: - description: VSpherePlatformFailureDomainSpec holds the - region and zone failure domain and the vCenter topology - of that failure domain. - properties: - name: - description: name defines the arbitrary but unique name - of a failure domain. - maxLength: 256 - minLength: 1 - type: string - region: - description: region defines the name of a region tag - that will be attached to a vCenter datacenter. The - tag category in vCenter must be named openshift-region. - maxLength: 80 - minLength: 1 - type: string - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server. --- - maxLength: 255 - minLength: 1 - type: string - topology: - description: Topology describes a given failure domain - using vSphere constructs - properties: - computeCluster: - description: computeCluster the absolute path of - the vCenter cluster in which virtual machine will - be located. The absolute path is of the form //host/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*? - type: string - datacenter: - description: datacenter is the name of vCenter datacenter - in which virtual machines will be located. The - maximum length of the datacenter name is 80 characters. - maxLength: 80 - type: string - datastore: - description: datastore is the absolute path of the - datastore in which the virtual machine is located. - The absolute path is of the form //datastore/ - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/datastore/.*? - type: string - folder: - description: folder is the absolute path of the - folder where virtual machines are located. The - absolute path is of the form //vm/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/vm/.*? 
- type: string - networks: - description: networks is the list of port group - network names within this failure domain. Currently, - we only support a single interface per RHCOS virtual - machine. The available networks (port groups) - can be listed using `govc ls 'network/*'`. The - single interface should be the absolute path of - the form /<datacenter>/network/<portgroup>. - items: - type: string - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-type: atomic - resourcePool: - description: resourcePool is the absolute path of - the resource pool where virtual machines will - be created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*?/Resources.* - type: string - template: - description: "template is the full inventory path - of the virtual machine or template that will be - cloned when creating new machines in this failure - domain. The maximum length of the path is 2048 - characters. \n When omitted, the template will - be calculated by the control plane machineset - operator based on the region and zone defined - in VSpherePlatformFailureDomainSpec. For example, - for zone=zonea, region=region1, and infrastructure - name=test, the template path would be calculated - as /<datacenter>/vm/test-rhcos-region1-zonea." - maxLength: 2048 - minLength: 1 - pattern: ^/.*?/vm/.*? - type: string - required: - - computeCluster - - datacenter - - datastore - - networks - type: object - zone: - description: zone defines the name of a zone tag that - will be attached to a vCenter cluster. The tag category - in vCenter must be named openshift-zone. - maxLength: 80 - minLength: 1 - type: string - required: - - name - - region - - server - - topology - - zone - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::").
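The failure-domain schema just completed above ties vCenter tags to inventory paths. A hypothetical failure domain that satisfies the required fields and the path patterns:

```
failureDomains:
  - name: us-east-1a
    region: us-east                  # vCenter tag in category openshift-region
    zone: us-east-1a                 # vCenter tag in category openshift-zone
    server: vcenter.example.com
    topology:
      datacenter: dc1
      computeCluster: /dc1/host/cluster1     # matches ^/.*?/host/.*?
      datastore: /dc1/datastore/ds1          # matches ^/.*?/datastore/.*?
      networks:                              # exactly one port group
        - /dc1/network/pg1
```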
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeNetworking: - description: nodeNetworking contains the definition of internal - and external network constraints for assigning the node's - networking. If this field is omitted, networking defaults - to the legacy address selection behavior which is to only - support a single address and return the first one found. - properties: - external: - description: external represents the network configuration - of the node that is externally routable. 
- properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - internal: - description: internal represents the network configuration - of the node that is routable only within the cluster. - properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - vcenters: - description: vcenters holds the connection details for services - to communicate with vCenter. Currently, only a single vCenter - is supported. --- - items: - description: VSpherePlatformVCenterSpec stores the vCenter - connection fields. This is used by the vSphere CCM. - properties: - datacenters: - description: The vCenter Datacenters in which the RHCOS - vm guests are located. This field will be used by - the Cloud Controller Manager. Each datacenter listed - here should be used within a topology. - items: - type: string - minItems: 1 - type: array - x-kubernetes-list-type: set - port: - description: port is the TCP port that will be used - to communicate to the vCenter endpoint. When omitted, - this means the user has no opinion and it is up to - the platform to choose a sensible default, which is - subject to change over time. - format: int32 - maximum: 32767 - minimum: 1 - type: integer - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server.
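The nodeNetworking and vcenters schemas above combine CIDR-based address selection with a single vCenter connection entry. A sketch under hypothetical addresses and hostnames:

```
vsphere:
  nodeNetworking:
    internal:
      networkSubnetCidr:
        - 10.10.0.0/16       # node addresses are picked from these ranges
    external:
      excludeNetworkSubnetCidr:
        - 192.168.0.0/24     # addresses in this range are skipped
  vcenters:                  # currently at most one entry
    - server: vcenter.example.com
      port: 443
      datacenters:
        - dc1
```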
--- - maxLength: 255 - type: string - required: - - datacenters - - server - type: object - maxItems: 1 - minItems: 0 - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - apiServerInternalURI: - description: apiServerInternalURL is a valid URI with scheme 'https', - address and optionally a port (defaulting to 443). apiServerInternalURL - can be used by components like kubelets, to contact the Kubernetes - API server using the infrastructure provider rather than Kubernetes - networking. - type: string - apiServerURL: - description: apiServerURL is a valid URI with scheme 'https', address - and optionally a port (defaulting to 443). apiServerURL can be - used by components like the web console to tell users where to find - the Kubernetes API. - type: string - controlPlaneTopology: - default: HighlyAvailable - description: controlPlaneTopology expresses the expectations for operands - that normally run on control nodes. The default is 'HighlyAvailable', - which represents the behavior operators have in a "normal" cluster. - The 'SingleReplica' mode will be used in single-node deployments - and the operators should not configure the operand for highly-available - operation The 'External' mode indicates that the control plane is - hosted externally to the cluster and that its components are not - visible within the cluster. - enum: - - HighlyAvailable - - SingleReplica - - External - type: string - cpuPartitioning: - default: None - description: cpuPartitioning expresses if CPU partitioning is a currently - enabled feature in the cluster. CPU Partitioning means that this - cluster can support partitioning workloads to specific CPU Sets. - Valid values are "None" and "AllNodes". When omitted, the default - value is "None". The default value of "None" indicates that no nodes - will be setup with CPU partitioning. The "AllNodes" value indicates - that all nodes have been setup with CPU partitioning, and can then - be further configured via the PerformanceProfile API. - enum: - - None - - AllNodes - type: string - etcdDiscoveryDomain: - description: 'etcdDiscoveryDomain is the domain used to fetch the - SRV records for discovering etcd servers and clients. For more info: - https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery - deprecated: as of 4.7, this field is no longer set or honored. It - will be removed in a future release.' - type: string - infrastructureName: - description: infrastructureName uniquely identifies a cluster with - a human friendly name. Once set it should not be changed. Must be - of max length 27 and must have only alphanumeric or hyphen characters. - type: string - infrastructureTopology: - default: HighlyAvailable - description: 'infrastructureTopology expresses the expectations for - infrastructure services that do not run on control plane nodes, - usually indicated by a node selector for a `role` value other than - `master`. The default is ''HighlyAvailable'', which represents the - behavior operators have in a "normal" cluster. 
The ''SingleReplica'' - mode will be used in single-node deployments and the operators should - not configure the operand for highly-available operation. NOTE: External - topology mode is not applicable for this field.' - enum: - - HighlyAvailable - - SingleReplica - type: string - platform: - description: "platform is the underlying infrastructure provider for - the cluster. \n Deprecated: Use platformStatus.type instead." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - platformStatus: - description: platformStatus holds status information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - properties: - region: - description: region specifies the region for Alibaba Cloud - resources created for the cluster. - pattern: ^[0-9A-Za-z-]+$ - type: string - resourceGroupID: - description: resourceGroupID is the ID of the resource group - for the cluster. - pattern: ^(rg-[0-9A-Za-z]+)?$ - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to Alibaba Cloud resources created for the cluster. - items: - description: AlibabaCloudResourceTag is the set of tags - to apply to resources. - properties: - key: - description: key is the key of the tag. - maxLength: 128 - minLength: 1 - type: string - value: - description: value is the value of the tag. - maxLength: 128 - minLength: 1 - type: string - required: - - key - - value - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - required: - - region - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. - properties: - region: - description: region holds the default AWS region for new AWS - resources created by the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html - for information on tagging AWS resources. AWS supports a - maximum of 50 tags per resource. OpenShift reserves 25 tags - for its use, leaving 25 tags available for the user. - items: - description: AWSResourceTag is a tag to apply to AWS resources - created for the cluster. - properties: - key: - description: key is the key of the tag - maxLength: 128 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - value: - description: value is the value of the tag. Some AWS - services do not support empty values. Since tags are - added to resources in many services, the length of - the tag value must meet the requirements of all services. - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - required: - - key - - value - type: object - maxItems: 25 - type: array - x-kubernetes-list-type: atomic - serviceEndpoints: - description: ServiceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint stores the configuration - of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service.
The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - properties: - armEndpoint: - description: armEndpoint specifies a URL to use for resource - management in non-sovereign clouds such as Azure Stack. - type: string - cloudName: - description: cloudName is the name of the Azure cloud environment - which can be used to configure the Azure SDK with the appropriate - Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. - enum: - - "" - - AzurePublicCloud - - AzureUSGovernmentCloud - - AzureChinaCloud - - AzureGermanCloud - - AzureStackCloud - type: string - networkResourceGroupName: - description: networkResourceGroupName is the Resource Group - for network resources like the Virtual Network and Subnets - used by the cluster. If empty, the value is the same as ResourceGroupName. - type: string - resourceGroupName: - description: resourceGroupName is the Resource Group for new - Azure resources created for the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags - for information on tagging Azure resources. Due to limitations - on Automation, Content Delivery Network, DNS Azure resources, - a maximum of 15 tags may be applied. OpenShift reserves - 5 tags for internal use, allowing 10 tags for user configuration. - items: - description: AzureResourceTag is a tag to apply to Azure - resources created for the cluster. - properties: - key: - description: key is the key part of the tag. A tag key - can have a maximum of 128 characters and cannot be - empty. Key must begin with a letter, end with a letter, - number or underscore, and must contain only alphanumeric - characters and the following special characters `_ - . -`. - maxLength: 128 - minLength: 1 - pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ - type: string - value: - description: 'value is the value part of the tag. A - tag value can have a maximum of 256 characters and - cannot be empty. Value must contain only alphanumeric - characters and the following special characters `_ - + , - . / : ; < = > ? @`.' - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.=+-@]+$ - type: string - required: - - key - - value - type: object - maxItems: 10 - type: array - x-kubernetes-list-type: atomic - x-kubernetes-validations: - - message: resourceTags are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - type: object - x-kubernetes-validations: - - message: resourceTags may only be configured during installation - rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) - || has(oldSelf.resourceTags) && has(self.resourceTags)' - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform.
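The Azure resourceTags block above is guarded twice: the inner CEL rule freezes the list contents, and the outer rule forbids adding or removing the field after installation. A sketch of a conforming status fragment, with hypothetical values:

```
azure:
  cloudName: AzurePublicCloud
  resourceGroupName: my-cluster-rg
  resourceTags:                 # immutable after installation per the CEL rules above
    - key: owner
      value: gpu-team
```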
- properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for BareMetal deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. 
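The BareMetal status block above carries both the deprecated singular fields and their plural dual-stack replacements. A hedged sketch of how the two relate (all addresses are placeholders):

```
platformStatus:
  type: BareMetal
  baremetal:
    apiServerInternalIP: 10.0.0.5   # deprecated: mirrors the first entry of the plural list
    apiServerInternalIPs:
      - 10.0.0.5
      - fd00::5                     # second entry only in dual-stack clusters
    ingressIP: 10.0.0.6             # deprecated: use ingressIPs
    ingressIPs:
      - 10.0.0.6
      - fd00::6
    machineNetworks:
      - 10.0.0.0/24
      - fd00::/64
    nodeDNSIP: 10.0.0.2             # served by the static-pod DNS described above
```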
- type: string - type: object - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - external: - description: External contains settings specific to the generic - External infrastructure provider. - properties: - cloudControllerManager: - description: cloudControllerManager contains settings specific - to the external Cloud Controller Manager (a.k.a. CCM or - CPI). When omitted, new nodes will be not tainted and no - extra initialization from the cloud controller manager is - expected. - properties: - state: - description: "state determines whether or not an external - Cloud Controller Manager is expected to be installed - within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager - \n Valid values are \"External\", \"None\" and omitted. - When set to \"External\", new nodes will be tainted - as uninitialized when created, preventing them from - running workloads until they are initialized by the - cloud controller manager. When omitted or set to \"None\", - new nodes will be not tainted and no extra initialization - from the cloud controller manager is expected." - enum: - - "" - - External - - None - type: string - x-kubernetes-validations: - - message: state is immutable once set - rule: self == oldSelf - type: object - x-kubernetes-validations: - - message: state may not be added or removed once set - rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) - && self.state != "External") - type: object - x-kubernetes-validations: - - message: cloudControllerManager may not be added or removed - once set - rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - properties: - projectID: - description: resourceGroupName is the Project ID for new GCP - resources created for the cluster. - type: string - region: - description: region holds the region for new GCP resources - created for the cluster. - type: string - type: object - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. 
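The External platform's cloudControllerManager.state above gates node tainting, and its CEL rules make the field effectively write-once. A sketch of a status opting into an external CCM:

```
platformStatus:
  type: External
  external:
    cloudControllerManager:
      state: External   # immutable once set (CEL: self == oldSelf); new nodes stay
                        # tainted until the external cloud controller manager initializes them
```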
- properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - location: - description: Location is where the cluster has been deployed - type: string - providerType: - description: ProviderType indicates the type of cluster that - was created - type: string - resourceGroupName: - description: ResourceGroupName is the Resource Group for new - IBMCloud resources created for the cluster. - type: string - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of an - IBM Cloud service. These endpoints are consumed by components - within the cluster to reach the respective IBM Cloud Services. - items: - description: IBMCloudServiceEndpoint stores the configuration - of a custom url to override existing defaults of IBM Cloud - Services. - properties: - name: - description: 'name is the name of the IBM Cloud service. - Possible values are: CIS, COS, DNSServices, GlobalSearch, - GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, - ResourceManager, or VPC. For example, the IBM Cloud - Private IAM service could be configured with the service - `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` - Whereas the IBM Cloud Private VPC service for US South - (Dallas) could be configured with the service `name` - of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`' - enum: - - CIS - - COS - - DNSServices - - GlobalSearch - - GlobalTagging - - HyperProtect - - IAM - - KeyProtect - - ResourceController - - ResourceManager - - VPC - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - type: string - x-kubernetes-validations: - - message: url must be a valid absolute URL - rule: isURL(self) - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. 
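The ibmcloud serviceEndpoints list above is keyed by service name and URL-validated with isURL(). Using the example pairs given in the description itself:

```
platformStatus:
  type: IBMCloud
  ibmcloud:
    serviceEndpoints:
      - name: IAM                                  # one of the enumerated service names
        url: https://private.iam.cloud.ibm.com     # must pass isURL(self)
      - name: VPC
        url: https://us.south.private.iaas.cloud.ibm.com
```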
It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - cloudName: - description: cloudName is the name of the desired OpenStack - cloud in the client configuration file (`clouds.yaml`). - type: string - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. 
- properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on OpenStack platform which can be a - user-managed or openshift-managed load balancer that - is to be used for the OpenShift API and Ingress endpoints. - When set to OpenShiftManagedDefault the static pods - in charge of API and Ingress traffic load-balancing - defined in the machine config operator will be deployed. - When set to UserManaged these static pods will not be - deployed and it is expected that the load balancer is - configured out of band by the deployer. When omitted, - this means no opinion and the platform is left to choose - a reasonable default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for OpenStack deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." 
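The OpenStack loadBalancer.type above defaults to OpenShiftManagedDefault and, per its CEL rule, cannot be changed once set. A sketch of the user-managed variant (cloud name illustrative):

```
platformStatus:
  type: OpenStack
  openstack:
    cloudName: mycloud        # entry name in the client clouds.yaml
    loadBalancer:
      type: UserManaged       # the API/Ingress static pods are not deployed;
                              # the deployer configures load balancing out of band
```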
- type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: 'deprecated: as of 4.6, this field is no longer - set or honored. It will be removed in a future release.' - type: string - type: object - powervs: - description: PowerVS contains settings specific to the Power Systems - Virtual Servers infrastructure provider. - properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - region: - description: region holds the default Power VS region for - new Power VS resources created by the cluster. - type: string - resourceGroup: - description: 'resourceGroup is the resource group name for - new IBMCloud resources created for a cluster. The resource - group specified here will be used by cluster-image-registry-operator - to set up a COS Instance in IBMCloud for the cluster registry. - More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. - When omitted, the image registry operator won''t be able - to configure storage, which results in the image registry - cluster operator not being in an available state.' - maxLength: 40 - pattern: ^[a-zA-Z0-9-_ ]+$ - type: string - x-kubernetes-validations: - - message: resourceGroup is immutable once set - rule: oldSelf == '' || self == oldSelf - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. - items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. 
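The PowerVS resourceGroup above is write-once (CEL: oldSelf == '' || self == oldSelf), and omitting it leaves the image registry operator unable to configure storage. A hedged sketch with illustrative names:

```
platformStatus:
  type: PowerVS
  powervs:
    region: us-south          # illustrative region
    zone: us-south-1          # single-zone clusters only, per the note above
    resourceGroup: my-rg      # <= 40 chars, ^[a-zA-Z0-9-_ ]+$, immutable once set
```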
- format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - zone: - description: 'zone holds the default zone for the new Power - VS resources created by the cluster. Note: Currently only - single-zone OCP clusters are supported' - type: string - type: object - x-kubernetes-validations: - - message: cannot unset resourceGroup once set - rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' - type: - description: "type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", - \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", - \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components - may not support all platforms, and must handle unrecognized - platforms as None if they do not support that platform. \n This - value will be synced with to the `status.platform` and `status.platformStatus.type`. - Currently this value cannot be changed once set." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). 
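The type field above is synced into the deprecated status.platform and into status.platformStatus.type, and cannot be changed once set. A minimal sketch of how the three surface together on the canonical cluster-scoped object:

```
apiVersion: config.openshift.io/v1
kind: Infrastructure
metadata:
  name: cluster          # the canonical singleton name
status:
  platform: AWS          # deprecated: use platformStatus.type instead
  platformStatus:
    type: AWS
```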
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for vSphere deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch deleted file mode 100644 index d127130ad..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch +++ /dev/null @@ -1,24 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/vcenters/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/failureDomains/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/networkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: 
/spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/networkSubnetCidr/items/format - value: cidr diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 73205cfa1..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,2089 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: infrastructures.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Infrastructure - listKind: InfrastructureList - plural: infrastructures - singular: infrastructure - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Infrastructure holds cluster-wide information about Infrastructure. - \ The canonical name is `cluster` \n Compatibility level 1: Stable within - a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - cloudConfig: - description: "cloudConfig is a reference to a ConfigMap containing - the cloud provider configuration file. This configuration file is - used to configure the Kubernetes cloud provider integration when - using the built-in cloud provider integration or the external cloud - controller manager. The namespace for this config map is openshift-config. - \n cloudConfig should only be consumed by the kube_cloud_config - controller. The controller is responsible for using the user configuration - in the spec for various platforms and combining that with the user - provided ConfigMap in this field to create a stitched kube cloud - config. The controller generates a ConfigMap `kube-cloud-config` - in `openshift-config-managed` namespace with the kube cloud config - is stored in `cloud.conf` key. All the clients are expected to use - the generated ConfigMap only." - properties: - key: - description: Key allows pointing to a specific key/value inside - of the configmap. This is useful for logical file references. 
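The cloudConfig reference above points at a ConfigMap in openshift-config; the kube_cloud_config controller stitches it into a generated `kube-cloud-config` ConfigMap (key `cloud.conf`) in openshift-config-managed. A sketch of the spec-side reference, with the ConfigMap name and key illustrative:

```
spec:
  cloudConfig:
    name: cloud-provider-config   # ConfigMap in the openshift-config namespace (illustrative name)
    key: config                   # key inside that ConfigMap holding the provider config (illustrative)
```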
- type: string - name: - type: string - type: object - platformSpec: - description: platformSpec holds desired information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint store the configuration - of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service. The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - type: object - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. 
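The apiServerInternalIPs/ingressIPs lists above pair maxItems: 2 with the CEL rule `size(self) == 2 ? self.exists_one(x, x.contains(':')) : true`, i.e. a two-entry list must hold exactly one IPv6 address (the entry containing ':'). A conforming dual-stack sketch with placeholder addresses:

```
platformSpec:
  baremetal:
    apiServerInternalIPs:
      - 192.168.1.5     # IPv4 entry
      - fd00::5         # IPv6 entry: exactly one value may contain ':'
    ingressIPs:
      - 192.168.1.6
      - fd00::6
```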
Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - type: object - external: - description: ExternalPlatformType represents generic infrastructure - provider. Platform-specific components should be supplemented - separately. - properties: - platformName: - default: Unknown - description: PlatformName holds the arbitrary string representing - the infrastructure provider name, expected to be set at - the installation time. This field is solely for informational - and reporting purposes and is not expected to be used for - decision-making. - type: string - x-kubernetes-validations: - - message: platform name cannot be changed once set - rule: oldSelf == 'Unknown' || self == oldSelf - type: object - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - type: object - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - failureDomains: - description: failureDomains configures failure domains information - for the Nutanix platform. When set, the failure domains - defined here may be used to spread Machines across prism - element clusters to improve fault tolerance of the cluster. - items: - description: NutanixFailureDomain configures failure domain - information for the Nutanix platform. 
- properties: - cluster: - description: cluster is to identify the cluster (the - Prism Element under management of the Prism Central), - in which the Machine's VM will be created. The cluster - identifier (uuid or name) can be obtained from the - Prism Central console or using the prism_central API. - properties: - name: - description: name is the resource name in the PC. - It cannot be empty if the type is Name. - type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource in - the PC. It cannot be empty if the type is UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - name: - description: name defines the unique name of a failure - domain. Name is required and must be at most 64 characters - in length. It must consist of only lower case alphanumeric - characters and hyphens (-). It must start and end - with an alphanumeric character. This value is arbitrary - and is used to identify the failure domain within - the platform. - maxLength: 64 - minLength: 1 - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' - type: string - subnets: - description: subnets holds a list of identifiers (one - or more) of the cluster's network subnets for the - Machine's VM to connect to. The subnet identifiers - (uuid or name) can be obtained from the Prism Central - console or using the prism_central API. - items: - description: NutanixResourceIdentifier holds the identity - of a Nutanix PC resource (cluster, image, subnet, - etc.) - properties: - name: - description: name is the resource name in the - PC. It cannot be empty if the type is Name. - type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource - in the PC. It cannot be empty if the type is - UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - required: - - cluster - - name - - subnets - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - prismCentral: - description: prismCentral holds the endpoint address and port - to access the Nutanix Prism Central. When a cluster-wide - proxy is installed, by default, this endpoint will be accessed - via the proxy. Should you wish for communication with this - endpoint not to be proxied, please add the endpoint to the - proxy spec.noProxy list. 
- properties: - address: - description: address is the endpoint address (DNS name - or IP address) of the Nutanix Prism Central or Element - (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the Nutanix - Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - prismElements: - description: prismElements holds one or more endpoint address - and port data to access the Nutanix Prism Elements (clusters) - of the Nutanix Prism Central. Currently we only support - one Prism Element (cluster) for an OpenShift cluster, where - all the Nutanix resources (VMs, subnets, volumes, etc.) - used in the OpenShift cluster are located. In the future, - we may support Nutanix resources (VMs, etc.) spread over - multiple Prism Elements (clusters) of the Prism Central. - items: - description: NutanixPrismElementEndpoint holds the name - and endpoint data for a Prism Element (cluster) - properties: - endpoint: - description: endpoint holds the endpoint address and - port data of the Prism Element (cluster). When a cluster-wide - proxy is installed, by default, this endpoint will - be accessed via the proxy. Should you wish for communication - with this endpoint not to be proxied, please add the - endpoint to the proxy spec.noProxy list. - properties: - address: - description: address is the endpoint address (DNS - name or IP address) of the Nutanix Prism Central - or Element (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the - Nutanix Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - name: - description: name is the name of the Prism Element (cluster). - This value will correspond with the cluster field - configured on other resources (eg Machines, PVCs, - etc). - maxLength: 256 - type: string - required: - - endpoint - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - prismCentral - - prismElements - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
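The Nutanix platformSpec above requires prismCentral plus (currently) a single Prism Element, with failure domains keyed by name and their UUID/Name identifiers cross-checked by CEL. A hedged sketch; addresses and the UUID are placeholders:

```
platformSpec:
  nutanix:
    prismCentral:
      address: pc.example.com          # placeholder DNS name
      port: 9440
    prismElements:
      - name: pe-cluster-1             # corresponds to the cluster field on Machines, PVCs, etc.
        endpoint:
          address: pe1.example.com
          port: 9440
    failureDomains:
      - name: fd-a                     # lower-case alphanumerics and hyphens
        cluster:
          type: Name                   # CEL: name required when type is Name
          name: pe-cluster-1
        subnets:                       # exactly one subnet today (minItems/maxItems: 1)
          - type: UUID                 # CEL: uuid required when type is UUID
            uuid: 0005a0f1-0000-0000-0000-000000000000   # placeholder UUID
```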
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. 
Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. - type: object - powervs: - description: PowerVS contains settings specific to the IBM Power - Systems Virtual Servers infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. - items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - type: - description: type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. 
Allowed values - are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", - "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", - "Nutanix" and "None". Individual components may not support - all platforms, and must handle unrecognized platforms as None - if they do not support that platform. - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - failureDomains: - description: failureDomains contains the definition of region, - zone and the vCenter topology. If this is omitted failure - domains (regions and zones) will not be used. - items: - description: VSpherePlatformFailureDomainSpec holds the - region and zone failure domain and the vCenter topology - of that failure domain. - properties: - name: - description: name defines the arbitrary but unique name - of a failure domain. - maxLength: 256 - minLength: 1 - type: string - region: - description: region defines the name of a region tag - that will be attached to a vCenter datacenter. The - tag category in vCenter must be named openshift-region. 
- maxLength: 80 - minLength: 1 - type: string - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server. --- - maxLength: 255 - minLength: 1 - type: string - topology: - description: Topology describes a given failure domain - using vSphere constructs - properties: - computeCluster: - description: computeCluster the absolute path of - the vCenter cluster in which virtual machine will - be located. The absolute path is of the form /<datacenter>/host/<cluster>. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*? - type: string - datacenter: - description: datacenter is the name of vCenter datacenter - in which virtual machines will be located. The - maximum length of the datacenter name is 80 characters. - maxLength: 80 - type: string - datastore: - description: datastore is the absolute path of the - datastore in which the virtual machine is located. - The absolute path is of the form /<datacenter>/datastore/<datastore> - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/datastore/.*? - type: string - folder: - description: folder is the absolute path of the - folder where virtual machines are located. The - absolute path is of the form /<datacenter>/vm/<folder>. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/vm/.*? - type: string - networks: - description: networks is the list of port group - network names within this failure domain. Currently, - we only support a single interface per RHCOS virtual - machine. The available networks (port groups) - can be listed using `govc ls 'network/*'` The - single interface should be the absolute path of - the form /<datacenter>/network/<portgroup>. - items: - type: string - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-type: atomic - resourcePool: - description: resourcePool is the absolute path of - the resource pool where virtual machines will - be created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*?/Resources.* - type: string - template: - description: "template is the full inventory path - of the virtual machine or template that will be - cloned when creating new machines in this failure - domain. The maximum length of the path is 2048 - characters. \n When omitted, the template will - be calculated by the control plane machineset - operator based on the region and zone defined - in VSpherePlatformFailureDomainSpec. For example, - for zone=zonea, region=region1, and infrastructure - name=test, the template path would be calculated - as /<datacenter>/vm/test-rhcos-region1-zonea." - maxLength: 2048 - minLength: 1 - pattern: ^/.*?/vm/.*? - type: string - required: - - computeCluster - - datacenter - - datastore - - networks - type: object - zone: - description: zone defines the name of a zone tag that - will be attached to a vCenter cluster. The tag category - in vCenter must be named openshift-zone. - maxLength: 80 - minLength: 1 - type: string - required: - - name - - region - - server - - topology - - zone - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names.
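Pulling the vSphere path conventions above together, each failure domain pins a region/zone tag pair to concrete inventory paths. A hedged sketch in which the datacenter, cluster, datastore, and network names are placeholders:

```
platformSpec:
  vsphere:
    failureDomains:
      - name: us-east-1a
        region: us-east               # tag in the openshift-region category
        zone: us-east-1a              # tag in the openshift-zone category
        server: vcenter.example.com   # FQDN or IP of the vCenter server
        topology:
          datacenter: dc1
          computeCluster: /dc1/host/cluster1     # /<datacenter>/host/<cluster>
          datastore: /dc1/datastore/datastore1   # /<datacenter>/datastore/<datastore>
          networks:
            - /dc1/network/VM Network            # single port group per RHCOS VM
```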
In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeNetworking: - description: nodeNetworking contains the definition of internal - and external network constraints for assigning the node's - networking. If this field is omitted, networking defaults - to the legacy address selection behavior which is to only - support a single address and return the first one found. - properties: - external: - description: external represents the network configuration - of the node that is externally routable. - properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used to when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - internal: - description: internal represents the network configuration - of the node that is routable only within the cluster. - properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used to when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. 
The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - vcenters: - description: vcenters holds the connection details for services - to communicate with vCenter. Currently, only a single vCenter - is supported. --- - items: - description: VSpherePlatformVCenterSpec stores the vCenter - connection fields. This is used by the vSphere CCM. - properties: - datacenters: - description: The vCenter Datacenters in which the RHCOS - vm guests are located. This field will be used by - the Cloud Controller Manager. Each datacenter listed - here should be used within a topology. - items: - type: string - minItems: 1 - type: array - x-kubernetes-list-type: set - port: - description: port is the TCP port that will be used - to communicate to the vCenter endpoint. When omitted, - this means the user has no opinion and it is up to - the platform to choose a sensible default, which is - subject to change over time. - format: int32 - maximum: 32767 - minimum: 1 - type: integer - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server. --- - maxLength: 255 - type: string - required: - - datacenters - - server - type: object - maxItems: 1 - minItems: 0 - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - apiServerInternalURI: - description: apiServerInternalURL is a valid URI with scheme 'https', - address and optionally a port (defaulting to 443). apiServerInternalURL - can be used by components like kubelets, to contact the Kubernetes - API server using the infrastructure provider rather than Kubernetes - networking. - type: string - apiServerURL: - description: apiServerURL is a valid URI with scheme 'https', address - and optionally a port (defaulting to 443). apiServerURL can be - used by components like the web console to tell users where to find - the Kubernetes API. - type: string - controlPlaneTopology: - default: HighlyAvailable - description: controlPlaneTopology expresses the expectations for operands - that normally run on control nodes. The default is 'HighlyAvailable', - which represents the behavior operators have in a "normal" cluster. - The 'SingleReplica' mode will be used in single-node deployments - and the operators should not configure the operand for highly-available - operation The 'External' mode indicates that the control plane is - hosted externally to the cluster and that its components are not - visible within the cluster. - enum: - - HighlyAvailable - - SingleReplica - - External - type: string - cpuPartitioning: - default: None - description: cpuPartitioning expresses if CPU partitioning is a currently - enabled feature in the cluster. 
CPU Partitioning means that this - cluster can support partitioning workloads to specific CPU Sets. - Valid values are "None" and "AllNodes". When omitted, the default - value is "None". The default value of "None" indicates that no nodes - will be setup with CPU partitioning. The "AllNodes" value indicates - that all nodes have been setup with CPU partitioning, and can then - be further configured via the PerformanceProfile API. - enum: - - None - - AllNodes - type: string - etcdDiscoveryDomain: - description: 'etcdDiscoveryDomain is the domain used to fetch the - SRV records for discovering etcd servers and clients. For more info: - https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery - deprecated: as of 4.7, this field is no longer set or honored. It - will be removed in a future release.' - type: string - infrastructureName: - description: infrastructureName uniquely identifies a cluster with - a human friendly name. Once set it should not be changed. Must be - of max length 27 and must have only alphanumeric or hyphen characters. - type: string - infrastructureTopology: - default: HighlyAvailable - description: 'infrastructureTopology expresses the expectations for - infrastructure services that do not run on control plane nodes, - usually indicated by a node selector for a `role` value other than - `master`. The default is ''HighlyAvailable'', which represents the - behavior operators have in a "normal" cluster. The ''SingleReplica'' - mode will be used in single-node deployments and the operators should - not configure the operand for highly-available operation NOTE: External - topology mode is not applicable for this field.' - enum: - - HighlyAvailable - - SingleReplica - type: string - platform: - description: "platform is the underlying infrastructure provider for - the cluster. \n Deprecated: Use platformStatus.type instead." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - platformStatus: - description: platformStatus holds status information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - properties: - region: - description: region specifies the region for Alibaba Cloud - resources created for the cluster. - pattern: ^[0-9A-Za-z-]+$ - type: string - resourceGroupID: - description: resourceGroupID is the ID of the resource group - for the cluster. - pattern: ^(rg-[0-9A-Za-z]+)?$ - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to Alibaba Cloud resources created for the cluster. - items: - description: AlibabaCloudResourceTag is the set of tags - to add to apply to resources. - properties: - key: - description: key is the key of the tag. - maxLength: 128 - minLength: 1 - type: string - value: - description: value is the value of the tag. - maxLength: 128 - minLength: 1 - type: string - required: - - key - - value - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - required: - - region - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. 
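To make the topology and partitioning fields above concrete, a hypothetical status for a single-node cluster could look like this sketch (all values illustrative):

```yaml
# Hypothetical single-node cluster status; infrastructureName must be
# at most 27 alphanumeric-or-hyphen characters and is set at install time.
status:
  infrastructureName: demo-cluster-x7k2p
  controlPlaneTopology: SingleReplica
  infrastructureTopology: SingleReplica
  cpuPartitioning: None
```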
- properties: - region: - description: region holds the default AWS region for new AWS - resources created by the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html - for information on tagging AWS resources. AWS supports a - maximum of 50 tags per resource. OpenShift reserves 25 tags - for its use, leaving 25 tags available for the user. - items: - description: AWSResourceTag is a tag to apply to AWS resources - created for the cluster. - properties: - key: - description: key is the key of the tag - maxLength: 128 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - value: - description: value is the value of the tag. Some AWS - service do not support empty values. Since tags are - added to resources in many services, the length of - the tag value must meet the requirements of all services. - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - required: - - key - - value - type: object - maxItems: 25 - type: array - x-kubernetes-list-type: atomic - serviceEndpoints: - description: ServiceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint store the configuration - of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service. The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - properties: - armEndpoint: - description: armEndpoint specifies a URL to use for resource - management in non-soverign clouds such as Azure Stack. - type: string - cloudName: - description: cloudName is the name of the Azure cloud environment - which can be used to configure the Azure SDK with the appropriate - Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. - enum: - - "" - - AzurePublicCloud - - AzureUSGovernmentCloud - - AzureChinaCloud - - AzureGermanCloud - - AzureStackCloud - type: string - networkResourceGroupName: - description: networkResourceGroupName is the Resource Group - for network resources like the Virtual Network and Subnets - used by the cluster. If empty, the value is same as ResourceGroupName. - type: string - resourceGroupName: - description: resourceGroupName is the Resource Group for new - Azure resources created for the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags - for information on tagging Azure resources. Due to limitations - on Automation, Content Delivery Network, DNS Azure resources, - a maximum of 15 tags may be applied. OpenShift reserves - 5 tags for internal use, allowing 10 tags for user configuration. 
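For the Azure `resourceTags` field described above, a hypothetical status entry staying within the 10-tag user budget might look like:

```yaml
# Hypothetical Azure status; tag keys must start with a letter and
# values are limited to 256 characters.
platformStatus:
  type: Azure
  azure:
    cloudName: AzurePublicCloud
    resourceGroupName: demo-cluster-rg
    resourceTags:
      - key: costCenter
        value: "1234"
      - key: environment
        value: production
```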
- items: - description: AzureResourceTag is a tag to apply to Azure - resources created for the cluster. - properties: - key: - description: key is the key part of the tag. A tag key - can have a maximum of 128 characters and cannot be - empty. Key must begin with a letter, end with a letter, - number or underscore, and must contain only alphanumeric - characters and the following special characters `_ - . -`. - maxLength: 128 - minLength: 1 - pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ - type: string - value: - description: 'value is the value part of the tag. A - tag value can have a maximum of 256 characters and - cannot be empty. Value must contain only alphanumeric - characters and the following special characters `_ - + , - . / : ; < = > ? @`.' - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.=+-@]+$ - type: string - required: - - key - - value - type: object - maxItems: 10 - type: array - x-kubernetes-list-type: atomic - x-kubernetes-validations: - - message: resourceTags are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - type: object - x-kubernetes-validations: - - message: resourceTags may only be configured during installation - rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) - || has(oldSelf.resourceTags) && has(self.resourceTags)' - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on BareMetal platform which can be a - user-managed or openshift-managed load balancer that - is to be used for the OpenShift API and Ingress endpoints. 
- When set to OpenShiftManagedDefault the static pods - in charge of API and Ingress traffic load-balancing - defined in the machine config operator will be deployed. - When set to UserManaged these static pods will not be - deployed and it is expected that the load balancer is - configured out of band by the deployer. When omitted, - this means no opinion and the platform is left to choose - a reasonable default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for BareMetal deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - external: - description: External contains settings specific to the generic - External infrastructure provider. 
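As a sketch of the bare-metal load balancer setting above, a hypothetical status for a deployer-managed load balancer might read:

```yaml
# Hypothetical BareMetal status: UserManaged suppresses the
# openshift-managed static load-balancing pods.
platformStatus:
  type: BareMetal
  baremetal:
    apiServerInternalIPs:
      - 192.168.20.5
    ingressIPs:
      - 192.168.20.6
    loadBalancer:
      type: UserManaged
```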
- properties: - cloudControllerManager: - description: cloudControllerManager contains settings specific - to the external Cloud Controller Manager (a.k.a. CCM or - CPI). When omitted, new nodes will be not tainted and no - extra initialization from the cloud controller manager is - expected. - properties: - state: - description: "state determines whether or not an external - Cloud Controller Manager is expected to be installed - within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager - \n Valid values are \"External\", \"None\" and omitted. - When set to \"External\", new nodes will be tainted - as uninitialized when created, preventing them from - running workloads until they are initialized by the - cloud controller manager. When omitted or set to \"None\", - new nodes will be not tainted and no extra initialization - from the cloud controller manager is expected." - enum: - - "" - - External - - None - type: string - x-kubernetes-validations: - - message: state is immutable once set - rule: self == oldSelf - type: object - x-kubernetes-validations: - - message: state may not be added or removed once set - rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) - && self.state != "External") - type: object - x-kubernetes-validations: - - message: cloudControllerManager may not be added or removed - once set - rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - properties: - cloudLoadBalancerConfig: - default: - dnsType: PlatformDefault - description: cloudLoadBalancerConfig is a union that contains - the IP addresses of API, API-Int and Ingress Load Balancers - created on the cloud platform. These values would not be - populated on on-prem platforms. These Load Balancer IPs - are used to configure the in-cluster DNS instances for API, - API-Int and Ingress services. `dnsType` is expected to be - set to `ClusterHosted` when these Load Balancer IP addresses - are populated and used. - nullable: true - properties: - clusterHosted: - description: clusterHosted holds the IP addresses of API, - API-Int and Ingress Load Balancers on Cloud Platforms. - The DNS solution hosted within the cluster use these - IP addresses to provide resolution for API, API-Int - and Ingress services. - properties: - apiIntLoadBalancerIPs: - description: apiIntLoadBalancerIPs holds Load Balancer - IPs for the internal API service. These Load Balancer - IP addresses can be IPv4 and/or IPv6 addresses. - Entries in the apiIntLoadBalancerIPs must be unique. - A maximum of 16 IP addresses are permitted. - format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - apiLoadBalancerIPs: - description: apiLoadBalancerIPs holds Load Balancer - IPs for the API service. These Load Balancer IP - addresses can be IPv4 and/or IPv6 addresses. Could - be empty for private clusters. Entries in the apiLoadBalancerIPs - must be unique. A maximum of 16 IP addresses are - permitted. - format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - ingressLoadBalancerIPs: - description: ingressLoadBalancerIPs holds IPs for - Ingress Load Balancers. These Load Balancer IP addresses - can be IPv4 and/or IPv6 addresses. Entries in the - ingressLoadBalancerIPs must be unique. A maximum - of 16 IP addresses are permitted. - format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - type: object - dnsType: - default: PlatformDefault - description: dnsType indicates the type of DNS solution - in use within the cluster. Its default value of `PlatformDefault` - indicates that the cluster's DNS is the default provided - by the cloud platform. It can be set to `ClusterHosted` - to bypass the configuration of the cloud default DNS. - In this mode, the cluster needs to provide a self-hosted - DNS solution for the cluster's installation to succeed. - The cluster's use of the cloud's Load Balancers is unaffected - by this setting. The value is immutable after it has - been set at install time. Currently, there is no way - for the customer to add additional DNS entries into - the cluster hosted DNS. Enabling this functionality - allows the user to start their own DNS solution outside - the cluster after installation is complete. The customer - would be responsible for configuring this custom DNS - solution, and it can be run in addition to the in-cluster - DNS solution. - enum: - - ClusterHosted - - PlatformDefault - type: string - x-kubernetes-validations: - - message: dnsType is immutable - rule: oldSelf == '' || self == oldSelf - type: object - x-kubernetes-validations: - - message: clusterHosted is permitted only when dnsType is - ClusterHosted - rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' - ? !has(self.clusterHosted) : true' - projectID: - description: resourceGroupName is the Project ID for new GCP - resources created for the cluster. - type: string - region: - description: region holds the region for new GCP resources - created for the cluster. - type: string - resourceLabels: - description: resourceLabels is a list of additional labels - to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources - for information on labeling GCP resources. GCP supports - a maximum of 64 labels per resource. OpenShift reserves - 32 labels for internal use, allowing 32 labels for user - configuration. - items: - description: GCPResourceLabel is a label to apply to GCP - resources created for the cluster. - properties: - key: - description: key is the key part of the label. A label - key can have a maximum of 63 characters and cannot - be empty. 
Label key must begin with a lowercase letter, - and must contain only lowercase letters, numeric characters, - and the following special characters `_-`. Label key - must not have the reserved prefixes `kubernetes-io` - and `openshift-io`. - maxLength: 63 - minLength: 1 - pattern: ^[a-z][0-9a-z_-]{0,62}$ - type: string - x-kubernetes-validations: - - message: label keys must not start with either `openshift-io` - or `kubernetes-io` - rule: '!self.startsWith(''openshift-io'') && !self.startsWith(''kubernetes-io'')' - value: - description: value is the value part of the label. A - label value can have a maximum of 63 characters and - cannot be empty. Value must contain only lowercase - letters, numeric characters, and the following special - characters `_-`. - maxLength: 63 - minLength: 1 - pattern: ^[0-9a-z_-]{1,63}$ - type: string - required: - - key - - value - type: object - maxItems: 32 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - x-kubernetes-validations: - - message: resourceLabels are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - resourceTags: - description: resourceTags is a list of additional tags to - apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview - for information on tagging GCP resources. GCP supports a - maximum of 50 tags per resource. - items: - description: GCPResourceTag is a tag to apply to GCP resources - created for the cluster. - properties: - key: - description: key is the key part of the tag. A tag key - can have a maximum of 63 characters and cannot be - empty. Tag key must begin and end with an alphanumeric - character, and must contain only uppercase, lowercase - alphanumeric characters, and the following special - characters `._-`. - maxLength: 63 - minLength: 1 - pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$ - type: string - parentID: - description: 'parentID is the ID of the hierarchical - resource where the tags are defined, e.g. at the Organization - or the Project level. To find the Organization or - Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, - https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. - An OrganizationID must consist of decimal numbers, - and cannot have leading zeroes. A ProjectID must be - 6 to 30 characters in length, can only contain lowercase - letters, numbers, and hyphens, and must start with - a letter, and cannot end with a hyphen.' - maxLength: 32 - minLength: 1 - pattern: (^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$) - type: string - value: - description: value is the value part of the tag. A tag - value can have a maximum of 63 characters and cannot - be empty. Tag value must begin and end with an alphanumeric - character, and must contain only uppercase, lowercase - alphanumeric characters, and the following special - characters `_-.@%=+:,*#&(){}[]` and spaces. 
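A hypothetical GCP tag entry conforming to the key, value, and `parentID` rules above might look like this sketch:

```yaml
# Hypothetical GCP tag bound to an organization ID (decimal, no
# leading zero); key and value follow the patterns in this schema.
platformStatus:
  type: GCP
  gcp:
    resourceTags:
      - parentID: "123456789012"
        key: cost-center
        value: engineering
```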
- maxLength: 63 - minLength: 1 - pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$ - type: string - required: - - key - - parentID - - value - type: object - maxItems: 50 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - x-kubernetes-validations: - - message: resourceTags are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - type: object - x-kubernetes-validations: - - message: resourceLabels may only be configured during installation - rule: '!has(oldSelf.resourceLabels) && !has(self.resourceLabels) - || has(oldSelf.resourceLabels) && has(self.resourceLabels)' - - message: resourceTags may only be configured during installation - rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) - || has(oldSelf.resourceTags) && has(self.resourceTags)' - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - location: - description: Location is where the cluster has been deployed - type: string - providerType: - description: ProviderType indicates the type of cluster that - was created - type: string - resourceGroupName: - description: ResourceGroupName is the Resource Group for new - IBMCloud resources created for the cluster. - type: string - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of an - IBM Cloud service. These endpoints are consumed by components - within the cluster to reach the respective IBM Cloud Services. - items: - description: IBMCloudServiceEndpoint stores the configuration - of a custom url to override existing defaults of IBM Cloud - Services. - properties: - name: - description: 'name is the name of the IBM Cloud service. - Possible values are: CIS, COS, DNSServices, GlobalSearch, - GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, - ResourceManager, or VPC. For example, the IBM Cloud - Private IAM service could be configured with the service - `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` - Whereas the IBM Cloud Private VPC service for US South - (Dallas) could be configured with the service `name` - of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`' - enum: - - CIS - - COS - - DNSServices - - GlobalSearch - - GlobalTagging - - HyperProtect - - IAM - - KeyProtect - - ResourceController - - ResourceManager - - VPC - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - type: string - x-kubernetes-validations: - - message: url must be a valid absolute URL - rule: isURL(self) - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. 
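For the IBM Cloud `serviceEndpoints` list described above, the private IAM override named in the schema's own example would be expressed as:

```yaml
# The IAM override uses the service name and URL quoted in the
# field description above; other entries follow the same shape.
platformStatus:
  type: IBMCloud
  ibmcloud:
    serviceEndpoints:
      - name: IAM
        url: https://private.iam.cloud.ibm.com
```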
- properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on Nutanix platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. 
- enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - cloudName: - description: cloudName is the name of the desired OpenStack - cloud in the client configuration file (`clouds.yaml`). - type: string - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on OpenStack platform which can be a - user-managed or openshift-managed load balancer that - is to be used for the OpenShift API and Ingress endpoints. - When set to OpenShiftManagedDefault the static pods - in charge of API and Ingress traffic load-balancing - defined in the machine config operator will be deployed. - When set to UserManaged these static pods will not be - deployed and it is expected that the load balancer is - configured out of band by the deployer. When omitted, - this means no opinion and the platform is left to choose - a reasonable default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for OpenStack deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. 
- format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on Ovirt platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - nodeDNSIP: - description: 'deprecated: as of 4.6, this field is no longer - set or honored. It will be removed in a future release.' - type: string - type: object - powervs: - description: PowerVS contains settings specific to the Power Systems - Virtual Servers infrastructure provider. - properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - region: - description: region holds the default Power VS region for - new Power VS resources created by the cluster. - type: string - resourceGroup: - description: 'resourceGroup is the resource group name for - new IBMCloud resources created for a cluster. The resource - group specified here will be used by cluster-image-registry-operator - to set up a COS Instance in IBMCloud for the cluster registry. - More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. - When omitted, the image registry operator won''t be able - to configure storage, which results in the image registry - cluster operator not being in an available state.' - maxLength: 40 - pattern: ^[a-zA-Z0-9-_ ]+$ - type: string - x-kubernetes-validations: - - message: resourceGroup is immutable once set - rule: oldSelf == '' || self == oldSelf - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. - items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. 
- format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - zone: - description: 'zone holds the default zone for the new Power - VS resources created by the cluster. Note: Currently only - single-zone OCP clusters are supported' - type: string - type: object - x-kubernetes-validations: - - message: cannot unset resourceGroup once set - rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' - type: - description: "type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", - \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", - \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components - may not support all platforms, and must handle unrecognized - platforms as None if they do not support that platform. \n This - value will be synced with to the `status.platform` and `status.platformStatus.type`. - Currently this value cannot be changed once set." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. 
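A hypothetical vSphere status using the plural, dual-stack-capable fields above (the deprecated singular fields omitted) might look like:

```yaml
# Hypothetical vSphere VIPs; each list holds at most two entries,
# one per IP family in dual-stack clusters.
platformStatus:
  type: VSphere
  vsphere:
    apiServerInternalIPs:
      - 192.168.10.5
    ingressIPs:
      - 192.168.10.6
```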
- properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on VSphere platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for vSphere deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. 
- type: string - type: object - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml-patch b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml-patch deleted file mode 100644 index d127130ad..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml-patch +++ /dev/null @@ -1,24 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/vcenters/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/failureDomains/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/networkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/networkSubnetCidr/items/format - value: cidr diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml deleted file mode 100644 index c582dccb1..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml +++ /dev/null @@ -1,553 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: ingresses.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Ingress - listKind: IngressList - plural: ingresses - singular: ingress - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Ingress holds cluster-wide information about ingress, including - the default ingress domain used for routes. The canonical name is `cluster`. - \n Compatibility level 1: Stable within a major release for a minimum of - 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - appsDomain: - description: appsDomain is an optional domain to use instead of the - one specified in the domain field when a Route is created without - specifying an explicit host. If appsDomain is nonempty, this value - is used to generate default host values for Route. Unlike domain, - appsDomain may be modified after installation. This assumes a new - ingresscontroller has been set up with a wildcard certificate. - type: string - componentRoutes: - description: "componentRoutes is an optional list of routes that are - managed by OpenShift components that a cluster-admin is able to - configure the hostname and serving certificate for. The namespace - and name of each route in this list should match an existing entry - in the status.componentRoutes list. \n To determine the set of configurable - Routes, look at namespace and name of entries in the .status.componentRoutes - list, where participating operators write the status of configurable - routes." - items: - description: ComponentRouteSpec allows for configuration of a route's - hostname and serving certificate. - properties: - hostname: - description: hostname is the hostname that should be used by - the route. - pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ - type: string - name: - description: "name is the logical name of the route to customize. - \n The namespace and name of this componentRoute must match - a corresponding entry in the list of status.componentRoutes - if the route is to be customized." - maxLength: 256 - minLength: 1 - type: string - namespace: - description: "namespace is the namespace of the route to customize. - \n The namespace and name of this componentRoute must match - a corresponding entry in the list of status.componentRoutes - if the route is to be customized." - maxLength: 63 - minLength: 1 - pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - type: string - servingCertKeyPairSecret: - description: servingCertKeyPairSecret is a reference to a secret - of type `kubernetes.io/tls` in the openshift-config namespace. - The serving cert/key pair must match and will be used by the - operator to fulfill the intent of serving with this name. - If the custom hostname uses the default routing suffix of - the cluster, the Secret specification for a serving certificate - will not be needed. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - required: - - hostname - - name - - namespace - type: object - type: array - x-kubernetes-list-map-keys: - - namespace - - name - x-kubernetes-list-type: map - domain: - description: "domain is used to generate a default host name for a - route when the route's host name is empty.
The generated host name - will follow this pattern: \"<route-name>.<route-namespace>.<domain>\". - \n It is also used as the default wildcard domain suffix for ingress. - The default ingresscontroller domain will follow this pattern: \"*.<domain>\". - \n Once set, changing domain is not currently supported." - type: string - loadBalancer: - description: loadBalancer contains the load balancer details in general - which are not only specific to the underlying infrastructure provider - of the current cluster and are required for Ingress Controller to - work on OpenShift. - properties: - platform: - description: platform holds configuration specific to the underlying - infrastructure provider for the ingress load balancers. When - omitted, this means the user has no opinion and the platform - is left to choose reasonable defaults. These defaults are subject - to change over time. - properties: - aws: - description: aws contains settings specific to the Amazon - Web Services infrastructure provider. - properties: - type: - description: "type allows user to set a load balancer - type. When this field is set the default ingresscontroller - will get created using the specified LBType. If this - field is not set then the default ingress controller - of LBType Classic will be created. Valid values are: - \n * \"Classic\": A Classic Load Balancer that makes - routing decisions at either the transport layer (TCP/SSL) - or the application layer (HTTP/HTTPS). See the following - for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb - \n * \"NLB\": A Network Load Balancer that makes routing - decisions at the transport layer (TCP/SSL). See the - following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" - enum: - - NLB - - Classic - type: string - required: - - type - type: object - type: - description: type is the underlying infrastructure provider - for the cluster. Allowed values are "AWS", "Azure", "BareMetal", - "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", - "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and - "None". Individual components may not support all platforms, - and must handle unrecognized platforms as None if they do - not support that platform. - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - type: object - type: object - requiredHSTSPolicies: - description: "requiredHSTSPolicies specifies HSTS policies that are - required to be set on newly created or updated routes matching - the domainPattern/s and namespaceSelector/s that are specified in - the policy. Each requiredHSTSPolicy must have at least a domainPattern - and a maxAge to validate a route HSTS Policy route annotation, and - affect route admission. \n A candidate route is checked for HSTS - Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" - E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains - \n - For each candidate route, if it matches a requiredHSTSPolicy - domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, - and includeSubdomainsPolicy must be valid to be admitted. Otherwise, - the route is rejected.
- The first match, by domainPattern and optional - namespaceSelector, in the ordering of the RequiredHSTSPolicies determines - the route's admission status. - If the candidate route doesn't match - any requiredHSTSPolicy domainPattern and optional namespaceSelector, - then it may use any HSTS Policy annotation. \n The HSTS policy configuration - may be changed after routes have already been created. An update - to a previously admitted route may then fail if the updated route - does not conform to the updated HSTS policy configuration. However, - changing the HSTS policy configuration will not cause a route that - is already admitted to stop working. \n Note that if there are no - RequiredHSTSPolicies, any HSTS Policy annotation on the route is - valid." - items: - properties: - domainPatterns: - description: "domainPatterns is a list of domains for which - the desired HSTS annotations are required. If domainPatterns - is specified and a route is created with a spec.host matching - one of the domains, the route must specify the HSTS Policy - components described in the matching RequiredHSTSPolicy. \n - The use of wildcards is allowed like this: *.foo.com matches - everything under foo.com. foo.com only matches foo.com, so - to cover foo.com and everything under it, you must specify - *both*." - items: - type: string - minItems: 1 - type: array - includeSubDomainsPolicy: - description: 'includeSubDomainsPolicy means the HSTS Policy - should apply to any subdomains of the host''s domain name. Thus, - for the host bar.foo.com, if includeSubDomainsPolicy was set - to RequireIncludeSubDomains: - the host app.bar.foo.com would - inherit the HSTS Policy of bar.foo.com - the host bar.foo.com - would inherit the HSTS Policy of bar.foo.com - the host foo.com - would NOT inherit the HSTS Policy of bar.foo.com - the host - def.foo.com would NOT inherit the HSTS Policy of bar.foo.com' - enum: - - RequireIncludeSubDomains - - RequireNoIncludeSubDomains - - NoOpinion - type: string - maxAge: - description: maxAge is the delta time range in seconds during - which hosts are regarded as HSTS hosts. If set to 0, it negates - the effect, and hosts are removed as HSTS hosts. If set to - 0 and includeSubdomains is specified, all subdomains of the - host are also removed as HSTS hosts. maxAge is a time-to-live - value, and if this policy is not refreshed on a client, the - HSTS policy will eventually expire on that client. - properties: - largestMaxAge: - description: The largest allowed value (in seconds) of the - RequiredHSTSPolicy max-age This value can be left unspecified, - in which case no upper limit is enforced. - format: int32 - maximum: 2147483647 - minimum: 0 - type: integer - smallestMaxAge: - description: The smallest allowed value (in seconds) of - the RequiredHSTSPolicy max-age Setting max-age=0 allows - the deletion of an existing HSTS header from a host. This - is a necessary tool for administrators to quickly correct - mistakes. This value can be left unspecified, in which - case no lower limit is enforced. - format: int32 - maximum: 2147483647 - minimum: 0 - type: integer - type: object - namespaceSelector: - description: namespaceSelector specifies a label selector such - that the policy applies only to those routes that are in namespaces - with labels that match the selector, and are in one of the - DomainPatterns. Defaults to the empty LabelSelector, which - matches everything. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. 
The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - preloadPolicy: - description: preloadPolicy directs the client to include hosts - in its host preload list so that it never needs to do an initial - load to get the HSTS header (note that this is not defined - in RFC 6797 and is therefore client implementation-dependent). - enum: - - RequirePreload - - RequireNoPreload - - NoOpinion - type: string - required: - - domainPatterns - type: object - type: array - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - componentRoutes: - description: componentRoutes is where participating operators place - the current route status for routes whose hostnames and serving - certificates can be customized by the cluster-admin. - items: - description: ComponentRouteStatus contains information allowing - configuration of a route's hostname and serving certificate. - properties: - conditions: - description: "conditions are used to communicate the state of - the componentRoutes entry. \n Supported conditions include - Available, Degraded and Progressing. \n If available is true, - the content served by the route can be accessed by users. - This includes cases where a default may continue to serve - content while the customized route specified by the cluster-admin - is being configured. \n If Degraded is true, that means something - has gone wrong trying to handle the componentRoutes entry. - The currentHostnames field may or may not be in effect. \n - If Progressing is true, that means the component is taking - some action related to the componentRoutes entry." - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. 
- // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: consumingUsers is a slice of ServiceAccounts that - need to have read permission on the servingCertKeyPairSecret - secret. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. - maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - currentHostnames: - description: currentHostnames is the list of current names used - by the route. Typically, this list should consist of a single - hostname, but if multiple hostnames are supported by the route - the operator may write multiple entries to this list. - items: - description: "Hostname is an alias for hostname string validation. 
- \n The left operand of the | is the original kubebuilder - hostname validation format, which is incorrect because it - allows upper case letters, disallows hyphen or number in - the TLD, and allows labels to start/end in non-alphanumeric - characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. - ^([a-zA-Z0-9\\p{S}\\p{L}]((-?[a-zA-Z0-9\\p{S}\\p{L}]{0,62})?)|([a-zA-Z0-9\\p{S}\\p{L}](([a-zA-Z0-9-\\p{S}\\p{L}]{0,61}[a-zA-Z0-9\\p{S}\\p{L}])?)(\\.)){1,}([a-zA-Z\\p{L}]){2,63})$ - \n The right operand of the | is a new pattern that mimics - the current API route admission validation on hostname, - except that it allows hostnames longer than the maximum - length: ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ - \n Both operand patterns are made available so that modifications - on ingress spec can still happen after an invalid hostname - was saved via validation by the incorrect left operand of - the | operator." - pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ - type: string - minItems: 1 - type: array - defaultHostname: - description: defaultHostname is the hostname of this route prior - to customization. - pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ - type: string - name: - description: "name is the logical name of the route to customize. - It does not have to be the actual name of a route resource - but it cannot be renamed. \n The namespace and name of this - componentRoute must match a corresponding entry in the list - of spec.componentRoutes if the route is to be customized." - maxLength: 256 - minLength: 1 - type: string - namespace: - description: "namespace is the namespace of the route to customize. - It must be a real namespace. Using an actual namespace ensures - that no two components will conflict and the same component - can be installed multiple times. \n The namespace and name - of this componentRoute must match a corresponding entry in - the list of spec.componentRoutes if the route is to be customized." - maxLength: 63 - minLength: 1 - pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - type: string - relatedObjects: - description: relatedObjects is a list of resources which are - useful when debugging or inspecting how spec.componentRoutes - is applied. - items: - description: ObjectReference contains enough information to - let you inspect or modify the referred object. - properties: - group: - description: group of the referent. - type: string - name: - description: name of the referent. - type: string - namespace: - description: namespace of the referent. - type: string - resource: - description: resource of the referent. 
- type: string - required: - - group - - name - - resource - type: object - minItems: 1 - type: array - required: - - defaultHostname - - name - - namespace - - relatedObjects - type: object - type: array - x-kubernetes-list-map-keys: - - namespace - - name - x-kubernetes-list-type: map - defaultPlacement: - description: "defaultPlacement is set at installation time to control - which nodes will host the ingress router pods by default. The options - are control-plane nodes or worker nodes. \n This field works by - dictating how the Cluster Ingress Operator will consider unset replicas - and nodePlacement fields in IngressController resources when creating - the corresponding Deployments. \n See the documentation for the - IngressController replicas and nodePlacement fields for more information. - \n When omitted, the default value is Workers" - enum: - - ControlPlane - - Workers - - "" - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml deleted file mode 100644 index 5392f14c6..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,284 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: networks.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Network - listKind: NetworkList - plural: networks - singular: network - preserveUnknownFields: false - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Network holds cluster-wide information about Network. The canonical - name is `cluster`. It is used to configure the desired network configuration, - such as: IP address pools for services/pod IPs, network plugin, etc. Please - view network.spec for an explanation on what applies when configuring this - resource. \n Compatibility level 1: Stable within a major release for a - minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration. As a general - rule, this SHOULD NOT be read directly. Instead, you should consume - the NetworkStatus, as it indicates the currently deployed configuration. 
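Pulling the Ingress schema removed above together from the consumer side: a hypothetical `cluster` Ingress that pins the route domain and requires a one-year HSTS policy for routes beneath it (domain and values invented for illustration) might read:

```yaml
apiVersion: config.openshift.io/v1
kind: Ingress
metadata:
  name: cluster
spec:
  domain: apps.example.com            # suffix for generated route hosts
  requiredHSTSPolicies:
    - domainPatterns:
        - '*.apps.example.com'        # wildcard covers everything under the domain
      maxAge:
        smallestMaxAge: 31536000      # admission requires a max-age of exactly one year
        largestMaxAge: 31536000
      preloadPolicy: RequirePreload
      includeSubDomainsPolicy: RequireIncludeSubDomains
```

A route under this domain would then only be admitted with an annotation such as `haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains`, per the description above.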
- Currently, most spec fields are immutable after installation. Please - view the individual ones for further details on each. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. This field is immutable - after installation. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. - format: int32 - minimum: 0 - type: integer - type: object - type: array - externalIP: - description: externalIP defines configuration for controllers that - affect Service.ExternalIP. If nil, then ExternalIP is not allowed - to be set. - properties: - autoAssignCIDRs: - description: autoAssignCIDRs is a list of CIDRs from which to - automatically assign Service.ExternalIP. These are assigned - when the service is of type LoadBalancer. In general, this is - only useful for bare-metal clusters. In Openshift 3.x, this - was misleadingly called "IngressIPs". Automatically assigned - External IPs are not affected by any ExternalIPPolicy rules. - Currently, only one entry may be provided. - items: - type: string - type: array - policy: - description: policy is a set of restrictions applied to the ExternalIP - field. If nil or empty, then ExternalIP is not allowed to be - set. - properties: - allowedCIDRs: - description: allowedCIDRs is the list of allowed CIDRs. - items: - type: string - type: array - rejectedCIDRs: - description: rejectedCIDRs is the list of disallowed CIDRs. - These take precedence over allowedCIDRs. - items: - type: string - type: array - type: object - type: object - networkType: - description: 'NetworkType is the plugin that is to be deployed (e.g. - OpenShiftSDN). This should match a value that the cluster-network-operator - understands, or else no networking will be installed. Currently - supported values are: - OpenShiftSDN This field is immutable after - installation.' - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. This field is immutable after installation. - items: - type: string - type: array - serviceNodePortRange: - description: The port range allowed for Services of type NodePort. - If not specified, the default of 30000-32767 will be used. Such - Services without a NodePort specified will have one automatically - allocated from this range. This parameter can be updated after the - cluster is installed. - pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - type: string - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. 
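The Network spec fields just listed combine into a small document. A hypothetical `cluster` Network (CIDRs invented; the descriptions above mark everything except serviceNodePortRange immutable after installation) might be:

```yaml
apiVersion: config.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  clusterNetwork:                 # pod IP pool; immutable after installation
    - cidr: 10.128.0.0/14
      hostPrefix: 23              # each node is allocated a /23 from the pool
  serviceNetwork:
    - 172.30.0.0/16               # single entry only; immutable after installation
  networkType: OpenShiftSDN       # must be a plugin the cluster-network-operator understands
  serviceNodePortRange: 30000-32767   # the one field updatable post-install
```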
- format: int32 - minimum: 0 - type: integer - type: object - type: array - clusterNetworkMTU: - description: ClusterNetworkMTU is the MTU for inter-pod networking. - type: integer - conditions: - description: 'conditions represents the observations of a network.config - current state. Known .status.conditions.type are: "NetworkTypeMigrationInProgress", - "NetworkTypeMigrationMTUReady", "NetworkTypeMigrationTargetCNIAvailable", - "NetworkTypeMigrationTargetCNIInUse" and "NetworkTypeMigrationOriginalCNIPurged"' - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - migration: - description: Migration contains the cluster network migration configuration. - properties: - mtu: - description: MTU contains the MTU migration configuration. 
- properties: - machine: - description: Machine contains MTU migration configuration - for the machine's uplink. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - network: - description: Network contains MTU migration configuration - for the default network. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - type: object - networkType: - description: 'NetworkType is the target plugin that is to be deployed. - Currently supported values are: OpenShiftSDN, OVNKubernetes' - enum: - - OpenShiftSDN - - OVNKubernetes - type: string - type: object - networkType: - description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. - items: - type: string - type: array - type: object - required: - - spec - type: object - served: true - storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-Default.crd.yaml deleted file mode 100644 index d71799f59..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-Default.crd.yaml +++ /dev/null @@ -1,284 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: networks.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Network - listKind: NetworkList - plural: networks - singular: network - preserveUnknownFields: false - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Network holds cluster-wide information about Network. The canonical - name is `cluster`. It is used to configure the desired network configuration, - such as: IP address pools for services/pod IPs, network plugin, etc. Please - view network.spec for an explanation on what applies when configuring this - resource. \n Compatibility level 1: Stable within a major release for a - minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration. 
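The migration/mtu structure defined just above (and repeated verbatim in the Default and TechPreviewNoUpgrade variants of this CRD below) records an in-flight MTU change for both the machine uplink and the default pod network. An illustrative status fragment, with all numbers invented:

```yaml
# Hypothetical Network status fragment during an MTU migration.
status:
  migration:
    mtu:
      machine:                    # MTU of the node uplink interface
        from: 1500
        to: 9000
      network:                    # MTU of the default (pod) network,
        from: 1400                # typically the uplink MTU minus encapsulation overhead
        to: 8900
    networkType: OVNKubernetes    # target plugin when migrating network types
```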
As a general - rule, this SHOULD NOT be read directly. Instead, you should consume - the NetworkStatus, as it indicates the currently deployed configuration. - Currently, most spec fields are immutable after installation. Please - view the individual ones for further details on each. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. This field is immutable - after installation. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. - format: int32 - minimum: 0 - type: integer - type: object - type: array - externalIP: - description: externalIP defines configuration for controllers that - affect Service.ExternalIP. If nil, then ExternalIP is not allowed - to be set. - properties: - autoAssignCIDRs: - description: autoAssignCIDRs is a list of CIDRs from which to - automatically assign Service.ExternalIP. These are assigned - when the service is of type LoadBalancer. In general, this is - only useful for bare-metal clusters. In Openshift 3.x, this - was misleadingly called "IngressIPs". Automatically assigned - External IPs are not affected by any ExternalIPPolicy rules. - Currently, only one entry may be provided. - items: - type: string - type: array - policy: - description: policy is a set of restrictions applied to the ExternalIP - field. If nil or empty, then ExternalIP is not allowed to be - set. - properties: - allowedCIDRs: - description: allowedCIDRs is the list of allowed CIDRs. - items: - type: string - type: array - rejectedCIDRs: - description: rejectedCIDRs is the list of disallowed CIDRs. - These take precedence over allowedCIDRs. - items: - type: string - type: array - type: object - type: object - networkType: - description: 'NetworkType is the plugin that is to be deployed (e.g. - OpenShiftSDN). This should match a value that the cluster-network-operator - understands, or else no networking will be installed. Currently - supported values are: - OpenShiftSDN This field is immutable after - installation.' - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. This field is immutable after installation. - items: - type: string - type: array - serviceNodePortRange: - description: The port range allowed for Services of type NodePort. - If not specified, the default of 30000-32767 will be used. Such - Services without a NodePort specified will have one automatically - allocated from this range. This parameter can be updated after the - cluster is installed. - pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - type: string - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. 
- format: int32 - minimum: 0 - type: integer - type: object - type: array - clusterNetworkMTU: - description: ClusterNetworkMTU is the MTU for inter-pod networking. - type: integer - conditions: - description: 'conditions represents the observations of a network.config - current state. Known .status.conditions.type are: "NetworkTypeMigrationInProgress", - "NetworkTypeMigrationMTUReady", "NetworkTypeMigrationTargetCNIAvailable", - "NetworkTypeMigrationTargetCNIInUse" and "NetworkTypeMigrationOriginalCNIPurged"' - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - migration: - description: Migration contains the cluster network migration configuration. - properties: - mtu: - description: MTU contains the MTU migration configuration. 
- properties: - machine: - description: Machine contains MTU migration configuration - for the machine's uplink. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - network: - description: Network contains MTU migration configuration - for the default network. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - type: object - networkType: - description: 'NetworkType is the target plugin that is to be deployed. - Currently supported values are: OpenShiftSDN, OVNKubernetes' - enum: - - OpenShiftSDN - - OVNKubernetes - type: string - type: object - networkType: - description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. - items: - type: string - type: array - type: object - required: - - spec - type: object - served: true - storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 8ec000b89..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,284 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: networks.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Network - listKind: NetworkList - plural: networks - singular: network - preserveUnknownFields: false - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Network holds cluster-wide information about Network. The canonical - name is `cluster`. It is used to configure the desired network configuration, - such as: IP address pools for services/pod IPs, network plugin, etc. Please - view network.spec for an explanation on what applies when configuring this - resource. \n Compatibility level 1: Stable within a major release for a - minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration. As a general - rule, this SHOULD NOT be read directly. Instead, you should consume - the NetworkStatus, as it indicates the currently deployed configuration. - Currently, most spec fields are immutable after installation. Please - view the individual ones for further details on each. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. This field is immutable - after installation. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. - format: int32 - minimum: 0 - type: integer - type: object - type: array - externalIP: - description: externalIP defines configuration for controllers that - affect Service.ExternalIP. If nil, then ExternalIP is not allowed - to be set. - properties: - autoAssignCIDRs: - description: autoAssignCIDRs is a list of CIDRs from which to - automatically assign Service.ExternalIP. These are assigned - when the service is of type LoadBalancer. In general, this is - only useful for bare-metal clusters. In Openshift 3.x, this - was misleadingly called "IngressIPs". Automatically assigned - External IPs are not affected by any ExternalIPPolicy rules. - Currently, only one entry may be provided. - items: - type: string - type: array - policy: - description: policy is a set of restrictions applied to the ExternalIP - field. If nil or empty, then ExternalIP is not allowed to be - set. - properties: - allowedCIDRs: - description: allowedCIDRs is the list of allowed CIDRs. - items: - type: string - type: array - rejectedCIDRs: - description: rejectedCIDRs is the list of disallowed CIDRs. - These take precedence over allowedCIDRs. - items: - type: string - type: array - type: object - type: object - networkType: - description: 'NetworkType is the plugin that is to be deployed (e.g. - OpenShiftSDN). This should match a value that the cluster-network-operator - understands, or else no networking will be installed. Currently - supported values are: - OpenShiftSDN This field is immutable after - installation.' - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. This field is immutable after installation. - items: - type: string - type: array - serviceNodePortRange: - description: The port range allowed for Services of type NodePort. - If not specified, the default of 30000-32767 will be used. Such - Services without a NodePort specified will have one automatically - allocated from this range. This parameter can be updated after the - cluster is installed. - pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - type: string - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. 
- properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. - format: int32 - minimum: 0 - type: integer - type: object - type: array - clusterNetworkMTU: - description: ClusterNetworkMTU is the MTU for inter-pod networking. - type: integer - conditions: - description: 'conditions represents the observations of a network.config - current state. Known .status.conditions.type are: "NetworkTypeMigrationInProgress", - "NetworkTypeMigrationMTUReady", "NetworkTypeMigrationTargetCNIAvailable", - "NetworkTypeMigrationTargetCNIInUse" and "NetworkTypeMigrationOriginalCNIPurged"' - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - migration: - description: Migration contains the cluster network migration configuration. - properties: - mtu: - description: MTU contains the MTU migration configuration. - properties: - machine: - description: Machine contains MTU migration configuration - for the machine's uplink. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - network: - description: Network contains MTU migration configuration - for the default network. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - type: object - networkType: - description: 'NetworkType is the target plugin that is to be deployed. - Currently supported values are: OpenShiftSDN, OVNKubernetes' - enum: - - OpenShiftSDN - - OVNKubernetes - type: string - type: object - networkType: - description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. - items: - type: string - type: array - type: object - required: - - spec - type: object - served: true - storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml deleted file mode 100644 index ab135b221..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1107 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: nodes.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Node - listKind: NodeList - plural: nodes - singular: node - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Node holds cluster-wide information about node specific features. - \n Compatibility level 1: Stable within a major release for a minimum of - 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - cgroupMode: - description: CgroupMode determines the cgroups version on the node - enum: - v1 - v2 - "" - type: string - workerLatencyProfile: - description: WorkerLatencyProfile determines how fast the kubelet - updates its status and the corresponding reaction of the cluster - enum: - Default - MediumUpdateAverageReaction - LowUpdateSlowReaction - type: string - type: object - status: - description: status holds observed values. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml deleted file mode 100644 index bc588e098..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml +++ /dev/null @@ -1,698 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: oauths.config.openshift.io -spec: - group: config.openshift.io - names: - kind: OAuth - listKind: OAuthList - plural: oauths - singular: oauth - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "OAuth holds cluster-wide information about OAuth. The canonical - name is `cluster`. It is used to configure the integrated OAuth server. - This configuration is only honored when the top level Authentication config - has type set to IntegratedOAuth. \n Compatibility level 1: Stable within - a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - identityProviders: - description: identityProviders is an ordered list of ways for a user - to identify themselves. When this list is empty, no identities are - provisioned for users. - items: - description: IdentityProvider provides identities for users authenticating - using credentials - properties: - basicAuth: - description: basicAuth contains configuration options for the - BasicAuth IdP - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle.
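A minimal Node config object against the schema deleted above (the singleton is conventionally named `cluster`; values are illustrative):

```yaml
apiVersion: config.openshift.io/v1
kind: Node
metadata:
  name: cluster
spec:
  cgroupMode: v2                   # enum: v1, v2, or ""
  workerLatencyProfile: Default    # or MediumUpdateAverageReaction, LowUpdateSlowReaction
```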
It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - tlsClientCert: - description: tlsClientCert is an optional reference to a - secret by name that contains the PEM-encoded TLS client - certificate to present when connecting to the server. - The key "tls.crt" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. - The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - tlsClientKey: - description: tlsClientKey is an optional reference to a - secret by name that contains the PEM-encoded TLS private - key for the client certificate referenced in tlsClientCert. - The key "tls.key" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. - The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - url: - description: url is the remote URL to connect to - type: string - type: object - github: - description: github enables user authentication using GitHub - credentials - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. This can only be configured when hostname is set - to a non-empty value. The namespace for this config map - is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - clientID: - description: clientID is the oauth client ID - type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - hostname: - description: hostname is the optional domain (e.g. "mycompany.com") - for use with a hosted instance of GitHub Enterprise. It - must match the GitHub Enterprise settings value configured - at /setup/settings#hostname. 
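A sketch of a BasicAuth identity provider entry using the fields above; the provider and referenced object names are hypothetical:

```yaml
spec:
  identityProviders:
    - name: my-basic-auth
      mappingMethod: claim
      type: BasicAuth
      basicAuth:
        url: https://idp.example.com/validate
        ca:
          name: basic-auth-ca           # ConfigMap in openshift-config, key "ca.crt"
        tlsClientCert:
          name: basic-auth-client-cert  # Secret in openshift-config, key "tls.crt"
        tlsClientKey:
          name: basic-auth-client-key   # Secret in openshift-config, key "tls.key"
```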
- type: string - organizations: - description: organizations optionally restricts which organizations - are allowed to log in - items: - type: string - type: array - teams: - description: teams optionally restricts which teams are - allowed to log in. Format is /. - items: - type: string - type: array - type: object - gitlab: - description: gitlab enables user authentication using GitLab - credentials - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - clientID: - description: clientID is the oauth client ID - type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - url: - description: url is the oauth server base URL - type: string - type: object - google: - description: google enables user authentication using Google - credentials - properties: - clientID: - description: clientID is the oauth client ID - type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - hostedDomain: - description: hostedDomain is the optional Google App domain - (e.g. "mycompany.com") to restrict logins to - type: string - type: object - htpasswd: - description: htpasswd enables user authentication using an HTPasswd - file to validate credentials - properties: - fileData: - description: fileData is a required reference to a secret - by name containing the data to use as the htpasswd file. - The key "htpasswd" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. If the specified htpasswd data is not - valid, the identity provider is not honored. The namespace - for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - keystone: - description: keystone enables user authentication using keystone - password credentials - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. 
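An HTPasswd provider is the simplest of the types above; a minimal sketch with a hypothetical secret name:

```yaml
spec:
  identityProviders:
    - name: local-users
      mappingMethod: claim
      type: HTPasswd
      htpasswd:
        fileData:
          name: htpasswd-secret   # Secret in openshift-config, key "htpasswd"
```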
The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - domainName: - description: domainName is required for keystone v3 - type: string - tlsClientCert: - description: tlsClientCert is an optional reference to a - secret by name that contains the PEM-encoded TLS client - certificate to present when connecting to the server. - The key "tls.crt" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. - The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - tlsClientKey: - description: tlsClientKey is an optional reference to a - secret by name that contains the PEM-encoded TLS private - key for the client certificate referenced in tlsClientCert. - The key "tls.key" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. - The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - url: - description: url is the remote URL to connect to - type: string - type: object - ldap: - description: ldap enables user authentication using LDAP credentials - properties: - attributes: - description: attributes maps LDAP attributes to identities - properties: - email: - description: email is the list of attributes whose values - should be used as the email address. Optional. If - unspecified, no email is set for the identity - items: - type: string - type: array - id: - description: id is the list of attributes whose values - should be used as the user ID. Required. First non-empty - attribute is used. At least one attribute is required. - If none of the listed attribute have a value, authentication - fails. LDAP standard identity attribute is "dn" - items: - type: string - type: array - name: - description: name is the list of attributes whose values - should be used as the display name. Optional. If unspecified, - no display name is set for the identity LDAP standard - display name attribute is "cn" - items: - type: string - type: array - preferredUsername: - description: preferredUsername is the list of attributes - whose values should be used as the preferred username. - LDAP standard login attribute is "uid" - items: - type: string - type: array - type: object - bindDN: - description: bindDN is an optional DN to bind with during - the search phase. - type: string - bindPassword: - description: bindPassword is an optional reference to a - secret by name containing a password to bind with during - the search phase. The key "bindPassword" is used to locate - the data. If specified and the secret or expected key - is not found, the identity provider is not honored. 
The - namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - insecure: - description: 'insecure, if true, indicates the connection - should not use TLS WARNING: Should not be set to `true` - with the URL scheme "ldaps://" as "ldaps://" URLs always - attempt to connect using TLS, even when `insecure` is - set to `true` When `true`, "ldap://" URLS connect insecurely. - When `false`, "ldap://" URLs are upgraded to a TLS connection - using StartTLS as specified in https://tools.ietf.org/html/rfc2830.' - type: boolean - url: - description: 'url is an RFC 2255 URL which specifies the - LDAP search parameters to use. The syntax of the URL is: - ldap://host:port/basedn?attribute?scope?filter' - type: string - type: object - mappingMethod: - description: mappingMethod determines how identities from this - provider are mapped to users Defaults to "claim" - type: string - name: - description: 'name is used to qualify the identities returned - by this provider. - It MUST be unique and not shared by any - other identity provider used - It MUST be a valid path segment: - name cannot equal "." or ".." or contain "/" or "%" or ":" - Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName' - type: string - openID: - description: openID enables user authentication using OpenID - credentials - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - claims: - description: claims mappings - properties: - email: - description: email is the list of claims whose values - should be used as the email address. Optional. If - unspecified, no email is set for the identity - items: - type: string - type: array - x-kubernetes-list-type: atomic - groups: - description: groups is the list of claims value of which - should be used to synchronize groups from the OIDC - provider to OpenShift for the user. If multiple claims - are specified, the first one with a non-empty value - is used. 
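An LDAP provider sketch; the RFC 2255 URL follows the ldap://host:port/basedn?attribute?scope?filter syntax described above, and all names are hypothetical:

```yaml
spec:
  identityProviders:
    - name: corporate-ldap
      mappingMethod: claim
      type: LDAP
      ldap:
        url: "ldap://ldap.example.com/ou=users,dc=example,dc=com?uid"
        bindDN: "cn=search,dc=example,dc=com"
        bindPassword:
          name: ldap-bind-password   # Secret in openshift-config, key "bindPassword"
        insecure: false
        ca:
          name: ldap-ca
        attributes:
          id: ["dn"]                 # LDAP standard identity attribute
          preferredUsername: ["uid"]
          name: ["cn"]
          email: ["mail"]
```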
- items: - description: OpenIDClaim represents a claim retrieved - from an OpenID provider's tokens or userInfo responses - minLength: 1 - type: string - type: array - x-kubernetes-list-type: atomic - name: - description: name is the list of claims whose values - should be used as the display name. Optional. If unspecified, - no display name is set for the identity - items: - type: string - type: array - x-kubernetes-list-type: atomic - preferredUsername: - description: preferredUsername is the list of claims - whose values should be used as the preferred username. - If unspecified, the preferred username is determined - from the value of the sub claim - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - clientID: - description: clientID is the oauth client ID - type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - extraAuthorizeParameters: - additionalProperties: - type: string - description: extraAuthorizeParameters are any custom parameters - to add to the authorize request. - type: object - extraScopes: - description: extraScopes are any scopes to request in addition - to the standard "openid" scope. - items: - type: string - type: array - issuer: - description: issuer is the URL that the OpenID Provider - asserts as its Issuer Identifier. It must use the https - scheme with no query or fragment component. - type: string - type: object - requestHeader: - description: requestHeader enables user authentication using - request header credentials - properties: - ca: - description: ca is a required reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. Specifically, it allows verification - of incoming requests to prevent header spoofing. The key - "ca.crt" is used to locate the data. If the config map - or expected key is not found, the identity provider is - not honored. If the specified ca data is not valid, the - identity provider is not honored. The namespace for this - config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - challengeURL: - description: challengeURL is a URL to redirect unauthenticated - /authorize requests to Unauthenticated requests from OAuth - clients which expect WWW-Authenticate challenges will - be redirected here. ${url} is replaced with the current - URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} - ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} - Required when challenge is set to true. - type: string - clientCommonNames: - description: clientCommonNames is an optional list of common - names to require a match from. If empty, any client certificate - validated against the clientCA bundle is considered authoritative. 
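An OpenID provider sketch; issuer must use the https scheme with no query or fragment, and the claim lists mirror the schema above (hypothetical names):

```yaml
spec:
  identityProviders:
    - name: oidc-provider
      mappingMethod: claim
      type: OpenID
      openID:
        issuer: https://login.example.com
        clientID: openshift
        clientSecret:
          name: oidc-client-secret   # Secret in openshift-config, key "clientSecret"
        extraScopes: ["email", "profile"]
        claims:
          preferredUsername: ["preferred_username"]
          name: ["name"]
          email: ["email"]
          groups: ["groups"]
```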
- items: - type: string - type: array - emailHeaders: - description: emailHeaders is the set of headers to check - for the email address - items: - type: string - type: array - headers: - description: headers is the set of headers to check for - identity information - items: - type: string - type: array - loginURL: - description: loginURL is a URL to redirect unauthenticated - /authorize requests to Unauthenticated requests from OAuth - clients which expect interactive logins will be redirected - here ${url} is replaced with the current URL, escaped - to be safe in a query parameter https://www.example.com/sso-login?then=${url} - ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} - Required when login is set to true. - type: string - nameHeaders: - description: nameHeaders is the set of headers to check - for the display name - items: - type: string - type: array - preferredUsernameHeaders: - description: preferredUsernameHeaders is the set of headers - to check for the preferred username - items: - type: string - type: array - type: object - type: - description: type identifies the identity provider type for - this entry. - type: string - type: object - type: array - x-kubernetes-list-type: atomic - templates: - description: templates allow you to customize pages like the login - page. - properties: - error: - description: error is the name of a secret that specifies a go - template to use to render error pages during the authentication - or grant flow. The key "errors.html" is used to locate the template - data. If specified and the secret or expected key is not found, - the default error page is used. If the specified template is - not valid, the default error page is used. If unspecified, the - default error page is used. The namespace for this secret is - openshift-config. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - login: - description: login is the name of a secret that specifies a go - template to use to render the login page. The key "login.html" - is used to locate the template data. If specified and the secret - or expected key is not found, the default login page is used. - If the specified template is not valid, the default login page - is used. If unspecified, the default login page is used. The - namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - providerSelection: - description: providerSelection is the name of a secret that specifies - a go template to use to render the provider selection page. - The key "providers.html" is used to locate the template data. - If specified and the secret or expected key is not found, the - default provider selection page is used. If the specified template - is not valid, the default provider selection page is used. If - unspecified, the default provider selection page is used. The - namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - type: object - tokenConfig: - description: tokenConfig contains options for authorization and access - tokens - properties: - accessTokenInactivityTimeout: - description: "accessTokenInactivityTimeout defines the token inactivity - timeout for tokens granted by any client. 
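The templates block above customizes the login-flow pages; a sketch with hypothetical secret names, each holding a go template under the key noted in the schema:

```yaml
spec:
  templates:
    login:
      name: custom-login-template       # key "login.html"
    providerSelection:
      name: custom-providers-template   # key "providers.html"
    error:
      name: custom-error-template       # key "errors.html"
```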
The value represents - the maximum amount of time that can occur between consecutive - uses of the token. Tokens become invalid if they are not used - within this temporal window. The user will need to acquire a - new token to regain access once a token times out. Takes valid - time duration string such as \"5m\", \"1.5h\" or \"2h45m\". - The minimum allowed value for duration is 300s (5 minutes). - If the timeout is configured per client, then that value takes - precedence. If the timeout value is not specified and the client - does not override the value, then tokens are valid until their - lifetime. \n WARNING: existing tokens' timeout will not be affected - (lowered) by changing this value" - type: string - accessTokenInactivityTimeoutSeconds: - description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED: - setting this field has no effect.' - format: int32 - type: integer - accessTokenMaxAgeSeconds: - description: accessTokenMaxAgeSeconds defines the maximum age - of access tokens - format: int32 - type: integer - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml deleted file mode 100644 index ec2c7af3f..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml +++ /dev/null @@ -1,68 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: projects.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Project - listKind: ProjectList - plural: projects - singular: project - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Project holds cluster-wide information about Project. The canonical - name is `cluster` \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
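A tokenConfig sketch for the OAuth schema above; the inactivity timeout is a duration string and must be at least 300s (5 minutes):

```yaml
spec:
  tokenConfig:
    accessTokenMaxAgeSeconds: 86400      # access tokens expire after 24h
    accessTokenInactivityTimeout: "10m"  # invalidated after 10 minutes of disuse
```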
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - projectRequestMessage: - description: projectRequestMessage is the string presented to a user - if they are unable to request a project via the projectrequest api - endpoint - type: string - projectRequestTemplate: - description: projectRequestTemplate is the template to use for creating - projects in response to projectrequest. This must point to a template - in 'openshift-config' namespace. It is optional. If it is not specified, - a default template is used. - properties: - name: - description: name is the metadata.name of the referenced project - request template - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-CustomNoUpgrade.crd.yaml deleted file mode 100644 index f7a427662..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,130 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: schedulers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Scheduler - listKind: SchedulerList - plural: schedulers - singular: scheduler - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Scheduler holds cluster-wide config information to run the Kubernetes - Scheduler and influence its placement decisions. The canonical name for - this config is `cluster`. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - defaultNodeSelector: - description: 'defaultNodeSelector helps set the cluster-wide default - node selector to restrict pod placement to specific nodes. 
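A minimal Project config against the schema deleted above (canonical name `cluster`; the message and template name are illustrative):

```yaml
apiVersion: config.openshift.io/v1
kind: Project
metadata:
  name: cluster
spec:
  projectRequestMessage: "Contact your administrator to request a project."
  projectRequestTemplate:
    name: project-request   # Template in the openshift-config namespace
```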
This - is applied to the pods created in all namespaces and creates an - intersection with any existing nodeSelectors already set on a pod, - additionally constraining that pod''s selector. For example, defaultNodeSelector: - "type=user-node,region=east" would set nodeSelector field in pod - spec to "type=user-node,region=east" to all pods created in all - namespaces. Namespaces having project-wide node selectors won''t - be impacted even if this field is set. This adds an annotation section - to the namespace. For example, if a new namespace is created with - node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: - type=user-node,region=east gets added to the project. When the openshift.io/node-selector - annotation is set on the project the value is used in preference - to the value we are setting for defaultNodeSelector field. For instance, - openshift.io/node-selector: "type=user-node,region=west" means that - the default of "type=user-node,region=east" set in defaultNodeSelector - would not be applied.' - type: string - mastersSchedulable: - description: 'MastersSchedulable allows masters nodes to be schedulable. - When this flag is turned on, all the master nodes in the cluster - will be made schedulable, so that workload pods can run on them. - The default value for this field is false, meaning none of the master - nodes are schedulable. Important Note: Once the workload pods start - running on the master nodes, extreme care must be taken to ensure - that cluster-critical control plane components are not impacted. - Please turn on this field after doing due diligence.' - type: boolean - policy: - description: 'DEPRECATED: the scheduler Policy API has been deprecated - and will be removed in a future release. policy is a reference to - a ConfigMap containing scheduler policy which has user specified - predicates and priorities. If this ConfigMap is not available scheduler - will default to use DefaultAlgorithmProvider. The namespace for - this configmap is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - profile: - description: "profile sets which scheduling profile should be set - in order to configure scheduling decisions for new pods. \n Valid - values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" - Defaults to \"LowNodeUtilization\"" - enum: - - "" - - LowNodeUtilization - - HighNodeUtilization - - NoScoring - type: string - profileCustomizations: - description: profileCustomizations contains configuration for modifying - the default behavior of existing scheduler profiles. - properties: - dynamicResourceAllocation: - description: dynamicResourceAllocation allows to enable or disable - dynamic resource allocation within the scheduler. Dynamic resource - allocation is an API for requesting and sharing resources between - pods and containers inside a pod. Third-party resource drivers - are responsible for tracking and allocating resources. Different - kinds of resources support arbitrary parameters for defining - requirements and initialization. Valid values are Enabled, Disabled - and omitted. When omitted, this means no opinion and the platform - is left to choose a reasonable default, which is subject to - change over time. The current default is Disabled. - enum: - - "" - - Enabled - - Disabled - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. 
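A Scheduler config sketch exercising the fields above; note that profileCustomizations ships only in the CustomNoUpgrade and TechPreviewNoUpgrade variants of this CRD, and the selector value is illustrative:

```yaml
apiVersion: config.openshift.io/v1
kind: Scheduler
metadata:
  name: cluster
spec:
  defaultNodeSelector: "type=user-node,region=east"
  mastersSchedulable: false
  profile: HighNodeUtilization           # "", LowNodeUtilization, HighNodeUtilization, NoScoring
  profileCustomizations:
    dynamicResourceAllocation: Enabled   # "", Enabled, Disabled
```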
They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-Default.crd.yaml deleted file mode 100644 index aa89c10a4..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-Default.crd.yaml +++ /dev/null @@ -1,109 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: schedulers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Scheduler - listKind: SchedulerList - plural: schedulers - singular: scheduler - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Scheduler holds cluster-wide config information to run the Kubernetes - Scheduler and influence its placement decisions. The canonical name for - this config is `cluster`. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - defaultNodeSelector: - description: 'defaultNodeSelector helps set the cluster-wide default - node selector to restrict pod placement to specific nodes. This - is applied to the pods created in all namespaces and creates an - intersection with any existing nodeSelectors already set on a pod, - additionally constraining that pod''s selector. For example, defaultNodeSelector: - "type=user-node,region=east" would set nodeSelector field in pod - spec to "type=user-node,region=east" to all pods created in all - namespaces. Namespaces having project-wide node selectors won''t - be impacted even if this field is set. This adds an annotation section - to the namespace. For example, if a new namespace is created with - node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: - type=user-node,region=east gets added to the project. When the openshift.io/node-selector - annotation is set on the project the value is used in preference - to the value we are setting for defaultNodeSelector field. For instance, - openshift.io/node-selector: "type=user-node,region=west" means that - the default of "type=user-node,region=east" set in defaultNodeSelector - would not be applied.' 
- type: string - mastersSchedulable: - description: 'MastersSchedulable allows masters nodes to be schedulable. - When this flag is turned on, all the master nodes in the cluster - will be made schedulable, so that workload pods can run on them. - The default value for this field is false, meaning none of the master - nodes are schedulable. Important Note: Once the workload pods start - running on the master nodes, extreme care must be taken to ensure - that cluster-critical control plane components are not impacted. - Please turn on this field after doing due diligence.' - type: boolean - policy: - description: 'DEPRECATED: the scheduler Policy API has been deprecated - and will be removed in a future release. policy is a reference to - a ConfigMap containing scheduler policy which has user specified - predicates and priorities. If this ConfigMap is not available scheduler - will default to use DefaultAlgorithmProvider. The namespace for - this configmap is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - profile: - description: "profile sets which scheduling profile should be set - in order to configure scheduling decisions for new pods. \n Valid - values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" - Defaults to \"LowNodeUtilization\"" - enum: - - "" - - LowNodeUtilization - - HighNodeUtilization - - NoScoring - type: string - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 071b9dff9..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,130 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: schedulers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Scheduler - listKind: SchedulerList - plural: schedulers - singular: scheduler - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Scheduler holds cluster-wide config information to run the Kubernetes - Scheduler and influence its placement decisions. The canonical name for - this config is `cluster`. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - defaultNodeSelector: - description: 'defaultNodeSelector helps set the cluster-wide default - node selector to restrict pod placement to specific nodes. This - is applied to the pods created in all namespaces and creates an - intersection with any existing nodeSelectors already set on a pod, - additionally constraining that pod''s selector. For example, defaultNodeSelector: - "type=user-node,region=east" would set nodeSelector field in pod - spec to "type=user-node,region=east" to all pods created in all - namespaces. Namespaces having project-wide node selectors won''t - be impacted even if this field is set. This adds an annotation section - to the namespace. For example, if a new namespace is created with - node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: - type=user-node,region=east gets added to the project. When the openshift.io/node-selector - annotation is set on the project the value is used in preference - to the value we are setting for defaultNodeSelector field. For instance, - openshift.io/node-selector: "type=user-node,region=west" means that - the default of "type=user-node,region=east" set in defaultNodeSelector - would not be applied.' - type: string - mastersSchedulable: - description: 'MastersSchedulable allows masters nodes to be schedulable. - When this flag is turned on, all the master nodes in the cluster - will be made schedulable, so that workload pods can run on them. - The default value for this field is false, meaning none of the master - nodes are schedulable. Important Note: Once the workload pods start - running on the master nodes, extreme care must be taken to ensure - that cluster-critical control plane components are not impacted. - Please turn on this field after doing due diligence.' - type: boolean - policy: - description: 'DEPRECATED: the scheduler Policy API has been deprecated - and will be removed in a future release. policy is a reference to - a ConfigMap containing scheduler policy which has user specified - predicates and priorities. If this ConfigMap is not available scheduler - will default to use DefaultAlgorithmProvider. The namespace for - this configmap is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - profile: - description: "profile sets which scheduling profile should be set - in order to configure scheduling decisions for new pods. \n Valid - values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" - Defaults to \"LowNodeUtilization\"" - enum: - - "" - - LowNodeUtilization - - HighNodeUtilization - - NoScoring - type: string - profileCustomizations: - description: profileCustomizations contains configuration for modifying - the default behavior of existing scheduler profiles. 
- properties: - dynamicResourceAllocation: - description: dynamicResourceAllocation allows to enable or disable - dynamic resource allocation within the scheduler. Dynamic resource - allocation is an API for requesting and sharing resources between - pods and containers inside a pod. Third-party resource drivers - are responsible for tracking and allocating resources. Different - kinds of resources support arbitrary parameters for defining - requirements and initialization. Valid values are Enabled, Disabled - and omitted. When omitted, this means no opinion and the platform - is left to choose a reasonable default, which is subject to - change over time. The current default is Disabled. - enum: - - "" - - Enabled - - Disabled - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_openshift-controller-manager-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_openshift-controller-manager-operator_01_build.crd.yaml deleted file mode 100644 index 94e7f015a..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_openshift-controller-manager-operator_01_build.crd.yaml +++ /dev/null @@ -1,431 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - capability.openshift.io/name: Build - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: builds.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Build - listKind: BuildList - plural: builds - singular: build - preserveUnknownFields: false - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Build configures the behavior of OpenShift builds for the entire - cluster. This includes default settings that can be overridden in BuildConfig - objects, and overrides which are applied to all builds. \n The canonical - name is \"cluster\" \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec holds user-settable values for the build controller - configuration - properties: - additionalTrustedCA: - description: "AdditionalTrustedCA is a reference to a ConfigMap containing - additional CAs that should be trusted for image pushes and pulls - during builds. The namespace for this config map is openshift-config. 
- \n DEPRECATED: Additional CAs for image pull and push should be - set on image.config.openshift.io/cluster instead." - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - buildDefaults: - description: BuildDefaults controls the default information for Builds - properties: - defaultProxy: - description: "DefaultProxy contains the default proxy settings - for all build operations, including image pull/push and source - download. \n Values can be overridden by setting the `HTTP_PROXY`, - `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build - config's strategy." - properties: - httpProxy: - description: httpProxy is the URL of the proxy for HTTP requests. Empty - means unset and will not result in an env var. - type: string - httpsProxy: - description: httpsProxy is the URL of the proxy for HTTPS - requests. Empty means unset and will not result in an env - var. - type: string - noProxy: - description: noProxy is a comma-separated list of hostnames - and/or CIDRs and/or IPs for which the proxy should not be - used. Empty means unset and will not result in an env var. - type: string - readinessEndpoints: - description: readinessEndpoints is a list of endpoints used - to verify readiness of the proxy. - items: - type: string - type: array - trustedCA: - description: "trustedCA is a reference to a ConfigMap containing - a CA certificate bundle. The trustedCA field should only - be consumed by a proxy validator. The validator is responsible - for reading the certificate bundle from the required key - \"ca-bundle.crt\", merging it with the system default trust - bundle, and writing the merged trust bundle to a ConfigMap - named \"trusted-ca-bundle\" in the \"openshift-config-managed\" - namespace. Clients that expect to make proxy connections - must use the trusted-ca-bundle for all HTTPS requests to - the proxy, and may use the trusted-ca-bundle for non-proxy - HTTPS requests as well. \n The namespace for the ConfigMap - referenced by trustedCA is \"openshift-config\". Here is - an example ConfigMap (in yaml): \n apiVersion: v1 kind: - ConfigMap metadata: name: user-ca-bundle namespace: openshift-config - data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom - CA certificate bundle. -----END CERTIFICATE-----" - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - type: object - env: - description: Env is a set of default environment variables that - will be applied to the build if the specified variables do not - exist on the build - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. If - a variable cannot be resolved, the reference in the input - string will be unchanged. Double $$ are reduced to a single - $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty.
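A Build config sketch covering the buildDefaults proxy and env fields above; the proxy endpoints and the env var are illustrative:

```yaml
apiVersion: config.openshift.io/v1
kind: Build
metadata:
  name: cluster
spec:
  buildDefaults:
    defaultProxy:
      httpProxy: http://proxy.example.com:3128
      httpsProxy: https://proxy.example.com:3128
      noProxy: .cluster.local,.svc,localhost
      trustedCA:
        name: user-ca-bundle          # ConfigMap in openshift-config, key "ca-bundle.crt"
    env:
      - name: GIT_LFS_SKIP_SMUDGE     # applied only if the build does not set it itself
        value: "1"
```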
- properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the - exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - gitProxy: - description: "GitProxy contains the proxy settings for git operations - only. If set, this will override any Proxy settings for all - git commands, such as git clone. \n Values that are not set - here will be inherited from DefaultProxy." - properties: - httpProxy: - description: httpProxy is the URL of the proxy for HTTP requests. Empty - means unset and will not result in an env var. - type: string - httpsProxy: - description: httpsProxy is the URL of the proxy for HTTPS - requests. Empty means unset and will not result in an env - var. - type: string - noProxy: - description: noProxy is a comma-separated list of hostnames - and/or CIDRs and/or IPs for which the proxy should not be - used. Empty means unset and will not result in an env var. - type: string - readinessEndpoints: - description: readinessEndpoints is a list of endpoints used - to verify readiness of the proxy. 
- items: - type: string - type: array - trustedCA: - description: "trustedCA is a reference to a ConfigMap containing - a CA certificate bundle. The trustedCA field should only - be consumed by a proxy validator. The validator is responsible - for reading the certificate bundle from the required key - \"ca-bundle.crt\", merging it with the system default trust - bundle, and writing the merged trust bundle to a ConfigMap - named \"trusted-ca-bundle\" in the \"openshift-config-managed\" - namespace. Clients that expect to make proxy connections - must use the trusted-ca-bundle for all HTTPS requests to - the proxy, and may use the trusted-ca-bundle for non-proxy - HTTPS requests as well. \n The namespace for the ConfigMap - referenced by trustedCA is \"openshift-config\". Here is - an example ConfigMap (in yaml): \n apiVersion: v1 kind: - ConfigMap metadata: name: user-ca-bundle namespace: openshift-config - data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom - CA certificate bundle. -----END CERTIFICATE-----" - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - type: object - imageLabels: - description: ImageLabels is a list of docker labels that are applied - to the resulting image. User can override a default label by - providing a label with the same name in their Build/BuildConfig. - items: - properties: - name: - description: Name defines the name of the label. It must - have non-zero length. - type: string - value: - description: Value defines the literal value of the label. - type: string - type: object - type: array - resources: - description: Resources defines resource requirements to execute - the build. - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - buildOverrides: - description: BuildOverrides controls override settings for builds - properties: - forcePull: - description: ForcePull overrides, if set, the equivalent value - in the builds, i.e. false disables force pull for all builds, - true enables force pull for all builds, independently of what - each build specifies itself - type: boolean - imageLabels: - description: ImageLabels is a list of docker labels that are applied - to the resulting image. If user provided a label in their Build/BuildConfig - with the same name as one in this list, the user's label will - be overwritten. - items: - properties: - name: - description: Name defines the name of the label. It must - have non-zero length. - type: string - value: - description: Value defines the literal value of the label. - type: string - type: object - type: array - nodeSelector: - additionalProperties: - type: string - description: NodeSelector is a selector which must be true for - the build pod to fit on a node - type: object - tolerations: - description: Tolerations is a list of Tolerations that will override - any existing tolerations set on a build pod. - items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple <key,value,effect> using - the matching operator <operator>. - properties: - effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, allowed - values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match - all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to - the value. Valid operators are Exists and Equal. Defaults - to Equal. Exists is equivalent to wildcard for value, - so that a pod can tolerate all taints of a particular - category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the taint - forever (do not evict). Zero and negative values will - be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string.
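The toleration items described above follow the standard core/v1 Toleration shape. A hedged sketch of an override list — the taint keys and values are placeholders chosen for illustration:

```
# Illustrative tolerations matching the schema above (placeholder values).
tolerations:
  - key: nvidia.com/gpu            # taint key the toleration applies to
    operator: Equal                # Exists or Equal; defaults to Equal
    value: present                 # must be empty when operator is Exists
    effect: NoSchedule             # empty would match all taint effects
  - key: node.kubernetes.io/unreachable
    operator: Exists               # wildcard for value
    effect: NoExecute
    tolerationSeconds: 300         # only meaningful for effect NoExecute
```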
- type: string - type: object - type: array - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/custom.apiserver.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.apiserver.testsuite.yaml deleted file mode 100644 index 5e2dea3ea..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.apiserver.testsuite.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] APIServer" -crd: 0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create encrypt with aescbc - initial: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - encryption: - type: aescbc - expected: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - audit: - profile: Default - encryption: - type: aescbc - - name: Should be able to create encrypt with aesgcm - initial: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - encryption: - type: aesgcm - expected: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - audit: - profile: Default - encryption: - type: aesgcm diff --git a/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml deleted file mode 100644 index 92e7d72e6..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml +++ /dev/null @@ -1,284 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] Authentication" -crd: 0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml -tests: - onCreate: - - name: Should be able to create a minimal Authentication - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} # No spec is required for a Authentication - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} - - name: Should be able to use the OIDC type - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - - name: Cannot set username claim prefix with policy NoPrefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - prefix: - prefixString: "myoidc:" - expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" - - name: Can set username claim prefix with policy Prefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - prefix: - prefixString: "myoidc:" - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - prefix: - prefixString: 
"myoidc:" - - name: Cannot leave username claim prefix blank with policy Prefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" - - name: Can set OIDC providers with no username prefixing - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - onUpdate: - - name: Updating OIDC provider with a client that's not in the status - initial: &initConfig | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: someclient - - componentNamespace: namespace - componentName: name - clientID: legitclient - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - updated: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: someclient - - componentNamespace: namespace - componentName: name - clientID: legitclient - - componentNamespace: dif-namespace # new client here - componentName: tehName - clientID: cool-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" - - name: Updating OIDC provider with a client that's different from the previous one - initial: *initConfig - updated: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: dif-namespace - componentName: tehName - clientID: cool-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" - - name: Updating previously existing client - initial: *initConfig - updated: &prevExistingUpdated | - 
apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *prevExistingUpdated - - name: Removing a configured client from the status (== component unregister) - initial: *initConfig - updated: &removeFromStatus | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - - componentNamespace: namespace - componentName: name - clientID: legitclient - status: - oidcClients: - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *removeFromStatus - - name: Simply add a valid client - initial: *initConfig - updated: &addClient | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - - componentNamespace: namespace - componentName: name - clientID: legitclient - - componentNamespace: namespace2 - componentName: name3 - clientID: justavalidclient - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *addClient diff --git a/vendor/github.com/openshift/api/config/v1/custom.clusterversion.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.clusterversion.testsuite.yaml deleted file mode 100644 index f3090558b..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.clusterversion.testsuite.yaml +++ /dev/null @@ -1,472 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] ClusterVersion" -crd: 0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ClusterVersion - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - - name: Should allow image to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - - name: Should allow version to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - version: 4.11.1 - - name: Should allow architecture to be empty - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - 
desiredUpdate: - architecture: "" - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: "" - version: 4.11.1 - - name: Should allow architecture and version to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - - name: Version must be set if architecture is set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - expectedError: "Version must be set if Architecture is set" - - name: Should not allow image and architecture to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities baremetal and MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities baremetal without MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability - - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities marketplace and OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities marketplace without OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability - - name: Should be able to set a custom signature store - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus.ocp.com" - 
expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus.ocp.com" - - name: Should be able to set multiple custom signature store - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus1.ocp.com" - - url: "https://osus2.ocp.com" - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus1.ocp.com" - - url: "https://osus2.ocp.com" - - name: Invalid custom signature store should throw error - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "osus1.ocp.com" - expectedError: "url must be a valid absolute URL" - - name: Should be able to unset the signature stores - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: [] - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: [] - onUpdate: - - name: Should not allow image to be set if architecture set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should not allow architecture to be set if image set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, and implicitly enabled MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - MachineAPI - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - MachineAPI - - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, with the Machine API capability - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - 
apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - - name: Should not be able to add the baremetal capability with a ClusterVersion with base capability None, and without MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability - - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, and implicitly enabled OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, with the OperatorLifecycleManager capability - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - - name: Should not be able to add the marketplace capability with a ClusterVersion with base capability None, and without OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - 
additionalEnabledCapabilities: - - marketplace - expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability diff --git a/vendor/github.com/openshift/api/config/v1/custom.dns.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.dns.testsuite.yaml deleted file mode 100644 index ab1a123b6..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.dns.testsuite.yaml +++ /dev/null @@ -1,104 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Custom] DNS" -crd: 0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal DNS - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: {} # No spec is required for a DNS - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: {} - - name: Should be able to specify an AWS role ARN for a private hosted zone - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - - name: Should not be able to specify unsupported platform - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: Azure - azure: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expectedError: "Invalid value: \"string\": allowed values are '' and 'AWS'" - - name: Should not be able to specify invalid AWS role ARN - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - metadata: - name: cluster - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam:bad:123456789012:role/foo - expectedError: "DNS.config.openshift.io \"cluster\" is invalid: spec.platform.aws.privateZoneIAMRole: Invalid value: \"arn:aws:iam:bad:123456789012:role/foo\": spec.platform.aws.privateZoneIAMRole in body should match '^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\\/.*$'" - - name: Should not be able to specify different type and platform - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expectedError: "Invalid value: \"object\": aws configuration is required when platform is AWS, and forbidden otherwise" - onUpdate: - - name: Can switch from empty (default), to AWS - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" - updated: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - - name: Upgrade case is valid - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: {} # No spec is required for a DNS - updated: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" diff --git a/vendor/github.com/openshift/api/config/v1/custom.infrastructure.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.infrastructure.testsuite.yaml 
deleted file mode 100644 index 24433f4f7..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.infrastructure.testsuite.yaml +++ /dev/null @@ -1,321 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Custom] Infrastructure" -crd: 0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Infrastructure - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} # No spec is required for a Infrastructure - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - onUpdate: - - name: Should not be able to modify an existing GCP ResourceLabels Label - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "changed"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" - - name: Should not be able to add a Label to an existing GCP ResourceLabels - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "new", value: "entry"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" - - name: Should not be able to remove a Label from an existing GCP ResourceLabels - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "new", value: "entry"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" - - name: Should not be able to add GCP ResourceLabels to an empty platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - gcp: - resourceLabels: - - {key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation" - - name: Should not be able to remove GCP ResourceLabels from platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: 
Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: {} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation" - - name: Should not have label key start with openshift-io for GCP ResourceLabels in platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "openshift-io-created-cluster", value: "true"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with either `openshift-io` or `kubernetes-io`" - - name: Should not have label key start with kubernetes-io for GCP ResourceLabels in platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "kubernetes-io-created-cluster", value: "true"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with either `openshift-io` or `kubernetes-io`" - - name: Should not be able to modify an existing GCP ResourceTags Tag - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "changed"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to add a Tag to an existing GCP ResourceTags - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - - {parentID: "test-project-123", key: "new", value: "tag"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to remove a Tag from an existing GCP ResourceTags - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", 
key: "key1", value: "value1"} - - {parentID: "test-project-123", key: "key2", value: "value2"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key1", value: "value1"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to add GCP ResourceTags to an empty platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may only be configured during installation" - - name: Should not be able to remove GCP ResourceTags from platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: {} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may only be configured during installation" - - name: Should not be able to modify ParentID of a Tag in the GCP ResourceTags - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "test-project-123", key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" diff --git a/vendor/github.com/openshift/api/config/v1/custom.network.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.network.testsuite.yaml deleted file mode 100644 index 59e9fbdff..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.network.testsuite.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] Network" -crd: 0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to set status conditions - initial: | - apiVersion: config.openshift.io/v1 - kind: Network - spec: {} # No spec is required for a Network - status: - conditions: - - type: NetworkTypeMigrationInProgress - status: "False" - reason: "Reason" - message: "Message" - lastTransitionTime: "2023-10-25T12:00:00Z" - expected: | - apiVersion: config.openshift.io/v1 - kind: Network - spec: {} - status: - conditions: - - type: NetworkTypeMigrationInProgress - status: "False" - reason: "Reason" - message: "Message" - lastTransitionTime: "2023-10-25T12:00:00Z" diff --git 
a/vendor/github.com/openshift/api/config/v1/custom.scheduler.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.scheduler.testsuite.yaml deleted file mode 100644 index 57b546b63..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.scheduler.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Scheduler" -crd: 0000_10_config-operator_01_scheduler-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Scheduler - initial: | - apiVersion: config.openshift.io/v1 - kind: Scheduler - spec: {} # No spec is required for a Scheduler - expected: | - apiVersion: config.openshift.io/v1 - kind: Scheduler - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go index 4ff5208f2..f99454758 100644 --- a/vendor/github.com/openshift/api/config/v1/doc.go +++ b/vendor/github.com/openshift/api/config/v1/doc.go @@ -1,6 +1,7 @@ // +k8s:deepcopy-gen=package,register // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true +// +openshift:featuregated-schema-gen=true // +kubebuilder:validation:Optional // +groupName=config.openshift.io diff --git a/vendor/github.com/openshift/api/config/v1/feature_gates.go b/vendor/github.com/openshift/api/config/v1/feature_gates.go deleted file mode 100644 index 7b7cbf640..000000000 --- a/vendor/github.com/openshift/api/config/v1/feature_gates.go +++ /dev/null @@ -1,464 +0,0 @@ -package v1 - -// FeatureGateDescription is a golang-only interface used to contains details for a feature gate. -type FeatureGateDescription struct { - // FeatureGateAttributes is the information that appears in the API - FeatureGateAttributes FeatureGateAttributes - - // OwningJiraComponent is the jira component that owns most of the impl and first assignment for the bug. - // This is the team that owns the feature long term. - OwningJiraComponent string - // ResponsiblePerson is the person who is on the hook for first contact. This is often, but not always, a team lead. - // It is someone who can make the promise on the behalf of the team. - ResponsiblePerson string - // OwningProduct is the product that owns the lifecycle of the gate. 
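The FeatureGateDescription entries in the deleted feature_gates.go below pair each gate name with its owning component and responsible person. For reference, gate names like these are what a cluster admin enables through the FeatureGate config resource; a hedged sketch of such a manifest, assuming the CustomNoUpgrade feature set and borrowing one gate name from the deleted list purely as an example:

```
# Illustrative FeatureGate manifest; the CustomNoUpgrade feature set and
# its enabled list are the config.openshift.io/v1 selection mechanism,
# shown here as an assumed usage example, not part of this change.
apiVersion: config.openshift.io/v1
kind: FeatureGate
metadata:
  name: cluster
spec:
  featureSet: CustomNoUpgrade
  customNoUpgrade:
    enabled:
      - GatewayAPI   # FeatureGateGatewayAPI in the deleted Go source below
```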
- OwningProduct OwningProduct -} - -type OwningProduct string - -var ( - ocpSpecific = OwningProduct("OCP") - kubernetes = OwningProduct("Kubernetes") -) - -var ( - FeatureGateValidatingAdmissionPolicy = FeatureGateName("ValidatingAdmissionPolicy") - validatingAdmissionPolicy = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateValidatingAdmissionPolicy, - }, - OwningJiraComponent: "kube-apiserver", - ResponsiblePerson: "benluddy", - OwningProduct: kubernetes, - } - - FeatureGateGatewayAPI = FeatureGateName("GatewayAPI") - gateGatewayAPI = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateGatewayAPI, - }, - OwningJiraComponent: "Routing", - ResponsiblePerson: "miciah", - OwningProduct: ocpSpecific, - } - - FeatureGateOpenShiftPodSecurityAdmission = FeatureGateName("OpenShiftPodSecurityAdmission") - openShiftPodSecurityAdmission = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateOpenShiftPodSecurityAdmission, - }, - OwningJiraComponent: "auth", - ResponsiblePerson: "stlaz", - OwningProduct: ocpSpecific, - } - - FeatureGateExternalCloudProvider = FeatureGateName("ExternalCloudProvider") - externalCloudProvider = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateExternalCloudProvider, - }, - OwningJiraComponent: "cloud-provider", - ResponsiblePerson: "jspeed", - OwningProduct: ocpSpecific, - } - - FeatureGateExternalCloudProviderAzure = FeatureGateName("ExternalCloudProviderAzure") - externalCloudProviderAzure = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateExternalCloudProviderAzure, - }, - OwningJiraComponent: "cloud-provider", - ResponsiblePerson: "jspeed", - OwningProduct: ocpSpecific, - } - - FeatureGateExternalCloudProviderGCP = FeatureGateName("ExternalCloudProviderGCP") - externalCloudProviderGCP = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateExternalCloudProviderGCP, - }, - OwningJiraComponent: "cloud-provider", - ResponsiblePerson: "jspeed", - OwningProduct: ocpSpecific, - } - - FeatureGateExternalCloudProviderExternal = FeatureGateName("ExternalCloudProviderExternal") - externalCloudProviderExternal = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateExternalCloudProviderExternal, - }, - OwningJiraComponent: "cloud-provider", - ResponsiblePerson: "elmiko", - OwningProduct: ocpSpecific, - } - - FeatureGateCSIDriverSharedResource = FeatureGateName("CSIDriverSharedResource") - csiDriverSharedResource = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateCSIDriverSharedResource, - }, - OwningJiraComponent: "builds", - ResponsiblePerson: "adkaplan", - OwningProduct: ocpSpecific, - } - - FeatureGateBuildCSIVolumes = FeatureGateName("BuildCSIVolumes") - buildCSIVolumes = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateBuildCSIVolumes, - }, - OwningJiraComponent: "builds", - ResponsiblePerson: "adkaplan", - OwningProduct: ocpSpecific, - } - - FeatureGateNodeSwap = FeatureGateName("NodeSwap") - nodeSwap = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateNodeSwap, - }, - OwningJiraComponent: "node", - ResponsiblePerson: "ehashman", - OwningProduct: kubernetes, - } - - FeatureGateMachineAPIProviderOpenStack = FeatureGateName("MachineAPIProviderOpenStack") - 
machineAPIProviderOpenStack = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMachineAPIProviderOpenStack, - }, - OwningJiraComponent: "openstack", - ResponsiblePerson: "egarcia", - OwningProduct: ocpSpecific, - } - - FeatureGateInsightsConfigAPI = FeatureGateName("InsightsConfigAPI") - insightsConfigAPI = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateInsightsConfigAPI, - }, - OwningJiraComponent: "insights", - ResponsiblePerson: "tremes", - OwningProduct: ocpSpecific, - } - - FeatureGateDynamicResourceAllocation = FeatureGateName("DynamicResourceAllocation") - dynamicResourceAllocation = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateDynamicResourceAllocation, - }, - OwningJiraComponent: "scheduling", - ResponsiblePerson: "jchaloup", - OwningProduct: kubernetes, - } - - FeatureGateAzureWorkloadIdentity = FeatureGateName("AzureWorkloadIdentity") - azureWorkloadIdentity = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateAzureWorkloadIdentity, - }, - OwningJiraComponent: "cloud-credential-operator", - ResponsiblePerson: "abutcher", - OwningProduct: ocpSpecific, - } - - FeatureGateMaxUnavailableStatefulSet = FeatureGateName("MaxUnavailableStatefulSet") - maxUnavailableStatefulSet = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMaxUnavailableStatefulSet, - }, - OwningJiraComponent: "apps", - ResponsiblePerson: "atiratree", - OwningProduct: kubernetes, - } - - FeatureGateEventedPLEG = FeatureGateName("EventedPLEG") - eventedPleg = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateEventedPLEG, - }, - OwningJiraComponent: "node", - ResponsiblePerson: "sairameshv", - OwningProduct: kubernetes, - } - - FeatureGatePrivateHostedZoneAWS = FeatureGateName("PrivateHostedZoneAWS") - privateHostedZoneAWS = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGatePrivateHostedZoneAWS, - }, - OwningJiraComponent: "Routing", - ResponsiblePerson: "miciah", - OwningProduct: ocpSpecific, - } - - FeatureGateSigstoreImageVerification = FeatureGateName("SigstoreImageVerification") - sigstoreImageVerification = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateSigstoreImageVerification, - }, - OwningJiraComponent: "node", - ResponsiblePerson: "sgrunert", - OwningProduct: ocpSpecific, - } - - FeatureGateGCPLabelsTags = FeatureGateName("GCPLabelsTags") - gcpLabelsTags = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateGCPLabelsTags, - }, - OwningJiraComponent: "Installer", - ResponsiblePerson: "bhb", - OwningProduct: ocpSpecific, - } - - FeatureGateAlibabaPlatform = FeatureGateName("AlibabaPlatform") - alibabaPlatform = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateAlibabaPlatform, - }, - OwningJiraComponent: "cloud-provider", - ResponsiblePerson: "jspeed", - OwningProduct: ocpSpecific, - } - - FeatureGateCloudDualStackNodeIPs = FeatureGateName("CloudDualStackNodeIPs") - cloudDualStackNodeIPs = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateCloudDualStackNodeIPs, - }, - OwningJiraComponent: "machine-config-operator/platform-baremetal", - ResponsiblePerson: "mkowalsk", - OwningProduct: kubernetes, - } - FeatureGateVSphereStaticIPs = 
FeatureGateName("VSphereStaticIPs") - vSphereStaticIPs = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateVSphereStaticIPs, - }, - OwningJiraComponent: "splat", - ResponsiblePerson: "rvanderp3", - OwningProduct: ocpSpecific, - } - - FeatureGateRouteExternalCertificate = FeatureGateName("RouteExternalCertificate") - routeExternalCertificate = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateRouteExternalCertificate, - }, - OwningJiraComponent: "router", - ResponsiblePerson: "thejasn", - OwningProduct: ocpSpecific, - } - - FeatureGateAdminNetworkPolicy = FeatureGateName("AdminNetworkPolicy") - adminNetworkPolicy = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateAdminNetworkPolicy, - }, - OwningJiraComponent: "Networking/ovn-kubernetes", - ResponsiblePerson: "tssurya", - OwningProduct: ocpSpecific, - } - - FeatureGateNetworkLiveMigration = FeatureGateName("NetworkLiveMigration") - sdnLiveMigration = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateNetworkLiveMigration, - }, - OwningJiraComponent: "Networking/ovn-kubernetes", - ResponsiblePerson: "pliu", - OwningProduct: ocpSpecific, - } - - FeatureGateAutomatedEtcdBackup = FeatureGateName("AutomatedEtcdBackup") - automatedEtcdBackup = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateAutomatedEtcdBackup, - }, - OwningJiraComponent: "etcd", - ResponsiblePerson: "hasbro17", - OwningProduct: ocpSpecific, - } - - FeatureGateMachineAPIOperatorDisableMachineHealthCheckController = FeatureGateName("MachineAPIOperatorDisableMachineHealthCheckController") - machineAPIOperatorDisableMachineHealthCheckController = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMachineAPIOperatorDisableMachineHealthCheckController, - }, - OwningJiraComponent: "ecoproject", - ResponsiblePerson: "msluiter", - OwningProduct: ocpSpecific, - } - - FeatureGateDNSNameResolver = FeatureGateName("DNSNameResolver") - dnsNameResolver = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateDNSNameResolver, - }, - OwningJiraComponent: "dns", - ResponsiblePerson: "miciah", - OwningProduct: ocpSpecific, - } - - FeatureGateVSphereControlPlaneMachineset = FeatureGateName("VSphereControlPlaneMachineSet") - vSphereControlPlaneMachineset = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateVSphereControlPlaneMachineset, - }, - OwningJiraComponent: "splat", - ResponsiblePerson: "rvanderp3", - OwningProduct: ocpSpecific, - } - - FeatureGateMachineConfigNodes = FeatureGateName("MachineConfigNodes") - machineConfigNodes = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMachineConfigNodes, - }, - OwningJiraComponent: "MachineConfigOperator", - ResponsiblePerson: "cdoern", - OwningProduct: ocpSpecific, - } - - FeatureGateClusterAPIInstall = FeatureGateName("ClusterAPIInstall") - clusterAPIInstall = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateClusterAPIInstall, - }, - OwningJiraComponent: "Installer", - ResponsiblePerson: "vincepri", - OwningProduct: ocpSpecific, - } - - FeatureGateMetricsServer = FeatureGateName("MetricsServer") - metricsServer = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMetricsServer, - }, - OwningJiraComponent: 
"Monitoring", - ResponsiblePerson: "slashpai", - OwningProduct: ocpSpecific, - } - - FeatureGateInstallAlternateInfrastructureAWS = FeatureGateName("InstallAlternateInfrastructureAWS") - installAlternateInfrastructureAWS = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateInstallAlternateInfrastructureAWS, - }, - OwningJiraComponent: "Installer", - ResponsiblePerson: "padillon", - OwningProduct: ocpSpecific, - } - - FeatureGateGCPClusterHostedDNS = FeatureGateName("GCPClusterHostedDNS") - gcpClusterHostedDNS = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateGCPClusterHostedDNS, - }, - OwningJiraComponent: "Installer", - ResponsiblePerson: "barbacbd", - OwningProduct: ocpSpecific, - } - - FeatureGateMixedCPUsAllocation = FeatureGateName("MixedCPUsAllocation") - mixedCPUsAllocation = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMixedCPUsAllocation, - }, - OwningJiraComponent: "NodeTuningOperator", - ResponsiblePerson: "titzhak", - OwningProduct: ocpSpecific, - } - - FeatureGateManagedBootImages = FeatureGateName("ManagedBootImages") - managedBootImages = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateManagedBootImages, - }, - OwningJiraComponent: "MachineConfigOperator", - ResponsiblePerson: "djoshy", - OwningProduct: ocpSpecific, - } - - FeatureGateDisableKubeletCloudCredentialProviders = FeatureGateName("DisableKubeletCloudCredentialProviders") - disableKubeletCloudCredentialProviders = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateDisableKubeletCloudCredentialProviders, - }, - OwningJiraComponent: "cloud-provider", - ResponsiblePerson: "jspeed", - OwningProduct: kubernetes, - } - - FeatureGateOnClusterBuild = FeatureGateName("OnClusterBuild") - onClusterBuild = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateOnClusterBuild, - }, - OwningJiraComponent: "MachineConfigOperator", - ResponsiblePerson: "dkhater", - OwningProduct: ocpSpecific, - } - - FeatureGateSignatureStores = FeatureGateName("SignatureStores") - signatureStores = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateSignatureStores, - }, - OwningJiraComponent: "Cluster Version Operator", - ResponsiblePerson: "lmohanty", - OwningProduct: ocpSpecific, - } - - FeatureGateKMSv1 = FeatureGateName("KMSv1") - kmsv1 = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateKMSv1, - }, - OwningJiraComponent: "kube-apiserver", - ResponsiblePerson: "dgrisonnet", - OwningProduct: kubernetes, - } - - FeatureGatePinnedImages = FeatureGateName("PinnedImages") - pinnedImages = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGatePinnedImages, - }, - OwningJiraComponent: "MachineConfigOperator", - ResponsiblePerson: "jhernand", - OwningProduct: ocpSpecific, - } - - FeatureGateUpgradeStatus = FeatureGateName("UpgradeStatus") - upgradeStatus = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateUpgradeStatus, - }, - OwningJiraComponent: "Cluster Version Operator", - ResponsiblePerson: "pmuller", - OwningProduct: ocpSpecific, - } - - FeatureGateTranslateStreamCloseWebsocketRequests = FeatureGateName("TranslateStreamCloseWebsocketRequests") - translateStreamCloseWebsocketRequests = FeatureGateDescription{ - FeatureGateAttributes: 
FeatureGateAttributes{ - Name: FeatureGateTranslateStreamCloseWebsocketRequests, - }, - OwningJiraComponent: "kube-apiserver", - ResponsiblePerson: "akashem", - OwningProduct: kubernetes, - } - - FeatureGateVolumeGroupSnapshot = FeatureGateName("VolumeGroupSnapshot") - volumeGroupSnapshot = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateVolumeGroupSnapshot, - }, - OwningJiraComponent: "Storage / Kubernetes External Components", - ResponsiblePerson: "fbertina", - OwningProduct: kubernetes, - } -) diff --git a/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml deleted file mode 100644 index 75f846a3d..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] APIServer" -crd: 0000_10_config-operator_01_apiserver-Default.crd.yaml -tests: - onCreate: - - name: Should be able to create encrypt with aescbc - initial: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - encryption: - type: aescbc - expected: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - audit: - profile: Default - encryption: - type: aescbc - - name: Should be able to create encrypt with aesgcm - initial: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - encryption: - type: aesgcm - expected: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - audit: - profile: Default - encryption: - type: aesgcm - diff --git a/vendor/github.com/openshift/api/config/v1/stable.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.authentication.testsuite.yaml deleted file mode 100644 index 6e966c15b..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.authentication.testsuite.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Authentication" -crd: 0000_10_config-operator_01_authentication.crd-Default.yaml -tests: - onCreate: - - name: Should be able to create a minimal Authentication - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} # No spec is required for a Authentication - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} - - name: Shouldn't be able to use the OIDC type in a stable version of the resource - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - expectedError: "spec.type: Unsupported value: \"OIDC\": supported values: \"\", \"None\", \"IntegratedOAuth\"" \ No newline at end of file diff --git a/vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml deleted file mode 100644 index b422ebd20..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Build" -crd: 0000_10_openshift-controller-manager-operator_01_build.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Build - initial: | - apiVersion: config.openshift.io/v1 - kind: Build - spec: {} # No spec is required for a Build - expected: | - apiVersion: 
config.openshift.io/v1 - kind: Build - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.clusteroperator.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.clusteroperator.testsuite.yaml deleted file mode 100644 index 177e8f691..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.clusteroperator.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] ClusterOperator" -crd: 0000_00_cluster-version-operator_01_clusteroperator.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ClusterOperator - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterOperator - spec: {} # No spec is required for a ClusterOperator - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterOperator - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml deleted file mode 100644 index 4c3fed149..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml +++ /dev/null @@ -1,418 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] ClusterVersion" -crd: 0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ClusterVersion - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - - name: Should allow image to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - - name: Should allow version to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - version: 4.11.1 - - name: Should allow architecture to be empty - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: "" - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: "" - version: 4.11.1 - - name: Should allow architecture and version to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - - name: Version must be set if architecture is set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - expectedError: "Version must be set if Architecture is set" - - name: Should not allow image and architecture to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both 
Architecture and Image" - - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities baremetal and MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities baremetal without MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability - - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities marketplace and OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities marketplace without OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability - onUpdate: - - name: Should not allow image to be set if architecture set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should not allow architecture to be set if image set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, and implicitly enabled MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - 
-        capabilities:
-          enabledCapabilities:
-          - MachineAPI
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-          additionalEnabledCapabilities:
-          - baremetal
-      status:
-        desired:
-          version: foo
-          image: foo
-        observedGeneration: 1
-        versionHash: foo
-        availableUpdates:
-        - version: foo
-          image: foo
-        capabilities:
-          enabledCapabilities:
-          - MachineAPI
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-          additionalEnabledCapabilities:
-          - baremetal
-      status:
-        desired:
-          version: foo
-          image: foo
-        observedGeneration: 1
-        versionHash: foo
-        availableUpdates:
-        - version: foo
-          image: foo
-        capabilities:
-          enabledCapabilities:
-          - MachineAPI
-  - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, with the Machine API capability
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-          additionalEnabledCapabilities:
-          - baremetal
-          - MachineAPI
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-          additionalEnabledCapabilities:
-          - baremetal
-          - MachineAPI
-  - name: Should not be able to add the baremetal capability with a ClusterVersion with base capability None, and without MachineAPI
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-          additionalEnabledCapabilities:
-          - baremetal
-    expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability
-  - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, and implicitly enabled OperatorLifecycleManager
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-      status:
-        desired:
-          version: foo
-          image: foo
-        observedGeneration: 1
-        versionHash: foo
-        availableUpdates:
-        - version: foo
-          image: foo
-        capabilities:
-          enabledCapabilities:
-          - OperatorLifecycleManager
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-          additionalEnabledCapabilities:
-          - marketplace
-      status:
-        desired:
-          version: foo
-          image: foo
-        observedGeneration: 1
-        versionHash: foo
-        availableUpdates:
-        - version: foo
-          image: foo
-        capabilities:
-          enabledCapabilities:
-          - OperatorLifecycleManager
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-          additionalEnabledCapabilities:
-          - marketplace
-      status:
-        desired:
-          version: foo
-          image: foo
-        observedGeneration: 1
-        versionHash: foo
-        availableUpdates:
-        - version: foo
-          image: foo
-        capabilities:
-          enabledCapabilities:
-          - OperatorLifecycleManager
-  - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, with the OperatorLifecycleManager capability
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-          additionalEnabledCapabilities:
-          - marketplace
-          - OperatorLifecycleManager
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-          additionalEnabledCapabilities:
-          - marketplace
-          - OperatorLifecycleManager
-  - name: Should not be able to add the marketplace capability with a ClusterVersion with base capability None, and without OperatorLifecycleManager
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: ClusterVersion
-      spec:
-        clusterID: foo
-        capabilities:
-          baselineCapabilitySet: None
-          additionalEnabledCapabilities:
-          - marketplace
-    expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability
diff --git a/vendor/github.com/openshift/api/config/v1/stable.console.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.console.testsuite.yaml
deleted file mode 100644
index 0081816fc..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.console.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] Console"
-crd: 0000_10_config-operator_01_console.crd.yaml
-tests:
-  onCreate:
-  - name: Should be able to create a minimal Console
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Console
-      spec: {} # No spec is required for a Console
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Console
-      spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml
deleted file mode 100644
index 3054d200e..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml
+++ /dev/null
@@ -1,105 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] DNS"
-crd: 0000_10_config-operator_01_dns-Default.crd.yaml
-tests:
-  onCreate:
-  - name: Should be able to create a minimal DNS
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec: {} # No spec is required for a DNS
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec: {}
-  - name: Should be able to specify an AWS role ARN for a private hosted zone
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec:
-        platform:
-          type: AWS
-          aws:
-            privateZoneIAMRole: arn:aws:iam::123456789012:role/foo
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec:
-        platform:
-          type: AWS
-          aws:
-            privateZoneIAMRole: arn:aws:iam::123456789012:role/foo
-  - name: Should not be able to specify unsupported platform
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec:
-        platform:
-          type: Azure
-          azure:
-            privateZoneIAMRole: arn:aws:iam::123456789012:role/foo
-    expectedError: "Invalid value: \"string\": allowed values are '' and 'AWS'"
-  - name: Should not be able to specify invalid AWS role ARN
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      metadata:
-        name: cluster
-      spec:
-        platform:
-          type: AWS
-          aws:
-            privateZoneIAMRole: arn:aws:iam:bad:123456789012:role/foo
-    expectedError: "DNS.config.openshift.io \"cluster\" is invalid: spec.platform.aws.privateZoneIAMRole: Invalid value: \"arn:aws:iam:bad:123456789012:role/foo\": spec.platform.aws.privateZoneIAMRole in body should match '^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\\/.*$'"
-  - name: Should not be able to specify different type and platform
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec:
-        platform:
-          type: ""
-          aws:
-            privateZoneIAMRole: arn:aws:iam::123456789012:role/foo
-    expectedError: "Invalid value: \"object\": aws configuration is required when platform is AWS, and forbidden otherwise"
-  onUpdate:
-  - name: Can switch from empty (default), to AWS
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec:
-        platform:
-          type: ""
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec:
-        platform:
-          type: AWS
-          aws:
-            privateZoneIAMRole: arn:aws:iam::123456789012:role/foo
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec:
-        platform:
-          type: AWS
-          aws:
-            privateZoneIAMRole: arn:aws:iam::123456789012:role/foo
-  - name: Upgrade case is valid
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec: {} # No spec is required for a DNS
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec:
-        platform:
-          type: ""
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: DNS
-      spec:
-        platform:
-          type: ""
-
diff --git a/vendor/github.com/openshift/api/config/v1/stable.featuregate.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.featuregate.testsuite.yaml
deleted file mode 100644
index 6b6a4327a..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.featuregate.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] FeatureGate"
-crd: 0000_10_config-operator_01_featuregate.crd.yaml
-tests:
-  onCreate:
-  - name: Should be able to create a minimal FeatureGate
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: FeatureGate
-      spec: {} # No spec is required for a FeatureGate
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: FeatureGate
-      spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/stable.hypershift.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.hypershift.authentication.testsuite.yaml
deleted file mode 100644
index 406bf3861..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.hypershift.authentication.testsuite.yaml
+++ /dev/null
@@ -1,298 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable][Hypershift] Authentication"
-crd: 0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml
-tests:
-  onCreate:
-  - name: Should be able to create a minimal Authentication
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec: {} # No spec is required for a Authentication
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec: {}
-  - name: Should be able to use the OIDC type
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-  - name: Cannot set username claim prefix with policy NoPrefix
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          claimMappings:
-            username:
-              claim: "preferred_username"
-              prefixPolicy: NoPrefix
-              prefix:
-                prefixString: "myoidc:"
-    expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise"
-  - name: Can set username claim prefix with policy Prefix
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          claimMappings:
-            username:
-              claim: "preferred_username"
-              prefixPolicy: Prefix
-              prefix:
-                prefixString: "myoidc:"
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          claimMappings:
-            username:
-              claim: "preferred_username"
-              prefixPolicy: Prefix
-              prefix:
-                prefixString: "myoidc:"
-  - name: Cannot leave username claim prefix blank with policy Prefix
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          claimMappings:
-            username:
-              claim: "preferred_username"
-              prefixPolicy: Prefix
-    expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise"
-  - name: Can set OIDC providers with no username prefixing
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          claimMappings:
-            username:
-              claim: "preferred_username"
-              prefixPolicy: NoPrefix
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          claimMappings:
-            username:
-              claim: "preferred_username"
-              prefixPolicy: NoPrefix
-  onUpdate:
-  - name: Updating OIDC provider with a client that's not in the status
-    initial: &initConfig |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          oidcClients:
-          - componentNamespace: namespace
-            componentName: preexisting
-            clientID: someclient
-          - componentNamespace: namespace
-            componentName: name
-            clientID: legitclient
-      status:
-        oidcClients:
-        - componentNamespace: namespace
-          componentName: name
-        - componentNamespace: namespace2
-          componentName: name2
-        - componentNamespace: namespace2
-          componentName: name3
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          oidcClients:
-          - componentNamespace: namespace
-            componentName: preexisting
-            clientID: someclient
-          - componentNamespace: namespace
-            componentName: name
-            clientID: legitclient
-          - componentNamespace: dif-namespace # new client here
-            componentName: tehName
-            clientID: cool-client
-      status:
-        oidcClients:
-        - componentNamespace: namespace
-          componentName: name
-        - componentNamespace: namespace2
-          componentName: name2
-        - componentNamespace: namespace2
-          componentName: name3
-    expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients"
-  - name: Updating OIDC provider with a client that's different from the previous one
-    initial: *initConfig
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          oidcClients:
-          - componentNamespace: dif-namespace
-            componentName: tehName
-            clientID: cool-client
-      status:
-        oidcClients:
-        - componentNamespace: namespace
-          componentName: name
-        - componentNamespace: namespace2
-          componentName: name2
-        - componentNamespace: namespace2
-          componentName: name3
-    expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients"
-  - name: Updating previously existing client
-    initial: *initConfig
-    updated: &prevExistingUpdated |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          oidcClients:
-          - componentNamespace: namespace
-            componentName: preexisting
-            clientID: different-client
-      status:
-        oidcClients:
-        - componentNamespace: namespace
-          componentName: name
-        - componentNamespace: namespace2
-          componentName: name2
-        - componentNamespace: namespace2
-          componentName: name3
-    expected: *prevExistingUpdated
-  - name: Removing a configured client from the status (== component unregister)
-    initial: *initConfig
-    updated: &removeFromStatus |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          oidcClients:
-          - componentNamespace: namespace
-            componentName: preexisting
-            clientID: different-client
-          - componentNamespace: namespace
-            componentName: name
-            clientID: legitclient
-      status:
-        oidcClients:
-        - componentNamespace: namespace2
-          componentName: name2
-        - componentNamespace: namespace2
-          componentName: name3
-    expected: *removeFromStatus
-  - name: Simply add a valid client
-    initial: *initConfig
-    updated: &addClient |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-        oidcProviders:
-        - name: myoidc
-          issuer:
-            issuerURL: https://meh.tld
-            audiences: ['openshift-aud']
-          oidcClients:
-          - componentNamespace: namespace
-            componentName: preexisting
-            clientID: different-client
-          - componentNamespace: namespace
-            componentName: name
-            clientID: legitclient
-          - componentNamespace: namespace2
-            componentName: name3
-            clientID: justavalidclient
-      status:
-        oidcClients:
-        - componentNamespace: namespace
-          componentName: name
-        - componentNamespace: namespace2
-          componentName: name2
-        - componentNamespace: namespace2
-          componentName: name3
-    expected: *addClient
-  - name: Remove all oidcProviders
-    initial: *initConfig
-    updated: &removeFromStatus |
-      apiVersion: config.openshift.io/v1
-      kind: Authentication
-      spec:
-        type: OIDC
-      status:
-        oidcClients:
-        - componentNamespace: namespace2
-          componentName: name2
-        - componentNamespace: namespace2
-          componentName: name3
-    expected: *removeFromStatus
diff --git a/vendor/github.com/openshift/api/config/v1/stable.image.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.image.testsuite.yaml
deleted file mode 100644
index 6bfbb820f..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.image.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] Image"
-crd: 0000_10_config-operator_01_image.crd.yaml
-tests:
-  onCreate:
-  - name: Should be able to create a minimal Image
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Image
-      spec: {} # No spec is required for a Image
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Image
-      spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/stable.imagecontentpolicy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.imagecontentpolicy.testsuite.yaml
deleted file mode 100644
index bffdb6bcd..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.imagecontentpolicy.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] ImageContentPolicy"
-crd: 0000_10_config-operator_01_imagecontentpolicy.crd.yaml
-tests:
-  onCreate:
-  - name: Should be able to create a minimal ImageContentPolicy
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: ImageContentPolicy
-      spec: {} # No spec is required for a ImageContentPolicy
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: ImageContentPolicy
-      spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/stable.imagedigestmirrorset.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.imagedigestmirrorset.testsuite.yaml
deleted file mode 100644
index c25b1696b..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.imagedigestmirrorset.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] ImageDigestMirrorSet"
-crd: 0000_10_config-operator_01_imagedigestmirrorset.crd.yaml
-tests:
-  onCreate:
-  - name: Should be able to create a minimal ImageDigestMirrorSet
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: ImageDigestMirrorSet
-      spec: {} # No spec is required for a ImageDigestMirrorSet
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: ImageDigestMirrorSet
-      spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/stable.imagetagmirrorset.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.imagetagmirrorset.testsuite.yaml
deleted file mode 100644
index de91eb2c5..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.imagetagmirrorset.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] ImageTagMirrorSet"
-crd: 0000_10_config-operator_01_imagetagmirrorset.crd.yaml
-tests:
-  onCreate:
-  - name: Should be able to create a minimal ImageTagMirrorSet
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: ImageTagMirrorSet
-      spec: {} # No spec is required for a ImageTagMirrorSet
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: ImageTagMirrorSet
-      spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml
deleted file mode 100644
index 9d0861b68..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml
+++ /dev/null
@@ -1,1262 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] Infrastructure"
-crd: 0000_10_config-operator_01_infrastructure-Default.crd.yaml
-tests:
-  onCreate:
-  - name: Should be able to create a minimal Infrastructure
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {} # No spec is required for a Infrastructure
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-  - name: Should be able to pass 2 IP addresses to apiServerInternalIPs in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            apiServerInternalIPs:
-            - 192.0.2.1
-            - "2001:db8::1"
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            apiServerInternalIPs:
-            - 192.0.2.1
-            - "2001:db8::1"
-  - name: Should not be able to pass not-an-IP to apiServerInternalIPs in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            apiServerInternalIPs:
-            - not-an-ip-address
-    expectedError: "Invalid value: \"not-an-ip-address\""
-  - name: Should not be able to pass 2 IPv4 addresses to apiServerInternalIPs in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            apiServerInternalIPs:
-            - 192.0.2.1
-            - 192.0.2.2
-    expectedError: "apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
-  - name: Should not be able to pass 2 IPv6 addresses to apiServerInternalIPs in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            apiServerInternalIPs:
-            - "2001:db8::1"
-            - "2001:db8::2"
-    expectedError: "apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
-  - name: Should not be able to pass more than 2 entries to apiServerInternalIPs in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            apiServerInternalIPs:
-            - 192.0.2.1
-            - "2001:db8::1"
-            - 192.0.2.2
-    expectedError: "Too many: 3: must have at most 2 items"
-  - name: Should be able to pass 2 IP addresses to ingressIPs in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            ingressIPs:
-            - 192.0.2.1
-            - "2001:db8::1"
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            ingressIPs:
-            - 192.0.2.1
-            - "2001:db8::1"
-  - name: Should not be able to pass not-an-IP to ingressIPs in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            ingressIPs:
-            - not-an-ip-address
-    expectedError: "Invalid value: \"not-an-ip-address\""
-  - name: Should not be able to pass 2 IPv4 addresses to ingressIPs in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            ingressIPs:
-            - 192.0.2.1
-            - 192.0.2.2
-    expectedError: "ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
-  - name: Should not be able to pass 2 IPv6 addresses to ingressIPs in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            ingressIPs:
-            - "2001:db8::1"
-            - "2001:db8::2"
-    expectedError: "ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
-  - name: Should not be able to pass more than 2 entries to ingressIPs in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            ingressIPs:
-            - 192.0.2.1
-            - "2001:db8::1"
-            - 192.0.2.2
-    expectedError: "Too many: 3: must have at most 2 items"
-  - name: Should be able to pass 2 IP subnets addresses to machineNetworks in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            machineNetworks:
-            - "192.0.2.0/24"
-            - "2001:db8::0/32"
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            machineNetworks:
-            - "192.0.2.0/24"
-            - "2001:db8::0/32"
-  - name: Should not be able to pass not-a-CIDR to machineNetworks in the platform spec
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: BareMetal
-          baremetal:
-            machineNetworks:
-            - 192.0.2.1
-    expectedError: "Invalid value: \"192.0.2.1\""
-  onUpdate:
-  - name: Should be able to change External platformName from unknown to something else
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: External
-          external:
-            platformName: Unknown
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: External
-          external:
-            platformName: M&PCloud
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: External
-          external:
-            platformName: M&PCloud
-  - name: Should not be able to change External platformName once it was set
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: External
-          external:
-            platformName: M&PCloud
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: External
-          external:
-            platformName: SomeOtherCoolplatformName
-    expectedError: " spec.platformSpec.external.platformName: Invalid value: \"string\": platform name cannot be changed once set"
-  - name: Should not be able to modify an existing Azure ResourceTags Tag
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        controlPlaneTopology: "HighlyAvailable"
-        infrastructureTopology: "HighlyAvailable"
-        platform: Azure
-        platformStatus:
-          type: Azure
-          azure:
-            resourceTags:
-            - {key: "key", value: "value"}
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: Azure
-        platformStatus:
-          type: Azure
-          azure:
-            resourceTags:
-            - {key: "key", value: "changed"}
-    expectedStatusError: "status.platformStatus.azure.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation"
-  - name: Should not be able to add a Tag to an existing Azure ResourceTags
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        controlPlaneTopology: "HighlyAvailable"
-        infrastructureTopology: "HighlyAvailable"
-        platform: Azure
-        platformStatus:
-          type: Azure
-          azure:
-            resourceTags:
-            - {key: "key", value: "value"}
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: Azure
-        platformStatus:
-          type: Azure
-          azure:
-            resourceTags:
-            - {key: "key", value: "value"}
-            - {key: "new", value: "entry"}
-    expectedStatusError: "status.platformStatus.azure.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation"
-  - name: Should not be able to remove a Tag from an existing Azure ResourceTags
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: Azure
-        platformStatus:
-          type: Azure
-          azure:
-            resourceTags:
-            - {key: "key", value: "value"}
-            - {key: "new", value: "entry"}
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: Azure
-        platformStatus:
-          type: Azure
-          azure:
-            resourceTags:
-            - {key: "key", value: "value"}
-    expectedStatusError: "status.platformStatus.azure.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation"
-  - name: Should not be able to add Azure ResourceTags to an empty platformStatus.azure
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: Azure
-        platformStatus:
-          type: Azure
-          azure: {}
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: Azure
-        platformStatus:
-          azure:
-            resourceTags:
-            - {key: "key", value: "value"}
-    expectedStatusError: "status.platformStatus.azure: Invalid value: \"object\": resourceTags may only be configured during installation"
-  - name: Should not be able to remove Azure ResourceTags from platformStatus.azure
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: Azure
-        platformStatus:
-          type: Azure
-          azure:
-            resourceTags:
-            - {key: "key", value: "value"}
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: Azure
-        platformStatus:
-          type: Azure
-          azure: {}
-    expectedStatusError: "status.platformStatus.azure: Invalid value: \"object\": resourceTags may only be configured during installation"
-  - name: Should be able to modify the ResourceGroupName while Azure ResourceTags are present
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: Azure
-        platformStatus:
-          type: Azure
-          azure:
-            resourceGroupName: foo
-            resourceTags:
-            - {key: "key", value: "value"}
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: Azure
-        platformStatus:
-          azure:
-            resourceGroupName: bar
-            resourceTags:
-            - {key: "key", value: "value"}
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        controlPlaneTopology: "HighlyAvailable"
-        infrastructureTopology: "HighlyAvailable"
-        cpuPartitioning: None
-        platform: Azure
-        platformStatus:
-          azure:
-            resourceGroupName: bar
-            resourceTags:
-            - {key: "key", value: "value"}
-  - name: PowerVS platform status's resourceGroup length should not exceed the max length set
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: PowerVS
-      status:
-        platform: PowerVS
-        platformStatus:
-          powervs:
-            resourceGroup: resource-group
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: PowerVS
-      status:
-        platform: PowerVS
-        platformStatus:
-          powervs:
-            resourceGroup: resource-group-should-not-accept-the-string-that-exceeds-max-length-set
-    expectedStatusError: "status.platformStatus.powervs.resourceGroup: Too long: may not be longer than 40"
-  - name: PowerVS platform status's resourceGroup should match the regex configured
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: PowerVS
-      status:
-        platform: PowerVS
-        platformStatus:
-          powervs:
-            resourceGroup: resource-group
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: PowerVS
-      status:
-        platform: PowerVS
-        platformStatus:
-          powervs:
-            resourceGroup: re$ource-group
-    expectedStatusError: "status.platformStatus.powervs.resourceGroup in body should match '^[a-zA-Z0-9-_ ]+$'"
-  - name: Should not be able to change PowerVS platform status's resourceGroup once it was set
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: PowerVS
-      status:
-        platform: PowerVS
-        platformStatus:
-          powervs:
-            resourceGroup: resource-group
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: PowerVS
-      status:
-        platform: PowerVS
-        platformStatus:
-          powervs:
-            resourceGroup: other-resource-group-name
-    expectedStatusError: "status.platformStatus.powervs.resourceGroup: Invalid value: \"string\": resourceGroup is immutable once set"
-  - name: Should not be able to unset PowerVS platform status's resourceGroup once it was set
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: PowerVS
-      status:
-        platform: PowerVS
-        platformStatus:
-          powervs:
-            region: some-region
-            resourceGroup: resource-group
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: PowerVS
-      status:
-        platform: PowerVS
-        platformStatus:
-          powervs:
-            region: some-region
-    expectedStatusError: "status.platformStatus.powervs: Invalid value: \"object\": cannot unset resourceGroup once set"
-  - name: Should set load balancer type to OpenShiftManagedDefault if not specified
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          openstack: {}
-          type: OpenStack
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          openstack: {}
-          type: OpenStack
-      status:
-        platform: OpenStack
-        platformStatus:
-          openstack: {}
-          type: OpenStack
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          openstack: {}
-          type: OpenStack
-      status:
-        controlPlaneTopology: HighlyAvailable
-        cpuPartitioning: None
-        infrastructureTopology: HighlyAvailable
-        platform: OpenStack
-        platformStatus:
-          openstack:
-            loadBalancer:
-              type: OpenShiftManagedDefault
-          type: OpenStack
-  - name: Should be able to override the default load balancer with a valid value
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          openstack: {}
-          type: OpenStack
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          openstack: {}
-          type: OpenStack
-      status:
-        platform: OpenStack
-        platformStatus:
-          openstack:
-            loadBalancer:
-              type: UserManaged
-          type: OpenStack
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          openstack: {}
-          type: OpenStack
-      status:
-        controlPlaneTopology: HighlyAvailable
-        cpuPartitioning: None
-        infrastructureTopology: HighlyAvailable
-        platform: OpenStack
-        platformStatus:
-          openstack:
-            loadBalancer:
-              type: UserManaged
-          type: OpenStack
-  - name: Should not allow changing the immutable load balancer type field
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          openstack: {}
-          type: OpenStack
-      status:
-        controlPlaneTopology: HighlyAvailable
-        infrastructureTopology: HighlyAvailable
-        platform: OpenStack
-        platformStatus:
-          openstack:
-            loadBalancer:
-              type: OpenShiftManagedDefault
-          type: OpenStack
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: OpenStack
-          openstack: {}
-      status:
-        controlPlaneTopology: HighlyAvailable
-        infrastructureTopology: HighlyAvailable
-        platform: OpenStack
-        platformStatus:
-          openstack:
-            loadBalancer:
-              type: UserManaged
-          type: OpenStack
-    expectedStatusError: "status.platformStatus.openstack.loadBalancer.type: Invalid value: \"string\": type is immutable once set"
-  - name: Should not allow removing the immutable load balancer type field that was initially set
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          openstack: {}
-          type: OpenStack
-      status:
-        controlPlaneTopology: HighlyAvailable
-        infrastructureTopology: HighlyAvailable
-        platform: OpenStack
-        platformStatus:
-          openstack:
-            loadBalancer:
-              type: UserManaged
-          type: OpenStack
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          type: OpenStack
-          openstack: {}
-      status:
-        controlPlaneTopology: HighlyAvailable
-        infrastructureTopology: HighlyAvailable
-        platform: OpenStack
-        platformStatus:
-          openstack: {}
-          type: OpenStack
-    expectedStatusError: "status.platformStatus.openstack.loadBalancer.type: Invalid value: \"string\": type is immutable once set"
-  - name: Should not allow setting the load balancer type to a wrong value
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          openstack: {}
-          type: OpenStack
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec:
-        platformSpec:
-          openstack: {}
-          type: OpenStack
-      status:
-        platform: OpenStack
-        platformStatus:
-          openstack:
-            loadBalancer:
-              type: FooBar
-          type: OpenStack
-    expectedStatusError: "status.platformStatus.openstack.loadBalancer.type: Unsupported value: \"FooBar\": supported values: \"OpenShiftManagedDefault\", \"UserManaged\""
-  - name: Should not be able to update cloudControllerManager state to empty string when state is already set to None
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: None
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platformStatus:
-          external:
-            cloudControllerManager:
-              state: ""
-    expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set"
-  - name: Should not be able to update cloudControllerManager state to External when state is already set to None
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: None
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: External
-    expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set"
-  - name: Should be able to update cloudControllerManager state to None when state is already set to None
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: None
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: None
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        controlPlaneTopology: HighlyAvailable
-        infrastructureTopology: HighlyAvailable
-        cpuPartitioning: None
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: None
-  - name: Should not be able to unset cloudControllerManager state when state is already set to None
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: None
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager: {}
-    expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set"
-  - name: Should not be able to update cloudControllerManager state to empty string when state is already set to External
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: External
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: ""
-    expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set"
-  - name: Should not be able to update cloudControllerManager state to None when state is already set to External
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: External
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: None
-    expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set"
-  - name: Should be able to update cloudControllerManager state to External when state is already set to External
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: External
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: External
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        controlPlaneTopology: HighlyAvailable
-        infrastructureTopology: HighlyAvailable
-        cpuPartitioning: None
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: External
-  - name: Should not be able to unset cloudControllerManager state when state is already set to External
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: External
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager: {}
-    expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set"
-  - name: Should not be able to update cloudControllerManager state to None when state is already set to empty string
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: ""
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: None
-    expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set"
-  - name: Should not be able to update cloudControllerManager state to External when state is already set to empty string
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: ""
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: External
-    expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set"
-  - name: Should be able to update cloudControllerManager state to empty string when state is already set to empty string
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: ""
-    updated: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: ""
-    expected: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        controlPlaneTopology: HighlyAvailable
-        infrastructureTopology: HighlyAvailable
-        cpuPartitioning: None
-        platform: External
-        platformStatus:
-          type: External
-          external:
-            cloudControllerManager:
-              state: ""
-  - name: Should not be able to unset cloudControllerManager state when state is already set to empty string
-    initial: |
-      apiVersion: config.openshift.io/v1
-      kind: Infrastructure
-      spec: {}
-      status:
-        platform: External
platformStatus: - type: External - external: - cloudControllerManager: - state: "" - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set" - - name: Should be able to update cloudControllerManager state to None when cloudControllerManager state is unset - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - cpuPartitioning: None - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - - name: Should be able to update cloudControllerManager state to empty string when cloudControllerManager state is unset - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: "" - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - cpuPartitioning: None - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: "" - - name: Should not be able to update cloudControllerManager state to External when cloudControllerManager state is unset - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set" - - name: Should be able to unset cloudControllerManager state when cloudControllerManager state is unset - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - cpuPartitioning: None - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - - name: Should not be able to 
add cloudControllerManager when cloudControllerManager is unset - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - expectedStatusError: " status.platformStatus.external: Invalid value: \"object\": cloudControllerManager may not be added or removed once set" - - name: Should not be able to remove cloudControllerManager when cloudControllerManager is set - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: {} - expectedStatusError: " status.platformStatus.external: Invalid value: \"object\": cloudControllerManager may not be added or removed once set" - - name: Should be able to add valid (URL) ServiceEndpoints to IBMCloud PlatformStatus - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: [] - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: - - name: VPC - url: https://dummy.vpc.com - - name: COS - url: https://dummy.cos.com - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: HighlyAvailable - cpuPartitioning: None - infrastructureTopology: HighlyAvailable - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: - - name: VPC - url: https://dummy.vpc.com - - name: COS - url: https://dummy.cos.com - - name: Should not be able to add empty (URL) ServiceEndpoints to IBMCloud PlatformStatus - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: [] - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: - - name: COS - url: " " - expectedStatusError: " status.platformStatus.ibmcloud.serviceEndpoints[0].url: Invalid value: \"string\": url must be a valid absolute URL" - - name: Should not be able to add invalid (URL) ServiceEndpoints to IBMCloud PlatformStatus - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: [] - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: - - name: VPC - url: https://dummy.vpc.com - - name: COS - url: dummy-cos-com - expectedStatusError: " status.platformStatus.ibmcloud.serviceEndpoints[1].url: Invalid value: \"string\": url must be a valid absolute URL" - - name: Should not be able to add invalid (Name) ServiceEndpoints to IBMCloud PlatformStatus - initial: | - apiVersion: 
-    - name: Should not be able to add invalid (Name) ServiceEndpoints to IBMCloud PlatformStatus
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: IBMCloud
-          platformStatus:
-            type: IBMCloud
-            ibmcloud:
-              serviceEndpoints: []
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: IBMCloud
-          platformStatus:
-            type: IBMCloud
-            ibmcloud:
-              serviceEndpoints:
-              - name: VPC
-                url: https://dummy.vpc.com
-              - name: BadService
-                url: https://bad-service.com
-      expectedStatusError: " status.platformStatus.ibmcloud.serviceEndpoints[1].name: Unsupported value: \"BadService\": supported values: \"CIS\", \"COS\", \"DNSServices\", \"GlobalSearch\", \"GlobalTagging\", \"HyperProtect\", \"IAM\", \"KeyProtect\", \"ResourceController\", \"ResourceManager\", \"VPC\""
diff --git a/vendor/github.com/openshift/api/config/v1/stable.ingress.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.ingress.testsuite.yaml
deleted file mode 100644
index 90d48e896..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.ingress.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] Ingress"
-crd: 0000_10_config-operator_01_ingress.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal Ingress
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Ingress
-        spec: {} # No spec is required for a Ingress
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Ingress
-        spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml
deleted file mode 100644
index c85d122a6..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] Network"
-crd: 0000_10_config-operator_01_network-Default.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal Network
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Network
-        spec: {} # No spec is required for a Network
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Network
-        spec: {}
-    - name: Should be able to set status conditions
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Network
-        spec: {} # No spec is required for a Network
-        status:
-          conditions:
-          - type: NetworkTypeMigrationInProgress
-            status: "False"
-            reason: "Reason"
-            message: "Message"
-            lastTransitionTime: "2023-10-25T12:00:00Z"
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Network
-        spec: {}
-        status:
-          conditions:
-          - type: NetworkTypeMigrationInProgress
-            status: "False"
-            reason: "Reason"
-            message: "Message"
-            lastTransitionTime: "2023-10-25T12:00:00Z"
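Both of the URL-shaped failures above come from the same CRD rule, "url must be a valid absolute URL", enforced by a CEL expression compiled into the Infrastructure CRD. A rough Go equivalent of what that rule accepts and rejects, for illustration only (this is not the operator's code, and the CEL library's exact semantics may differ at the margins):

```go
package main

import (
	"fmt"
	"net/url"
)

// isValidAbsoluteURL approximates the serviceEndpoints check exercised by
// the deleted tests: the value must parse as an absolute URL with a host,
// so "https://dummy.vpc.com" passes while "dummy-cos-com" and " " fail.
func isValidAbsoluteURL(s string) bool {
	u, err := url.Parse(s)
	return err == nil && u.Scheme != "" && u.Host != ""
}

func main() {
	for _, s := range []string{"https://dummy.vpc.com", "dummy-cos-com", " "} {
		fmt.Printf("%-25q valid=%v\n", s, isValidAbsoluteURL(s))
	}
}
```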
diff --git a/vendor/github.com/openshift/api/config/v1/stable.node.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.node.testsuite.yaml
deleted file mode 100644
index d6502600b..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.node.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] Node"
-crd: 0000_10_config-operator_01_node.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal Node
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Node
-        spec: {} # No spec is required for a Node
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Node
-        spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/stable.oauth.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.oauth.testsuite.yaml
deleted file mode 100644
index d33d2bc1b..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.oauth.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] OAuth"
-crd: 0000_10_config-operator_01_oauth.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal OAuth
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: OAuth
-        spec: {} # No spec is required for a OAuth
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: OAuth
-        spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/stable.operatorhub.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.operatorhub.testsuite.yaml
deleted file mode 100644
index 9dd7a4c6d..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.operatorhub.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] OperatorHub"
-crd: 0000_03_marketplace-operator_01_operatorhub.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal OperatorHub
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: OperatorHub
-        spec: {} # No spec is required for a OperatorHub
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: OperatorHub
-        spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/stable.project.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.project.testsuite.yaml
deleted file mode 100644
index 0144ad32f..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.project.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] Project"
-crd: 0000_10_config-operator_01_project.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal Project
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Project
-        spec: {} # No spec is required for a Project
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Project
-        spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/stable.proxy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.proxy.testsuite.yaml
deleted file mode 100644
index d49b83247..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.proxy.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] Proxy"
-crd: 0000_03_config-operator_01_proxy.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal Proxy
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Proxy
-        spec: {} # No spec is required for a Proxy
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Proxy
-        spec: {}
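The files deleted in this run all share one schema: a named suite pointing at a CRD manifest, plus `onCreate` fixtures pairing an input manifest with the expected post-defaulting output. A minimal Go model of that shape, as a reading aid for the remaining deletions (field names are read directly off the YAML above; `gopkg.in/yaml.v3` is an assumed dependency, and the real harness in openshift/api's test tooling may differ):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3" // assumed dependency; any YAML decoder works
)

// TestSuite sketches the structure of the *.testsuite.yaml files.
type TestSuite struct {
	Name  string `yaml:"name"`
	CRD   string `yaml:"crd"`
	Tests struct {
		OnCreate []CaseOnCreate `yaml:"onCreate"`
	} `yaml:"tests"`
}

type CaseOnCreate struct {
	Name     string `yaml:"name"`
	Initial  string `yaml:"initial"`  // manifest submitted on create
	Expected string `yaml:"expected"` // manifest after defaulting
}

func main() {
	doc := `
name: "[Stable] Node"
crd: 0000_10_config-operator_01_node.crd.yaml
tests:
  onCreate:
    - name: Should be able to create a minimal Node
      initial: |
        apiVersion: config.openshift.io/v1
        kind: Node
        spec: {}
`
	var ts TestSuite
	if err := yaml.Unmarshal([]byte(doc), &ts); err != nil {
		panic(err)
	}
	fmt.Println(ts.Name, "->", ts.Tests.OnCreate[0].Name)
}
```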
diff --git a/vendor/github.com/openshift/api/config/v1/stable.scheduler.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.scheduler.testsuite.yaml
deleted file mode 100644
index d65965482..000000000
--- a/vendor/github.com/openshift/api/config/v1/stable.scheduler.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] Scheduler"
-crd: 0000_10_config-operator_01_scheduler-Default.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal Scheduler
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Scheduler
-        spec: {} # No spec is required for a Scheduler
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Scheduler
-        spec: {}
diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml
deleted file mode 100644
index 74aa92b47..000000000
--- a/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[TechPreviewNoUpgrade] APIServer"
-crd: 0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create encrypt with aescbc
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: APIServer
-        spec:
-          encryption:
-            type: aescbc
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: APIServer
-        spec:
-          audit:
-            profile: Default
-          encryption:
-            type: aescbc
-    - name: Should be able to create encrypt with aesgcm
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: APIServer
-        spec:
-          encryption:
-            type: aesgcm
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: APIServer
-        spec:
-          audit:
-            profile: Default
-          encryption:
-            type: aesgcm
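The two APIServer cases rely on CRD defaulting: `audit.profile` appears in the expected output as `Default` even though the input never set it, while the encryption type passes through unchanged. Sketched in plain Go below; in the cluster this is an OpenAPI default applied by the API server, so the struct and function here are illustrative only:

```go
package main

import "fmt"

// APIServerSpec carries just the two fields the tests above touch.
type APIServerSpec struct {
	AuditProfile   string
	EncryptionType string
}

// applyDefaults mimics the schema default: an unset audit profile
// becomes "Default"; everything else is left alone.
func applyDefaults(s APIServerSpec) APIServerSpec {
	if s.AuditProfile == "" {
		s.AuditProfile = "Default"
	}
	return s
}

func main() {
	fmt.Println(applyDefaults(APIServerSpec{EncryptionType: "aescbc"}))
	fmt.Println(applyDefaults(APIServerSpec{EncryptionType: "aesgcm"}))
}
```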
diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml
deleted file mode 100644
index 9d978fcf5..000000000
--- a/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml
+++ /dev/null
@@ -1,298 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[TechPreviewNoUpgrade] Authentication"
-crd: 0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal Authentication
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec: {} # No spec is required for a Authentication
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec: {}
-    - name: Should be able to use the OIDC type
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-    - name: Cannot set username claim prefix with policy NoPrefix
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            claimMappings:
-              username:
-                claim: "preferred_username"
-                prefixPolicy: NoPrefix
-                prefix:
-                  prefixString: "myoidc:"
-      expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise"
-    - name: Can set username claim prefix with policy Prefix
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            claimMappings:
-              username:
-                claim: "preferred_username"
-                prefixPolicy: Prefix
-                prefix:
-                  prefixString: "myoidc:"
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            claimMappings:
-              username:
-                claim: "preferred_username"
-                prefixPolicy: Prefix
-                prefix:
-                  prefixString: "myoidc:"
-    - name: Cannot leave username claim prefix blank with policy Prefix
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            claimMappings:
-              username:
-                claim: "preferred_username"
-                prefixPolicy: Prefix
-      expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise"
-    - name: Can set OIDC providers with no username prefixing
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            claimMappings:
-              username:
-                claim: "preferred_username"
-                prefixPolicy: NoPrefix
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            claimMappings:
-              username:
-                claim: "preferred_username"
-                prefixPolicy: NoPrefix
-  onUpdate:
-    - name: Updating OIDC provider with a client that's not in the status
-      initial: &initConfig |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            oidcClients:
-            - componentNamespace: namespace
-              componentName: preexisting
-              clientID: someclient
-            - componentNamespace: namespace
-              componentName: name
-              clientID: legitclient
-        status:
-          oidcClients:
-          - componentNamespace: namespace
-            componentName: name
-          - componentNamespace: namespace2
-            componentName: name2
-          - componentNamespace: namespace2
-            componentName: name3
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            oidcClients:
-            - componentNamespace: namespace
-              componentName: preexisting
-              clientID: someclient
-            - componentNamespace: namespace
-              componentName: name
-              clientID: legitclient
-            - componentNamespace: dif-namespace # new client here
-              componentName: tehName
-              clientID: cool-client
-        status:
-          oidcClients:
-          - componentNamespace: namespace
-            componentName: name
-          - componentNamespace: namespace2
-            componentName: name2
-          - componentNamespace: namespace2
-            componentName: name3
-      expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients"
-    - name: Updating OIDC provider with a client that's different from the previous one
-      initial: *initConfig
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            oidcClients:
-            - componentNamespace: dif-namespace
-              componentName: tehName
-              clientID: cool-client
-        status:
-          oidcClients:
-          - componentNamespace: namespace
-            componentName: name
-          - componentNamespace: namespace2
-            componentName: name2
-          - componentNamespace: namespace2
-            componentName: name3
-      expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients"
-    - name: Updating previously existing client
-      initial: *initConfig
-      updated: &prevExistingUpdated |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            oidcClients:
-            - componentNamespace: namespace
-              componentName: preexisting
-              clientID: different-client
-        status:
-          oidcClients:
-          - componentNamespace: namespace
-            componentName: name
-          - componentNamespace: namespace2
-            componentName: name2
-          - componentNamespace: namespace2
-            componentName: name3
-      expected: *prevExistingUpdated
-    - name: Removing a configured client from the status (== component unregister)
-      initial: *initConfig
-      updated: &removeFromStatus |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            oidcClients:
-            - componentNamespace: namespace
-              componentName: preexisting
-              clientID: different-client
-            - componentNamespace: namespace
-              componentName: name
-              clientID: legitclient
-        status:
-          oidcClients:
-          - componentNamespace: namespace2
-            componentName: name2
-          - componentNamespace: namespace2
-            componentName: name3
-      expected: *removeFromStatus
-    - name: Simply add a valid client
-      initial: *initConfig
-      updated: &addClient |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-          oidcProviders:
-          - name: myoidc
-            issuer:
-              issuerURL: https://meh.tld
-              audiences: ['openshift-aud']
-            oidcClients:
-            - componentNamespace: namespace
-              componentName: preexisting
-              clientID: different-client
-            - componentNamespace: namespace
-              componentName: name
-              clientID: legitclient
-            - componentNamespace: namespace2
-              componentName: name3
-              clientID: justavalidclient
-        status:
-          oidcClients:
-          - componentNamespace: namespace
-            componentName: name
-          - componentNamespace: namespace2
-            componentName: name2
-          - componentNamespace: namespace2
-            componentName: name3
-      expected: *addClient
-    - name: Remove all oidcProviders
-      initial: *initConfig
-      updated: &removeFromStatus |
-        apiVersion: config.openshift.io/v1
-        kind: Authentication
-        spec:
-          type: OIDC
-        status:
-          oidcClients:
-          - componentNamespace: namespace2
-            componentName: name2
-          - componentNamespace: namespace2
-            componentName: name3
-      expected: *removeFromStatus
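The `expectedError` string repeated through the suite above is produced by a single transition rule: a spec `oidcClient` is acceptable only if the same `componentNamespace`/`componentName` pair appears in `status.oidcClients` or was already present in the old spec. Restated as a Go membership check; this is a sketch of the semantics, not the CEL evaluator that actually enforces it:

```go
package main

import "fmt"

type clientRef struct{ namespace, name string }

// validOIDCClients mirrors the quoted rule: every client in the new spec
// must already appear in status.oidcClients or in the old spec.
func validOIDCClients(newSpec, oldSpec, status []clientRef) bool {
	known := map[clientRef]bool{}
	for _, c := range status {
		known[c] = true
	}
	for _, c := range oldSpec {
		known[c] = true
	}
	for _, c := range newSpec {
		if !known[c] {
			return false
		}
	}
	return true
}

func main() {
	status := []clientRef{{"namespace", "name"}}
	old := []clientRef{{"namespace", "preexisting"}}
	fmt.Println(validOIDCClients([]clientRef{{"namespace", "name"}}, old, status))        // true
	fmt.Println(validOIDCClients([]clientRef{{"dif-namespace", "tehName"}}, old, status)) // false
}
```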
diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.clusterversion.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.clusterversion.testsuite.yaml
deleted file mode 100644
index 71988108e..000000000
--- a/vendor/github.com/openshift/api/config/v1/techpreview.clusterversion.testsuite.yaml
+++ /dev/null
@@ -1,472 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[TechPreviewNoUpgrade] ClusterVersion"
-crd: 0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal ClusterVersion
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-    - name: Should allow image to be set
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            image: bar
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            image: bar
-    - name: Should allow version to be set
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            version: 4.11.1
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            version: 4.11.1
-    - name: Should allow architecture to be empty
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            architecture: ""
-            version: 4.11.1
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            architecture: ""
-            version: 4.11.1
-    - name: Should allow architecture and version to be set
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            architecture: Multi
-            version: 4.11.1
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            architecture: Multi
-            version: 4.11.1
-    - name: Version must be set if architecture is set
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            architecture: Multi
-      expectedError: "Version must be set if Architecture is set"
-    - name: Should not allow image and architecture to be set
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            architecture: Multi
-            version: 4.11.1
-            image: bar
-      expectedError: "cannot set both Architecture and Image"
-    - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities baremetal and MachineAPI
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - baremetal
-            - MachineAPI
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - baremetal
-            - MachineAPI
-    - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities baremetal without MachineAPI
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - baremetal
-      expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability
-    - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities marketplace and OperatorLifecycleManager
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - marketplace
-            - OperatorLifecycleManager
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - marketplace
-            - OperatorLifecycleManager
-    - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities marketplace without OperatorLifecycleManager
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - marketplace
-      expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability
-    - name: Should be able to set a custom signature store
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          signatureStores:
-          - url: "https://osus.ocp.com"
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          signatureStores:
-          - url: "https://osus.ocp.com"
-    - name: Should be able to set multiple custom signature store
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          signatureStores:
-          - url: "https://osus1.ocp.com"
-          - url: "https://osus2.ocp.com"
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          signatureStores:
-          - url: "https://osus1.ocp.com"
-          - url: "https://osus2.ocp.com"
-    - name: Invalid custom signature store should throw error
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          signatureStores:
-          - url: "osus1.ocp.com"
-      expectedError: "url must be a valid absolute URL"
-    - name: Should be able to unset the signature stores
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          signatureStores: []
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          signatureStores: []
-  onUpdate:
-    - name: Should not allow image to be set if architecture set
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            architecture: Multi
-            version: 4.11.1
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            architecture: Multi
-            version: 4.11.1
-            image: bar
-      expectedError: "cannot set both Architecture and Image"
-    - name: Should not allow architecture to be set if image set
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            image: bar
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          desiredUpdate:
-            architecture: Multi
-            version: 4.11.1
-            image: bar
-      expectedError: "cannot set both Architecture and Image"
-    - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, and implicitly enabled MachineAPI
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-        status:
-          desired:
-            version: foo
-            image: foo
-          observedGeneration: 1
-          versionHash: foo
-          availableUpdates:
-          - version: foo
-            image: foo
-          capabilities:
-            enabledCapabilities:
-            - MachineAPI
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - baremetal
-        status:
-          desired:
-            version: foo
-            image: foo
-          observedGeneration: 1
-          versionHash: foo
-          availableUpdates:
-          - version: foo
-            image: foo
-          capabilities:
-            enabledCapabilities:
-            - MachineAPI
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - baremetal
-        status:
-          desired:
-            version: foo
-            image: foo
-          observedGeneration: 1
-          versionHash: foo
-          availableUpdates:
-          - version: foo
-            image: foo
-          capabilities:
-            enabledCapabilities:
-            - MachineAPI
-    - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, with the Machine API capability
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - baremetal
-            - MachineAPI
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - baremetal
-            - MachineAPI
-    - name: Should not be able to add the baremetal capability with a ClusterVersion with base capability None, and without MachineAPI
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - baremetal
-      expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability
-    - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, and implicitly enabled OperatorLifecycleManager
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-        status:
-          desired:
-            version: foo
-            image: foo
-          observedGeneration: 1
-          versionHash: foo
-          availableUpdates:
-          - version: foo
-            image: foo
-          capabilities:
-            enabledCapabilities:
-            - OperatorLifecycleManager
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - marketplace
-        status:
-          desired:
-            version: foo
-            image: foo
-          observedGeneration: 1
-          versionHash: foo
-          availableUpdates:
-          - version: foo
-            image: foo
-          capabilities:
-            enabledCapabilities:
-            - OperatorLifecycleManager
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - marketplace
-        status:
-          desired:
-            version: foo
-            image: foo
-          observedGeneration: 1
-          versionHash: foo
-          availableUpdates:
-          - version: foo
-            image: foo
-          capabilities:
-            enabledCapabilities:
-            - OperatorLifecycleManager
-    - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, with the OperatorLifecycleManager capability
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - marketplace
-            - OperatorLifecycleManager
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - marketplace
-            - OperatorLifecycleManager
-    - name: Should not be able to add the marketplace capability with a ClusterVersion with base capability None, and without OperatorLifecycleManager
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: ClusterVersion
-        spec:
-          clusterID: foo
-          capabilities:
-            baselineCapabilitySet: None
-            additionalEnabledCapabilities:
-            - marketplace
-      expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability
diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.dns.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.dns.testsuite.yaml
deleted file mode 100644
index ec64352e3..000000000
--- a/vendor/github.com/openshift/api/config/v1/techpreview.dns.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[TechPreview] DNS"
-crd: 0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal DNS
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: DNS
-        spec: {} # No spec is required for a DNS
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: DNS
-        spec: {}
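The capability cases in the deleted ClusterVersion suite encode two dependencies: `baremetal` requires `MachineAPI`, and `marketplace` requires `OperatorLifecycleManager`, where the dependency may be satisfied either explicitly in spec or implicitly via `status.capabilities.enabledCapabilities`. A compact Go restatement under those assumptions (names and error text are approximations of the CEL messages quoted above):

```go
package main

import "fmt"

// requires captures the dependencies the tests assert.
var requires = map[string]string{
	"baremetal":   "MachineAPI",
	"marketplace": "OperatorLifecycleManager",
}

// capabilitiesValid accepts the additional capabilities from spec plus the
// already-enabled capabilities from status, and rejects any capability whose
// dependency is neither explicitly nor implicitly enabled.
func capabilitiesValid(additional, statusEnabled []string) error {
	enabled := map[string]bool{}
	for _, c := range additional {
		enabled[c] = true
	}
	for _, c := range statusEnabled {
		enabled[c] = true
	}
	for _, c := range additional {
		if dep, ok := requires[c]; ok && !enabled[dep] {
			return fmt.Errorf("the `%s` capability requires the `%s` capability", c, dep)
		}
	}
	return nil
}

func main() {
	fmt.Println(capabilitiesValid([]string{"baremetal"}, nil))                    // error
	fmt.Println(capabilitiesValid([]string{"baremetal", "MachineAPI"}, nil))      // ok
	fmt.Println(capabilitiesValid([]string{"baremetal"}, []string{"MachineAPI"})) // ok (implicit)
}
```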
diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml
deleted file mode 100644
index d4a1113f0..000000000
--- a/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml
+++ /dev/null
@@ -1,749 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[TechPreviewNoUpgrade] Infrastructure"
-crd: 0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal Infrastructure
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {} # No spec is required for a Infrastructure
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-  onUpdate:
-    - name: Status Should contain default fields
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status: {}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status: {}
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          cpuPartitioning: None
-          infrastructureTopology: HighlyAvailable
-          controlPlaneTopology: HighlyAvailable
-    - name: Status update cpuPartitioning should fail validation check
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          cpuPartitioning: None
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          cpuPartitioning: "Invalid"
-      expectedStatusError: 'status.cpuPartitioning: Unsupported value: "Invalid": supported values: "None", "AllNodes"'
-    - name: Should set load balancer type to OpenShiftManagedDefault if not specified
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            baremetal: {}
-            type: BareMetal
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            baremetal: {}
-            type: BareMetal
-        status:
-          platform: BareMetal
-          platformStatus:
-            baremetal: {}
-            type: BareMetal
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            baremetal: {}
-            type: BareMetal
-        status:
-          controlPlaneTopology: HighlyAvailable
-          cpuPartitioning: None
-          infrastructureTopology: HighlyAvailable
-          platform: BareMetal
-          platformStatus:
-            baremetal:
-              loadBalancer:
-                type: OpenShiftManagedDefault
-            type: BareMetal
-    - name: Should be able to override the default load balancer with a valid value
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            baremetal: {}
-            type: BareMetal
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            baremetal: {}
-            type: BareMetal
-        status:
-          platform: BareMetal
-          platformStatus:
-            baremetal:
-              loadBalancer:
-                type: UserManaged
-            type: BareMetal
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            baremetal: {}
-            type: BareMetal
-        status:
-          controlPlaneTopology: HighlyAvailable
-          cpuPartitioning: None
-          infrastructureTopology: HighlyAvailable
-          platform: BareMetal
-          platformStatus:
-            baremetal:
-              loadBalancer:
-                type: UserManaged
-            type: BareMetal
-    - name: Should not allow changing the immutable load balancer type field
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            baremetal: {}
-            type: BareMetal
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: BareMetal
-          platformStatus:
-            baremetal:
-              loadBalancer:
-                type: OpenShiftManagedDefault
-            type: BareMetal
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            type: BareMetal
-            baremetal: {}
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: BareMetal
-          platformStatus:
-            baremetal:
-              loadBalancer:
-                type: UserManaged
-            type: BareMetal
-      expectedStatusError: "status.platformStatus.baremetal.loadBalancer.type: Invalid value: \"string\": type is immutable once set"
-    - name: Should not allow removing the immutable load balancer type field that was initially set
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            baremetal: {}
-            type: BareMetal
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: BareMetal
-          platformStatus:
-            baremetal:
-              loadBalancer:
-                type: UserManaged
-            type: BareMetal
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            type: BareMetal
-            baremetal: {}
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: BareMetal
-          platformStatus:
-            baremetal: {}
-            type: BareMetal
-      expectedStatusError: "status.platformStatus.baremetal.loadBalancer.type: Invalid value: \"string\": type is immutable once set"
-    - name: Should not allow setting the load balancer type to a wrong value
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            baremetal: {}
-            type: BareMetal
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            baremetal: {}
-            type: BareMetal
-        status:
-          platform: BareMetal
-          platformStatus:
-            baremetal:
-              loadBalancer:
-                type: FooBar
-            type: BareMetal
-      expectedStatusError: "status.platformStatus.baremetal.loadBalancer.type: Unsupported value: \"FooBar\": supported values: \"OpenShiftManagedDefault\", \"UserManaged\""
-    - name: Should not be able to modify an existing GCP ResourceLabels Label
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          controlPlaneTopology: "HighlyAvailable"
-          infrastructureTopology: "HighlyAvailable"
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceLabels:
-              - {key: "key", value: "value"}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceLabels:
-              - {key: "key", value: "changed"}
-      expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation"
-    - name: Should not be able to add a Label to an existing GCP ResourceLabels
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          controlPlaneTopology: "HighlyAvailable"
-          infrastructureTopology: "HighlyAvailable"
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceLabels:
-              - {key: "key", value: "value"}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceLabels:
-              - {key: "key", value: "value"}
-              - {key: "new", value: "entry"}
-      expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation"
-    - name: Should not be able to remove a Label from an existing GCP ResourceLabels
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceLabels:
-              - {key: "key", value: "value"}
-              - {key: "new", value: "entry"}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceLabels:
-              - {key: "key", value: "value"}
-      expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation"
-    - name: Should not be able to add GCP ResourceLabels to an empty platformStatus.gcp
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp: {}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            gcp:
-              resourceLabels:
-              - {key: "key", value: "value"}
-      expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation"
-    - name: Should not be able to remove GCP ResourceLabels from platformStatus.gcp
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceLabels:
-              - {key: "key", value: "value"}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp: {}
-      expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation"
-    - name: Should not have label key start with openshift-io for GCP ResourceLabels in platformStatus.gcp
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status: {}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceLabels:
-              - {key: "key", value: "value"}
-              - {key: "openshift-io-created-cluster", value: "true"}
-      expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with either `openshift-io` or `kubernetes-io`"
-    - name: Should not have label key start with kubernetes-io for GCP ResourceLabels in platformStatus.gcp
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status: {}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceLabels:
-              - {key: "key", value: "value"}
-              - {key: "kubernetes-io-created-cluster", value: "true"}
-      expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with either `openshift-io` or `kubernetes-io`"
-    - name: Should not be able to modify an existing GCP ResourceTags Tag
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          controlPlaneTopology: "HighlyAvailable"
-          infrastructureTopology: "HighlyAvailable"
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceTags:
-              - {parentID: "1234567890", key: "key", value: "value"}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceTags:
-              - {parentID: "1234567890", key: "key", value: "changed"}
-      expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation"
-    - name: Should not be able to add a Tag to an existing GCP ResourceTags
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          controlPlaneTopology: "HighlyAvailable"
-          infrastructureTopology: "HighlyAvailable"
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceTags:
-              - {parentID: "1234567890", key: "key", value: "value"}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceTags:
-              - {parentID: "1234567890", key: "key", value: "value"}
-              - {parentID: "test-project-123", key: "new", value: "tag"}
-      expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation"
-    - name: Should not be able to remove a Tag from an existing GCP ResourceTags
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceTags:
-              - {parentID: "1234567890", key: "key1", value: "value1"}
-              - {parentID: "test-project-123", key: "key2", value: "value2"}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceTags:
-              - {parentID: "1234567890", key: "key1", value: "value1"}
-      expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation"
-    - name: Should not be able to add GCP ResourceTags to an empty platformStatus.gcp
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp: {}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            gcp:
-              resourceTags:
-              - {parentID: "1234567890", key: "key", value: "value"}
-      expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may only be configured during installation"
-    - name: Should not be able to remove GCP ResourceTags from platformStatus.gcp
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceTags:
-              - {parentID: "1234567890", key: "key", value: "value"}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp: {}
-      expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may only be configured during installation"
-    - name: Should not be able to modify ParentID of a Tag in the GCP ResourceTags
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          controlPlaneTopology: "HighlyAvailable"
-          infrastructureTopology: "HighlyAvailable"
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceTags:
-              - {parentID: "1234567890", key: "key", value: "value"}
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec: {}
-        status:
-          platform: GCP
-          platformStatus:
-            type: GCP
-            gcp:
-              resourceTags:
-              - {parentID: "test-project-123", key: "key", value: "value"}
-      expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation"
-    - name: dnsType should default to `PlatformDefault` when not specified
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            gcp: {}
-            type: GCP
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp: {}
-            type: GCP
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            type: GCP
-            gcp: {}
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp: {}
-            type: GCP
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            type: GCP
-            gcp: {}
-        status:
-          controlPlaneTopology: HighlyAvailable
-          cpuPartitioning: None
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp:
-              cloudLoadBalancerConfig:
-                dnsType: PlatformDefault
-            type: GCP
-    - name: should be able to set dnsType to non-default value of `ClusterHosted`
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            gcp: {}
-            type: GCP
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            type: GCP
-            gcp: {}
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp:
-              cloudLoadBalancerConfig:
-                dnsType: ClusterHosted
-            type: GCP
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            type: GCP
-            gcp: {}
-        status:
-          controlPlaneTopology: HighlyAvailable
-          cpuPartitioning: None
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp:
-              cloudLoadBalancerConfig:
-                dnsType: ClusterHosted
-            type: GCP
-    - name: Should not allow changing the immutable dnsType field
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            gcp: {}
-            type: GCP
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp:
-              cloudLoadBalancerConfig:
-                dnsType: ClusterHosted
-                clusterHosted:
-                  apiIntLoadBalancerIPs:
-                  - 10.10.10.20
-            type: GCP
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            type: GCP
-            gcp: {}
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp:
-              cloudLoadBalancerConfig:
-                dnsType: PlatformDefault
-            type: GCP
-      expectedStatusError: "status.platformStatus.gcp.cloudLoadBalancerConfig.dnsType: Invalid value: \"string\": dnsType is immutable"
-    - name: Should not accept non-IP address values for Load Balancer IPs
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            gcp: {}
-            type: GCP
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp:
-              cloudLoadBalancerConfig:
-                dnsType: ClusterHosted
-                clusterHosted:
-                  apiIntLoadBalancerIPs:
-                  - 10.10.10.20
-            type: GCP
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            type: GCP
-            gcp: {}
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp:
-              cloudLoadBalancerConfig:
-                dnsType: ClusterHosted
-                clusterHosted:
-                  apiIntLoadBalancerIPs:
-                  - 10.10.10.20
-                  - not-an-ip-address
-            type: GCP
-      expectedStatusError: "status.platformStatus.gcp.cloudLoadBalancerConfig.clusterHosted.apiIntLoadBalancerIPs[1]: Invalid value: \"not-an-ip-address\": status.platformStatus.gcp.cloudLoadBalancerConfig.clusterHosted.apiIntLoadBalancerIPs[1] in body should match '(^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*)'"
-    - name: Should not accept update when `clusterHosted` is specified with DNSType `PlatformDefault`
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            gcp: {}
-            type: GCP
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp: {}
-            type: GCP
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            type: GCP
-            gcp: {}
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp:
-              cloudLoadBalancerConfig:
-                dnsType: PlatformDefault
-                clusterHosted:
-                  apiIntLoadBalancerIPs:
-                  - 10.10.10.20
-            type: GCP
-      expectedStatusError: "status.platformStatus.gcp.cloudLoadBalancerConfig: Invalid value: \"object\": clusterHosted is permitted only when dnsType is ClusterHosted"
-    - name: Should not accept duplicate IP addresses for any of the Load Balancer IPs
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            gcp: {}
-            type: GCP
-      updated: |
-        apiVersion: config.openshift.io/v1
-        kind: Infrastructure
-        spec:
-          platformSpec:
-            type: GCP
-            gcp: {}
-        status:
-          controlPlaneTopology: HighlyAvailable
-          infrastructureTopology: HighlyAvailable
-          platform: GCP
-          platformStatus:
-            gcp:
-              cloudLoadBalancerConfig:
-                dnsType: ClusterHosted
-                clusterHosted:
-                  apiIntLoadBalancerIPs:
-                  - 10.10.10.20
-                  - 10.10.20.20
-                  - 10.10.10.20
-            type: GCP
-      expectedStatusError: "status.platformStatus.gcp.cloudLoadBalancerConfig.clusterHosted.apiIntLoadBalancerIPs[2]: Duplicate value: \"10.10.10.20\""
diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.network.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.network.testsuite.yaml
deleted file mode 100644
index d15fae3a9..000000000
--- a/vendor/github.com/openshift/api/config/v1/techpreview.network.testsuite.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[TechPreviewNoUpgrade] Network"
-crd: 0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to set status conditions
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Network
-        spec: {} # No spec is required for a Network
-        status:
-          conditions:
-          - type: NetworkTypeMigrationInProgress
-            status: "False"
-            reason: "Reason"
-            message: "Message"
-            lastTransitionTime: "2023-10-25T12:00:00Z"
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Network
-        spec: {}
-        status:
-          conditions:
-          - type: NetworkTypeMigrationInProgress
-            status: "False"
-            reason: "Reason"
-            message: "Message"
-            lastTransitionTime: "2023-10-25T12:00:00Z"
diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.scheduler.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.scheduler.testsuite.yaml
deleted file mode 100644
index 5b5eb8954..000000000
--- a/vendor/github.com/openshift/api/config/v1/techpreview.scheduler.testsuite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
-name: "[Stable] Scheduler"
-crd: 0000_10_config-operator_01_scheduler-TechPreviewNoUpgrade.crd.yaml
-tests:
-  onCreate:
-    - name: Should be able to create a minimal Scheduler
-      initial: |
-        apiVersion: config.openshift.io/v1
-        kind: Scheduler
-        spec: {} # No spec is required for a Scheduler
-      expected: |
-        apiVersion: config.openshift.io/v1
-        kind: Scheduler
-        spec: {}
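The last two Infrastructure cases exercise two list-level rules on `apiIntLoadBalancerIPs`: each entry must parse as an IP (enforced in the CRD by the long IPv4/IPv6 pattern quoted above) and entries must be unique. The same checks in Go, using `net/netip` instead of the regex; an approximation only, since the CRD pattern's exact corner cases (zone suffixes, surrounding whitespace) may differ:

```go
package main

import (
	"fmt"
	"net/netip"
)

// checkLoadBalancerIPs rejects entries that do not parse as IP addresses
// and entries that duplicate an earlier value, mirroring the two
// expectedStatusError cases above.
func checkLoadBalancerIPs(ips []string) error {
	seen := map[string]bool{}
	for i, s := range ips {
		if _, err := netip.ParseAddr(s); err != nil {
			return fmt.Errorf("apiIntLoadBalancerIPs[%d]: invalid value %q", i, s)
		}
		if seen[s] {
			return fmt.Errorf("apiIntLoadBalancerIPs[%d]: duplicate value %q", i, s)
		}
		seen[s] = true
	}
	return nil
}

func main() {
	fmt.Println(checkLoadBalancerIPs([]string{"10.10.10.20", "not-an-ip-address"}))
	fmt.Println(checkLoadBalancerIPs([]string{"10.10.10.20", "10.10.20.20", "10.10.10.20"}))
}
```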
diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go
index 6fb1b9adc..d4d09e7fe 100644
--- a/vendor/github.com/openshift/api/config/v1/types.go
+++ b/vendor/github.com/openshift/api/config/v1/types.go
@@ -401,7 +401,7 @@ const (
 
 // IBMCloudServiceName contains a value specifying the name of an IBM Cloud Service,
 // which are used by MAPI, CIRO, CIO, Installer, etc.
-// +kubebuilder:validation:Enum=CIS;COS;DNSServices;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;ResourceController;ResourceManager;VPC
+// +kubebuilder:validation:Enum=CIS;COS;COSConfig;DNSServices;GlobalCatalog;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;ResourceController;ResourceManager;VPC
 type IBMCloudServiceName string
 
 const (
@@ -409,8 +409,12 @@ const (
 	IBMCloudServiceCIS IBMCloudServiceName = "CIS"
 	// IBMCloudServiceCOS is the name for IBM Cloud COS.
 	IBMCloudServiceCOS IBMCloudServiceName = "COS"
+	// IBMCloudServiceCOSConfig is the name for IBM Cloud COS Config service.
+	IBMCloudServiceCOSConfig IBMCloudServiceName = "COSConfig"
 	// IBMCloudServiceDNSServices is the name for IBM Cloud DNS Services.
 	IBMCloudServiceDNSServices IBMCloudServiceName = "DNSServices"
+	// IBMCloudServiceGlobalCatalog is the name for IBM Cloud Global Catalog service.
+	IBMCloudServiceGlobalCatalog IBMCloudServiceName = "GlobalCatalog"
 	// IBMCloudServiceGlobalSearch is the name for IBM Cloud Global Search.
 	IBMCloudServiceGlobalSearch IBMCloudServiceName = "GlobalSearch"
 	// IBMCloudServiceGlobalTagging is the name for IBM Cloud Global Tagging.
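The enum change above admits two new IBM Cloud service names, COSConfig and GlobalCatalog. Restated as a Go lookup set so the before/after difference is easy to exercise; illustrative only, since in the cluster the generated CRD enforces this via the kubebuilder enum rather than runtime code:

```go
package main

import "fmt"

// ibmCloudServiceNames mirrors the updated kubebuilder enum.
var ibmCloudServiceNames = map[string]bool{
	"CIS": true, "COS": true, "COSConfig": true, "DNSServices": true,
	"GlobalCatalog": true, "GlobalSearch": true, "GlobalTagging": true,
	"HyperProtect": true, "IAM": true, "KeyProtect": true,
	"ResourceController": true, "ResourceManager": true, "VPC": true,
}

func main() {
	for _, n := range []string{"COSConfig", "GlobalCatalog", "BadService"} {
		fmt.Printf("%-15s supported=%v\n", n, ibmCloudServiceNames[n])
	}
}
```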
diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
index 59b89388b..d815556d2 100644
--- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go
+++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
@@ -14,6 +14,12 @@ import (
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=apiservers,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
 type APIServer struct {
 	metav1.TypeMeta `json:",inline"`
 
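The markers being added are consumed by controller-gen rather than the Go compiler: they pin the upstream approval PR, name the generated manifest file, and declare the CRD's scope and subresources. A toy type using the same marker style; the `Widget` kind is invented for illustration, and only the marker syntax mirrors the diff:

```go
package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // assumed dependency

// Widget is a stand-in kind; run controller-gen over a package like this
// and it emits a cluster-scoped CRD with a status subresource.
//
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=widgets,scope=Cluster
// +kubebuilder:subresource:status
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   WidgetSpec   `json:"spec"`
	Status WidgetStatus `json:"status,omitempty"`
}

// WidgetSpec and WidgetStatus are empty placeholders.
type WidgetSpec struct{}
type WidgetStatus struct{}
```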
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=authentications,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
 type Authentication struct {
 	metav1.TypeMeta `json:",inline"`
 
@@ -85,7 +90,7 @@ type AuthenticationSpec struct {
 	// +listType=map
 	// +listMapKey=name
 	// +kubebuilder:validation:MaxItems=1
-	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +openshift:enable:FeatureGate=ExternalOIDC
 	OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"`
 }
 
@@ -112,7 +117,7 @@ type AuthenticationStatus struct {
 	// +listMapKey=componentNamespace
 	// +listMapKey=componentName
 	// +kubebuilder:validation:MaxItems=20
-	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +openshift:enable:FeatureGate=ExternalOIDC
 	OIDCClients []OIDCClientStatus `json:"oidcClients"`
 }
 
@@ -130,8 +135,8 @@ type AuthenticationList struct {
 	Items []Authentication `json:"items"`
 }
 
-// +openshift:validation:FeatureSetAwareEnum:featureSet=Default,enum="";None;IntegratedOAuth
-// +openshift:validation:FeatureSetAwareEnum:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,enum="";None;IntegratedOAuth;OIDC
+// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";None;IntegratedOAuth
+// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC,enum="";None;IntegratedOAuth;OIDC
 type AuthenticationType string
 
 const (
diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go
index e9aef0375..dad47666d 100644
--- a/vendor/github.com/openshift/api/config/v1/types_build.go
+++ b/vendor/github.com/openshift/api/config/v1/types_build.go
@@ -16,6 +16,12 @@ import (
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=openshift-controller-manager,operatorOrdering=01
+// +openshift:capability=Build
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=builds,scope=Cluster
+// +kubebuilder:subresource:status
 type Build struct {
 	metav1.TypeMeta `json:",inline"`
 
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
index 78666bb1e..7951762cc 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -15,6 +15,17 @@ import (
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/497
+// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clusteroperators,scope=Cluster,shortName=co
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name=Version,JSONPath=.status.versions[?(@.name=="operator")].version,type=string,description=The version the operator is at.
+// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string,description=Whether the operator is running and stable.
+// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string,description=Whether the operator is processing changes.
+// +kubebuilder:printcolumn:name=Degraded,JSONPath=.status.conditions[?(@.type=="Degraded")].status,type=string,description=Whether the operator is degraded.
+// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Available")].lastTransitionTime,type=date,description=The time the operator's Available status last changed.
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
 type ClusterOperator struct {
 	metav1.TypeMeta `json:",inline"`
 
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
index dc913b75c..61386a72e 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
@@ -13,8 +13,18 @@ import (
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
-// +kubebuilder:validation:XValidation:rule="has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == 'None' && 'baremetal' in self.spec.capabilities.additionalEnabledCapabilities ? 'MachineAPI' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && 'MachineAPI' in self.status.capabilities.enabledCapabilities) : true",message="the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability"
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/495
+// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=clusterversions,scope=Cluster
 // +kubebuilder:validation:XValidation:rule="has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == 'None' && 'marketplace' in self.spec.capabilities.additionalEnabledCapabilities ? 'OperatorLifecycleManager' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && 'OperatorLifecycleManager' in self.status.capabilities.enabledCapabilities) : true",message="the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability"
+// +kubebuilder:printcolumn:name=Version,JSONPath=.status.history[?(@.state=="Completed")].version,type=string
+// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string
+// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string
+// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Progressing")].lastTransitionTime,type=date
+// +kubebuilder:printcolumn:name=Status,JSONPath=.status.conditions[?(@.type=="Progressing")].message,type=string
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
 type ClusterVersion struct {
 	metav1.TypeMeta `json:",inline"`
 
@@ -100,7 +110,7 @@ type ClusterVersionSpec struct {
 	//
 	// A maximum of 32 signature stores may be configured.
 	// +kubebuilder:validation:MaxItems=32
-	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +openshift:enable:FeatureGate=SignatureStores
 	// +listType=map
 	// +listMapKey=url
 	// +optional
@@ -418,7 +428,7 @@ var KnownClusterVersionCapabilities = []ClusterVersionCapability{
 }
 
 // ClusterVersionCapabilitySet defines sets of cluster version capabilities.
-// +kubebuilder:validation:Enum=None;v4.11;v4.12;v4.13;v4.14;v4.15;v4.16;vCurrent
+// +kubebuilder:validation:Enum=None;v4.11;v4.12;v4.13;v4.14;v4.15;v4.16;v4.17;v4.18;vCurrent
 type ClusterVersionCapabilitySet string
 
 const (
@@ -462,6 +472,18 @@ const (
 	// version of OpenShift is installed.
 	ClusterVersionCapabilitySet4_16 ClusterVersionCapabilitySet = "v4.16"
 
+	// ClusterVersionCapabilitySet4_17 is the recommended set of
+	// optional capabilities to enable for the 4.17 version of
+	// OpenShift. This list will remain the same no matter which
+	// version of OpenShift is installed.
+	ClusterVersionCapabilitySet4_17 ClusterVersionCapabilitySet = "v4.17"
+
+	// ClusterVersionCapabilitySet4_18 is the recommended set of
+	// optional capabilities to enable for the 4.18 version of
+	// OpenShift. This list will remain the same no matter which
+	// version of OpenShift is installed.
+	ClusterVersionCapabilitySet4_18 ClusterVersionCapabilitySet = "v4.18"
+
 	// ClusterVersionCapabilitySetCurrent is the recommended set
 	// of optional capabilities to enable for the cluster's
 	// current version of OpenShift.
@@ -546,6 +568,42 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers
 		ClusterVersionCapabilityIngress,
 		ClusterVersionCapabilityCloudControllerManager,
 	},
+	ClusterVersionCapabilitySet4_17: {
+		ClusterVersionCapabilityBaremetal,
+		ClusterVersionCapabilityConsole,
+		ClusterVersionCapabilityInsights,
+		ClusterVersionCapabilityMarketplace,
+		ClusterVersionCapabilityStorage,
+		ClusterVersionCapabilityOpenShiftSamples,
+		ClusterVersionCapabilityCSISnapshot,
+		ClusterVersionCapabilityNodeTuning,
+		ClusterVersionCapabilityMachineAPI,
+		ClusterVersionCapabilityBuild,
+		ClusterVersionCapabilityDeploymentConfig,
+		ClusterVersionCapabilityImageRegistry,
+		ClusterVersionCapabilityOperatorLifecycleManager,
+		ClusterVersionCapabilityCloudCredential,
+		ClusterVersionCapabilityIngress,
+		ClusterVersionCapabilityCloudControllerManager,
+	},
+	ClusterVersionCapabilitySet4_18: {
+		ClusterVersionCapabilityBaremetal,
+		ClusterVersionCapabilityConsole,
+		ClusterVersionCapabilityInsights,
+		ClusterVersionCapabilityMarketplace,
+		ClusterVersionCapabilityStorage,
+		ClusterVersionCapabilityOpenShiftSamples,
+		ClusterVersionCapabilityCSISnapshot,
+		ClusterVersionCapabilityNodeTuning,
+		ClusterVersionCapabilityMachineAPI,
+		ClusterVersionCapabilityBuild,
+		ClusterVersionCapabilityDeploymentConfig,
+		ClusterVersionCapabilityImageRegistry,
+		ClusterVersionCapabilityOperatorLifecycleManager,
+		ClusterVersionCapabilityCloudCredential,
+		ClusterVersionCapabilityIngress,
+		ClusterVersionCapabilityCloudControllerManager,
+	},
 	ClusterVersionCapabilitySetCurrent: {
 		ClusterVersionCapabilityBaremetal,
 		ClusterVersionCapabilityConsole,
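
Reviewer aside, not part of the vendored patch: the new v4.17/v4.18 entries above are consumed by looking them up in this package's ClusterVersionCapabilitySets map. A minimal Go sketch under the usual configv1 import alias; the existence check is the point, since an older vendored copy of the package will not carry the new keys:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// Tolerate version skew: a copy of this package that predates the
	// bump has no "v4.17" entry, so the map lookup must be guarded.
	caps, ok := configv1.ClusterVersionCapabilitySets[configv1.ClusterVersionCapabilitySet4_17]
	if !ok {
		fmt.Println("capability set v4.17 unknown to this client")
		return
	}
	for _, c := range caps {
		fmt.Println(c)
	}
}
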
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go
index 928181849..e8f197b34 100644
--- a/vendor/github.com/openshift/api/config/v1/types_console.go
+++ b/vendor/github.com/openshift/api/config/v1/types_console.go
@@ -14,6 +14,12 @@ import (
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=consoles,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
 type Console struct {
 	metav1.TypeMeta `json:",inline"`
 
diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go
index 5f8697673..5daa5d78d 100644
--- a/vendor/github.com/openshift/api/config/v1/types_dns.go
+++ b/vendor/github.com/openshift/api/config/v1/types_dns.go
@@ -10,6 +10,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=dnses,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
 type DNS struct {
 	metav1.TypeMeta `json:",inline"`
 
diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go
index f608fd0be..88d94ac52 100644
--- a/vendor/github.com/openshift/api/config/v1/types_feature.go
+++ b/vendor/github.com/openshift/api/config/v1/types_feature.go
@@ -1,8 +1,6 @@
 package v1
 
 import (
-	"fmt"
-
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -14,6 +12,12 @@ import (
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=featuregates,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
 type FeatureGate struct {
 	metav1.TypeMeta `json:",inline"`
 
@@ -24,6 +28,7 @@ type FeatureGate struct {
 	// spec holds user settable values for configuration
 	// +kubebuilder:validation:Required
 	// +required
+	// +kubebuilder:validation:XValidation:rule="has(oldSelf.featureSet) ? has(self.featureSet) : true",message=".spec.featureSet cannot be removed"
 	Spec FeatureGateSpec `json:"spec"`
 	// status holds observed values from the cluster. They may not be overridden.
 	// +optional
@@ -40,13 +45,17 @@ var (
 	// this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES.
 	TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade"
 
+	// DevPreviewNoUpgrade turns on dev preview features that are not part of the normal supported platform. Turning
+	// this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES.
+	DevPreviewNoUpgrade FeatureSet = "DevPreviewNoUpgrade"
+
 	// CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
 	// Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
 	// your cluster may fail in an unrecoverable way.
 	CustomNoUpgrade FeatureSet = "CustomNoUpgrade"
 
-	// TopologyManager enables ToplogyManager support. Upgrades are enabled with this feature.
-	LatencySensitive FeatureSet = "LatencySensitive"
+	// AllFixedFeatureSets are the featuresets that have known featuregates. Custom doesn't for instance. LatencySensitive is dead
+	AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade, DevPreviewNoUpgrade}
 )
 
 type FeatureGateSpec struct {
@@ -59,6 +68,10 @@ type FeatureGateSelection struct {
 	// Turning on or off features may cause irreversible changes in your cluster which cannot be undone.
 	// +unionDiscriminator
 	// +optional
+	// +kubebuilder:validation:Enum=CustomNoUpgrade;DevPreviewNoUpgrade;TechPreviewNoUpgrade;""
+	// +kubebuilder:validation:XValidation:rule="oldSelf == 'CustomNoUpgrade' ? self == 'CustomNoUpgrade' : true",message="CustomNoUpgrade may not be changed"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == 'TechPreviewNoUpgrade' ? self == 'TechPreviewNoUpgrade' : true",message="TechPreviewNoUpgrade may not be changed"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == 'DevPreviewNoUpgrade' ? self == 'DevPreviewNoUpgrade' : true",message="DevPreviewNoUpgrade may not be changed"
 	FeatureSet FeatureSet `json:"featureSet,omitempty"`
 
 	// customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
@@ -138,176 +151,3 @@ type FeatureGateList struct {
 
 	Items []FeatureGate `json:"items"`
 }
-
-type FeatureGateEnabledDisabled struct {
-	Enabled  []FeatureGateDescription
-	Disabled []FeatureGateDescription
-}
-
-// FeatureSets Contains a map of Feature names to Enabled/Disabled Feature.
-//
-// NOTE: The caller needs to make sure to check for the existence of the value
-// using golang's existence field. A possible scenario is an upgrade where new
-// FeatureSets are added and a controller has not been upgraded with a newer
-// version of this file. In this upgrade scenario the map could return nil.
-//
-// example:
-//
-//	if featureSet, ok := FeatureSets["SomeNewFeature"]; ok { }
-//
-// If you put an item in either of these lists, put your area and name on it so we can find owners.
-var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{
-	Default: defaultFeatures,
-	CustomNoUpgrade: {
-		Enabled: []FeatureGateDescription{},
-		Disabled: []FeatureGateDescription{
-			disableKubeletCloudCredentialProviders, // We do not currently ship the correct config to use the external credentials provider.
-		},
-	},
-	TechPreviewNoUpgrade: newDefaultFeatures().
-		with(validatingAdmissionPolicy).
-		with(csiDriverSharedResource).
-		with(nodeSwap).
-		with(machineAPIProviderOpenStack).
-		with(insightsConfigAPI).
-		with(dynamicResourceAllocation).
-		with(gateGatewayAPI).
-		with(maxUnavailableStatefulSet).
-		without(eventedPleg).
-		with(sigstoreImageVerification).
-		with(gcpLabelsTags).
-		with(gcpClusterHostedDNS).
-		with(vSphereStaticIPs).
-		with(routeExternalCertificate).
-		with(automatedEtcdBackup).
-		without(machineAPIOperatorDisableMachineHealthCheckController).
-		with(adminNetworkPolicy).
-		with(dnsNameResolver).
-		with(machineConfigNodes).
-		with(metricsServer).
-		with(installAlternateInfrastructureAWS).
-		without(clusterAPIInstall).
-		with(mixedCPUsAllocation).
-		with(managedBootImages).
-		without(disableKubeletCloudCredentialProviders).
-		with(onClusterBuild).
-		with(signatureStores).
-		with(pinnedImages).
-		with(upgradeStatus).
-		with(translateStreamCloseWebsocketRequests).
-		with(volumeGroupSnapshot).
-		toFeatures(defaultFeatures),
-	LatencySensitive: newDefaultFeatures().
-		toFeatures(defaultFeatures),
-}
-
-var defaultFeatures = &FeatureGateEnabledDisabled{
-	Enabled: []FeatureGateDescription{
-		openShiftPodSecurityAdmission,
-		alibabaPlatform, // This is a bug, it should be TechPreviewNoUpgrade. This must be downgraded before 4.14 is shipped.
-		azureWorkloadIdentity,
-		cloudDualStackNodeIPs,
-		externalCloudProvider,
-		externalCloudProviderAzure,
-		externalCloudProviderGCP,
-		externalCloudProviderExternal,
-		privateHostedZoneAWS,
-		buildCSIVolumes,
-		kmsv1,
-		vSphereControlPlaneMachineset,
-		sdnLiveMigration,
-	},
-	Disabled: []FeatureGateDescription{
-		disableKubeletCloudCredentialProviders, // We do not currently ship the correct config to use the external credentials provider.
-	},
-}
-
-type featureSetBuilder struct {
-	forceOn  []FeatureGateDescription
-	forceOff []FeatureGateDescription
-}
-
-func newDefaultFeatures() *featureSetBuilder {
-	return &featureSetBuilder{}
-}
-
-func (f *featureSetBuilder) with(forceOn FeatureGateDescription) *featureSetBuilder {
-	for _, curr := range f.forceOn {
-		if curr.FeatureGateAttributes.Name == forceOn.FeatureGateAttributes.Name {
-			panic(fmt.Errorf("coding error: %q enabled twice", forceOn.FeatureGateAttributes.Name))
-		}
-	}
-	f.forceOn = append(f.forceOn, forceOn)
-	return f
-}
-
-func (f *featureSetBuilder) without(forceOff FeatureGateDescription) *featureSetBuilder {
-	for _, curr := range f.forceOff {
-		if curr.FeatureGateAttributes.Name == forceOff.FeatureGateAttributes.Name {
-			panic(fmt.Errorf("coding error: %q disabled twice", forceOff.FeatureGateAttributes.Name))
-		}
-	}
-	f.forceOff = append(f.forceOff, forceOff)
-	return f
-}
-
-func (f *featureSetBuilder) isForcedOff(needle FeatureGateDescription) bool {
-	for _, forcedOff := range f.forceOff {
-		if needle.FeatureGateAttributes.Name == forcedOff.FeatureGateAttributes.Name {
-			return true
-		}
-	}
-	return false
-}
-
-func (f *featureSetBuilder) isForcedOn(needle FeatureGateDescription) bool {
-	for _, forceOn := range f.forceOn {
-		if needle.FeatureGateAttributes.Name == forceOn.FeatureGateAttributes.Name {
-			return true
-		}
-	}
-	return false
-}
-
-func (f *featureSetBuilder) toFeatures(defaultFeatures *FeatureGateEnabledDisabled) *FeatureGateEnabledDisabled {
-	finalOn := []FeatureGateDescription{}
-	finalOff := []FeatureGateDescription{}
-
-	// only add the default enabled features if they haven't been explicitly set off
-	for _, defaultOn := range defaultFeatures.Enabled {
-		if !f.isForcedOff(defaultOn) {
-			finalOn = append(finalOn, defaultOn)
-		}
-	}
-	for _, currOn := range f.forceOn {
-		if f.isForcedOff(currOn) {
-			panic("coding error, you can't have features both on and off")
-		}
-		found := false
-		for _, alreadyOn := range finalOn {
-			if alreadyOn.FeatureGateAttributes.Name == currOn.FeatureGateAttributes.Name {
-				found = true
-			}
-		}
-		if found {
-			continue
-		}
-
-		finalOn = append(finalOn, currOn)
-	}
-
-	// only add the default disabled features if they haven't been explicitly set on
-	for _, defaultOff := range defaultFeatures.Disabled {
-		if !f.isForcedOn(defaultOff) {
-			finalOff = append(finalOff, defaultOff)
-		}
-	}
-	for _, currOff := range f.forceOff {
-		finalOff = append(finalOff, currOff)
-	}
-
-	return &FeatureGateEnabledDisabled{
-		Enabled:  finalOn,
-		Disabled: finalOff,
-	}
-}
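
Reviewer aside, not part of the vendored patch: with the static FeatureSets map and featureSetBuilder deleted above, what remains in this file is the declarative API plus the CEL ratchets added to FeatureGateSelection. The three XValidation rules all share the shape `oldSelf == 'X' ? self == 'X' : true`; a standalone Go sketch of that one-way latch (the function and its use are hypothetical, not package API):

package main

import "fmt"

// validateFeatureSetChange mirrors the CEL ratchet added to
// FeatureGateSelection: once a *NoUpgrade feature set is selected,
// any transition away from it is rejected; selecting one is allowed.
func validateFeatureSetChange(oldSet, newSet string) error {
	for _, locked := range []string{"CustomNoUpgrade", "TechPreviewNoUpgrade", "DevPreviewNoUpgrade"} {
		if oldSet == locked && newSet != locked {
			return fmt.Errorf("%s may not be changed", locked)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateFeatureSetChange("", "TechPreviewNoUpgrade")) // <nil>: opting in is allowed
	fmt.Println(validateFeatureSetChange("TechPreviewNoUpgrade", "")) // error: cannot be undone
}
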
diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go
index 928224c0d..d3c694a56 100644
--- a/vendor/github.com/openshift/api/config/v1/types_image.go
+++ b/vendor/github.com/openshift/api/config/v1/types_image.go
@@ -15,6 +15,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=images,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
 type Image struct {
 	metav1.TypeMeta `json:",inline"`
 
@@ -31,6 +37,23 @@ type Image struct {
 	Status ImageStatus `json:"status"`
 }
 
+// ImportModeType describes how to import an image manifest.
+// +enum
+// +kubebuilder:validation:Enum:="";Legacy;PreserveOriginal
+type ImportModeType string
+
+const (
+	// ImportModeLegacy indicates that the legacy behaviour should be used.
+	// For manifest lists, the legacy behaviour will discard the manifest list and import a single
+	// sub-manifest. In this case, the platform is chosen in the following order of priority:
+	// 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list.
+	// This mode is the default.
+	ImportModeLegacy ImportModeType = "Legacy"
+	// ImportModePreserveOriginal indicates that the original manifest will be preserved.
+	// For manifest lists, the manifest list and all its sub-manifests will be imported.
+	ImportModePreserveOriginal ImportModeType = "PreserveOriginal"
+)
+
 type ImageSpec struct {
 	// allowedRegistriesForImport limits the container image registries that normal users may import
 	// images from. Set this list to the registries that you trust to contain valid Docker
@@ -39,6 +62,7 @@ type ImageSpec struct {
 	// this policy - typically only administrators or system integrations will have those
 	// permissions.
 	// +optional
+	// +listType=atomic
 	AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"`
 
 	// externalRegistryHostnames provides the hostnames for the default external image
@@ -46,6 +70,7 @@ type ImageSpec struct {
 	// is exposed externally. The first value is used in 'publicDockerImageRepository'
 	// field in ImageStreams. The value must be in "hostname[:port]" format.
 	// +optional
+	// +listType=atomic
 	ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
 
 	// additionalTrustedCA is a reference to a ConfigMap containing additional CAs that
@@ -61,6 +86,21 @@ type ImageSpec struct {
 	// internal cluster registry.
 	// +optional
 	RegistrySources RegistrySources `json:"registrySources"`
+
+	// imageStreamImportMode controls the import mode behaviour of imagestreams.
+	// It can be set to `Legacy` or `PreserveOriginal` or the empty string. If this value
+	// is specified, this setting is applied to all newly created imagestreams which do not have the
+	// value set. `Legacy` indicates that the legacy behaviour should be used.
+	// For manifest lists, the legacy behaviour will discard the manifest list and import a single
+	// sub-manifest. In this case, the platform is chosen in the following order of priority:
+	// 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list.
+	// `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists,
+	// the manifest list and all its sub-manifests will be imported. When empty, the behaviour will be
+	// decided based on the payload type advertised by the ClusterVersion status, i.e single arch payload
+	// implies the import mode is Legacy and multi payload implies PreserveOriginal.
+	// +openshift:enable:FeatureGate=ImageStreamImportMode
+	// +optional
+	ImageStreamImportMode ImportModeType `json:"imageStreamImportMode"`
 }
 
 type ImageStatus struct {
@@ -76,7 +116,22 @@ type ImageStatus struct {
 	// is exposed externally. The first value is used in 'publicDockerImageRepository'
 	// field in ImageStreams. The value must be in "hostname[:port]" format.
 	// +optional
+	// +listType=atomic
 	ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
+
+	// imageStreamImportMode controls the import mode behaviour of imagestreams. It can be
+	// `Legacy` or `PreserveOriginal`. `Legacy` indicates that the legacy behaviour should be used.
+	// For manifest lists, the legacy behaviour will discard the manifest list and import a single
+	// sub-manifest. In this case, the platform is chosen in the following order of priority:
+	// 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list.
+	// `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists,
+	// the manifest list and all its sub-manifests will be imported. This value will be reconciled based
+	// on either the spec value or if no spec value is specified, the image registry operator would look
+	// at the ClusterVersion status to determine the payload type and set the import mode accordingly,
+	// i.e single arch payload implies the import mode is Legacy and multi payload implies PreserveOriginal.
+	// +openshift:enable:FeatureGate=ImageStreamImportMode
+	// +optional
+	ImageStreamImportMode ImportModeType `json:"imageStreamImportMode,omitempty"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -110,16 +165,19 @@ type RegistryLocation struct {
 type RegistrySources struct {
 	// insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.
 	// +optional
+	// +listType=atomic
 	InsecureRegistries []string `json:"insecureRegistries,omitempty"`
 	// blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.
 	//
 	// Only one of BlockedRegistries or AllowedRegistries may be set.
 	// +optional
+	// +listType=atomic
 	BlockedRegistries []string `json:"blockedRegistries,omitempty"`
 	// allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied.
 	//
 	// Only one of BlockedRegistries or AllowedRegistries may be set.
 	// +optional
+	// +listType=atomic
 	AllowedRegistries []string `json:"allowedRegistries,omitempty"`
 	// containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified
 	// domains in their pull specs. Registries will be searched in the order provided in the list.
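
Reviewer aside, not part of the vendored patch: a consumer of the new ImportModeType might branch on it like this when deciding how to handle a manifest list. The describeImport helper is hypothetical; the constants and the empty-string default come from the type definition above:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func describeImport(mode configv1.ImportModeType) string {
	switch mode {
	case configv1.ImportModePreserveOriginal:
		// Keep the manifest list and import all of its sub-manifests.
		return "preserve the original manifest list"
	case configv1.ImportModeLegacy, "":
		// Discard the list and import one sub-manifest, chosen by tag
		// annotations, then control plane arch/os, then linux/amd64,
		// then the first manifest — the documented priority order.
		return "import a single sub-manifest"
	default:
		return "unknown import mode"
	}
}

func main() {
	fmt.Println(describeImport(configv1.ImportModeLegacy))
}
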
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
index 3dc315f68..74df4027f 100644
--- a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
+++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
@@ -11,6 +11,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/874
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagecontentpolicies,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
 type ImageContentPolicy struct {
 	metav1.TypeMeta `json:",inline"`
 
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go
index 987c6cfdc..43d748c0c 100644
--- a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go
+++ b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go
@@ -11,6 +11,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagedigestmirrorsets,scope=Cluster,shortName=idms
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
 type ImageDigestMirrorSet struct {
 	metav1.TypeMeta `json:",inline"`
 
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
index 295522e59..ca8d35515 100644
--- a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
+++ b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
@@ -11,6 +11,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagetagmirrorsets,scope=Cluster,shortName=itms
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
 type ImageTagMirrorSet struct {
 	metav1.TypeMeta `json:",inline"`
 
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
index 4ff2e68f1..392d128c1 100644
--- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
+++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -13,6 +13,12 @@ import (
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=infrastructures,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true
 type Infrastructure struct {
 	metav1.TypeMeta `json:",inline"`
 
@@ -263,6 +269,7 @@ type ExternalPlatformSpec struct {
 // PlatformSpec holds the desired state specific to the underlying infrastructure provider
 // of the current cluster. Since these are used at spec-level for the underlying cluster, it
 // is supposed that only one of the spec structs is set.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.vsphere) && has(self.vsphere) ? size(self.vsphere.vcenters) < 2 : true",message="vcenters can have at most 1 item when configured post-install"
 type PlatformSpec struct {
 	// type is the underlying infrastructure provider for the cluster. This
 	// value controls whether infrastructure automation such as service load
@@ -603,8 +610,8 @@ const (
 type GCPPlatformSpec struct{}
 
 // GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.
-// +openshift:validation:FeatureSetAwareXValidation:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation"
-// +openshift:validation:FeatureSetAwareXValidation:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation"
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation"
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation"
 type GCPPlatformStatus struct {
 	// resourceGroupName is the Project ID for new GCP resources created for the cluster.
 	ProjectID string `json:"projectID"`
@@ -621,7 +628,7 @@ type GCPPlatformStatus struct {
 	// +listType=map
 	// +listMapKey=key
 	// +optional
-	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +openshift:enable:FeatureGate=GCPLabelsTags
 	ResourceLabels []GCPResourceLabel `json:"resourceLabels,omitempty"`
 
 	// resourceTags is a list of additional tags to apply to GCP resources created for the cluster.
@@ -632,7 +639,7 @@ type GCPPlatformStatus struct {
 	// +listType=map
 	// +listMapKey=key
 	// +optional
-	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +openshift:enable:FeatureGate=GCPLabelsTags
 	ResourceTags []GCPResourceTag `json:"resourceTags,omitempty"`
 
 	// This field was introduced and removed under tech preview.
@@ -649,7 +656,7 @@ type GCPPlatformStatus struct {
 	//
 	// +default={"dnsType": "PlatformDefault"}
 	// +kubebuilder:default={"dnsType": "PlatformDefault"}
-	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +openshift:enable:FeatureGate=GCPClusterHostedDNS
 	// +optional
 	// +nullable
 	CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"`
@@ -819,8 +826,8 @@ type BareMetalPlatformSpec struct {
 	// Once set, the list cannot be completely removed (but its second entry can).
 	//
 	// +kubebuilder:validation:MaxItems=2
-	// +kubebuilder:validation:XValidation:rule="size(self) == 2 ? self.exists_one(x, x.contains(':')) : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
-	// +listType=set
+	// +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=atomic
 	// +optional
 	APIServerInternalIPs []IP `json:"apiServerInternalIPs"`
 
@@ -834,16 +841,17 @@ type BareMetalPlatformSpec struct {
 	// Once set, the list cannot be completely removed (but its second entry can).
 	//
 	// +kubebuilder:validation:MaxItems=2
-	// +kubebuilder:validation:XValidation:rule="size(self) == 2 ? self.exists_one(x, x.contains(':')) : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
-	// +listType=set
+	// +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=atomic
 	// +optional
 	IngressIPs []IP `json:"ingressIPs"`
 
 	// machineNetworks are IP networks used to connect all the OpenShift cluster
 	// nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6,
 	// for example "10.0.0.0/8" or "fd00::/8".
-	// +listType=set
+	// +listType=atomic
 	// +kubebuilder:validation:MaxItems=32
+	// +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
 	// +optional
 	MachineNetworks []CIDR `json:"machineNetworks"`
 }
@@ -868,7 +876,8 @@ type BareMetalPlatformStatus struct {
 	//
 	// +kubebuilder:validation:Format=ip
 	// +kubebuilder:validation:MaxItems=2
-	// +listType=set
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=atomic
 	APIServerInternalIPs []string `json:"apiServerInternalIPs"`
 
 	// ingressIP is an external IP which routes to the default ingress controller.
@@ -884,7 +893,8 @@ type BareMetalPlatformStatus struct {
 	//
 	// +kubebuilder:validation:Format=ip
 	// +kubebuilder:validation:MaxItems=2
-	// +listType=set
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=atomic
 	IngressIPs []string `json:"ingressIPs"`
 
 	// nodeDNSIP is the IP address for the internal DNS used by the
@@ -898,13 +908,14 @@ type BareMetalPlatformStatus struct {
 	// loadBalancer defines how the load balancer used by the cluster is configured.
// +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"` // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. - // +listType=set + // +listType=atomic // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" // +optional MachineNetworks []CIDR `json:"machineNetworks"` } @@ -946,8 +957,8 @@ type OpenStackPlatformSpec struct { // Once set, the list cannot be completely removed (but its second entry can). // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 ? self.exists_one(x, x.contains(':')) : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=set + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic // +optional APIServerInternalIPs []IP `json:"apiServerInternalIPs"` @@ -961,16 +972,17 @@ type OpenStackPlatformSpec struct { // Once set, the list cannot be completely removed (but its second entry can). // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 ? self.exists_one(x, x.contains(':')) : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - // +listType=set + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic // +optional IngressIPs []IP `json:"ingressIPs"` // machineNetworks are IP networks used to connect all the OpenShift cluster // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, // for example "10.0.0.0/8" or "fd00::/8". - // +listType=set + // +listType=atomic // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" // +optional MachineNetworks []CIDR `json:"machineNetworks"` } @@ -993,7 +1005,8 @@ type OpenStackPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 - // +listType=set + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic APIServerInternalIPs []string `json:"apiServerInternalIPs"` // cloudName is the name of the desired OpenStack cloud in the @@ -1013,7 +1026,8 @@ type OpenStackPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 - // +listType=set + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? 
ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +listType=atomic IngressIPs []string `json:"ingressIPs"` // nodeDNSIP is the IP address for the internal DNS used by the @@ -1031,8 +1045,9 @@ type OpenStackPlatformStatus struct { LoadBalancer *OpenStackPlatformLoadBalancer `json:"loadBalancer,omitempty"` // machineNetworks are IP networks used to connect all the OpenShift cluster nodes. - // +listType=set + // +listType=atomic // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" // +optional MachineNetworks []CIDR `json:"machineNetworks"` } @@ -1080,6 +1095,7 @@ type OvirtPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set APIServerInternalIPs []string `json:"apiServerInternalIPs"` @@ -1096,6 +1112,7 @@ type OvirtPlatformStatus struct { // // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set IngressIPs []string `json:"ingressIPs"` @@ -1105,7 +1122,7 @@ type OvirtPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *OvirtPlatformLoadBalancer `json:"loadBalancer,omitempty"` } @@ -1189,13 +1206,16 @@ type VSpherePlatformTopology struct { ComputeCluster string `json:"computeCluster"` // networks is the list of port group network names within this failure domain. - // Currently, we only support a single interface per RHCOS virtual machine. + // If feature gate VSphereMultiNetworks is enabled, up to 10 network adapters may be defined. + // 10 is the maximum number of virtual network devices which may be attached to a VM as defined by: + // https://configmax.esp.vmware.com/guest?vmwareproduct=vSphere&release=vSphere%208.0&categories=1-0 // The available networks (port groups) can be listed using // `govc ls 'network/*'` - // The single interface should be the absolute path of the form + // Networks should be in the form of an absolute path: // //network/. // +kubebuilder:validation:Required - // +kubebuilder:validation:MaxItems=1 + // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 + // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiNetworks,maxItems=10 // +kubebuilder:validation:MinItems=1 // +listType=atomic Networks []string `json:"networks"` @@ -1235,6 +1255,7 @@ type VSpherePlatformTopology struct { // VSpherePlatformFailureDomainSpec. // For example, for zone=zonea, region=region1, and infrastructure name=test, // the template path would be calculated as //vm/test-rhcos-region1-zonea. 
+	// +openshift:enable:FeatureGate=VSphereControlPlaneMachineSet
 	// +kubebuilder:validation:MinLength=1
 	// +kubebuilder:validation:MaxLength=2048
 	// +kubebuilder:validation:Pattern=`^/.*?/vm/.*?`
@@ -1321,14 +1342,22 @@ type VSpherePlatformNodeNetworking struct {
 // use these fields for configuration.
 // +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set"
 // +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.vcenters) && has(self.vcenters) ? size(self.vcenters) < 2 : true",message="vcenters can have at most 1 item when configured post-install"
 type VSpherePlatformSpec struct {
 	// vcenters holds the connection details for services to communicate with vCenter.
-	// Currently, only a single vCenter is supported.
+	// Currently, only a single vCenter is supported, but in tech preview 3 vCenters are supported.
+	// Once the cluster has been installed, you are unable to change the current number of defined
+	// vCenters except in the case where the cluster has been upgraded from a version of OpenShift
+	// where the vsphere platform spec was not present. You may make modifications to the existing
+	// vCenters that are defined in the vcenters list in order to match with any added or modified
+	// failure domains.
 	// ---
 	// + If VCenters is not defined use the existing cloud-config configmap defined
 	// + in openshift-config.
-	// +kubebuilder:validation:MaxItems=1
 	// +kubebuilder:validation:MinItems=0
+	// +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1
+	// +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiVCenters,maxItems=3
+	// +kubebuilder:validation:XValidation:rule="size(self) != size(oldSelf) ? size(oldSelf) == 0 && size(self) < 2 : true",message="vcenters cannot be added or removed once set"
 	// +listType=atomic
 	// +optional
 	VCenters []VSpherePlatformVCenterSpec `json:"vcenters,omitempty"`
@@ -1359,8 +1388,8 @@ type VSpherePlatformSpec struct {
 	// Once set, the list cannot be completely removed (but its second entry can).
 	//
 	// +kubebuilder:validation:MaxItems=2
-	// +kubebuilder:validation:XValidation:rule="size(self) == 2 ? self.exists_one(x, x.contains(':')) : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
-	// +listType=set
+	// +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=atomic
 	// +optional
 	APIServerInternalIPs []IP `json:"apiServerInternalIPs"`
 
@@ -1374,16 +1403,17 @@ type VSpherePlatformSpec struct {
 	// Once set, the list cannot be completely removed (but its second entry can).
 	//
 	// +kubebuilder:validation:MaxItems=2
-	// +kubebuilder:validation:XValidation:rule="size(self) == 2 ? self.exists_one(x, x.contains(':')) : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
-	// +listType=set
+	// +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=atomic
 	// +optional
 	IngressIPs []IP `json:"ingressIPs"`
 
 	// machineNetworks are IP networks used to connect all the OpenShift cluster
 	// nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6,
 	// for example "10.0.0.0/8" or "fd00::/8".
-	// +listType=set
+	// +listType=atomic
 	// +kubebuilder:validation:MaxItems=32
+	// +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
 	// +optional
 	MachineNetworks []CIDR `json:"machineNetworks"`
 }
@@ -1406,7 +1436,8 @@ type VSpherePlatformStatus struct {
 	//
 	// +kubebuilder:validation:Format=ip
 	// +kubebuilder:validation:MaxItems=2
-	// +listType=set
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=atomic
 	APIServerInternalIPs []string `json:"apiServerInternalIPs"`
 
 	// ingressIP is an external IP which routes to the default ingress controller.
@@ -1422,7 +1453,8 @@ type VSpherePlatformStatus struct {
 	//
 	// +kubebuilder:validation:Format=ip
 	// +kubebuilder:validation:MaxItems=2
-	// +listType=set
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+	// +listType=atomic
 	IngressIPs []string `json:"ingressIPs"`
 
 	// nodeDNSIP is the IP address for the internal DNS used by the
@@ -1436,13 +1468,14 @@ type VSpherePlatformStatus struct {
 	// loadBalancer defines how the load balancer used by the cluster is configured.
 	// +default={"type": "OpenShiftManagedDefault"}
 	// +kubebuilder:default={"type": "OpenShiftManagedDefault"}
-	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +openshift:enable:FeatureGate=BareMetalLoadBalancer
 	// +optional
 	LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"`
 
 	// machineNetworks are IP networks used to connect all the OpenShift cluster nodes.
-	// +listType=set
+	// +listType=atomic
 	// +kubebuilder:validation:MaxItems=32
+	// +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
 	// +optional
 	MachineNetworks []CIDR `json:"machineNetworks"`
 }
@@ -1451,7 +1484,7 @@ type VSpherePlatformStatus struct {
 // override existing defaults of IBM Cloud Services.
 type IBMCloudServiceEndpoint struct {
 	// name is the name of the IBM Cloud service.
-	// Possible values are: CIS, COS, DNSServices, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC.
+	// Possible values are: CIS, COS, COSConfig, DNSServices, GlobalCatalog, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC.
 	// For example, the IBM Cloud Private IAM service could be configured with the
 	// service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com`
 	// Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured
@@ -1807,6 +1840,7 @@ type NutanixPlatformStatus struct {
 	//
 	// +kubebuilder:validation:Format=ip
 	// +kubebuilder:validation:MaxItems=2
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
 	// +listType=set
 	APIServerInternalIPs []string `json:"apiServerInternalIPs"`
 
@@ -1823,13 +1857,14 @@ type NutanixPlatformStatus struct {
 	//
 	// +kubebuilder:validation:Format=ip
 	// +kubebuilder:validation:MaxItems=2
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
 	// +listType=set
 	IngressIPs []string `json:"ingressIPs"`
 
 	// loadBalancer defines how the load balancer used by the cluster is configured.
 	// +default={"type": "OpenShiftManagedDefault"}
 	// +kubebuilder:default={"type": "OpenShiftManagedDefault"}
-	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +openshift:enable:FeatureGate=BareMetalLoadBalancer
 	// +optional
 	LoadBalancer *NutanixPlatformLoadBalancer `json:"loadBalancer,omitempty"`
 }
@@ -1851,17 +1886,13 @@ type InfrastructureList struct {
 }
 
 // IP is an IP address (for example, "10.0.0.0" or "fd00::").
-// +kubebuilder:validation:Pattern=`(^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*)`
-// + ---
-// + The regex for the IPv4 and IPv6 address was taken from
-// + https://blog.markhatton.co.uk/2011/03/15/regular-expressions-for-ip-addresses-cidr-ranges-and-hostnames/
-// + The resulting regex is an OR of both regexes.
+// +kubebuilder:validation:XValidation:rule="isIP(self)",message="value must be a valid IP address"
+// +kubebuilder:validation:MaxLength:=39
+// +kubebuilder:validation:MinLength:=1
 type IP string
 
 // CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8").
-// +kubebuilder:validation:Pattern=`(^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$)`
-// + ---
-// + The regex for the IPv4 and IPv6 CIDR range was taken from
-// + https://blog.markhatton.co.uk/2011/03/15/regular-expressions-for-ip-addresses-cidr-ranges-and-hostnames/
-// + The resulting regex is an OR of both regexes.
+// +kubebuilder:validation:XValidation:rule="isCIDR(self)",message="value must be a valid CIDR network address"
+// +kubebuilder:validation:MaxLength:=43
+// +kubebuilder:validation:MinLength:=1
 type CIDR string
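
Reviewer aside, not part of the vendored patch: the regex Patterns deleted above are replaced by CEL's isIP()/isCIDR() functions, and the list rules throughout this file compare ip().family(). Go's net/netip expresses the same checks; the sketch below (dualStackOK is a hypothetical helper) mirrors the "at most one IPv4 and at most one IPv6" rule:

package main

import (
	"fmt"
	"net/netip"
)

// dualStackOK mirrors the CEL rule on two-entry IP lists: when both
// entries parse as IPs, they must be of different address families.
func dualStackOK(ips []string) bool {
	if len(ips) != 2 {
		return true
	}
	a, errA := netip.ParseAddr(ips[0])
	b, errB := netip.ParseAddr(ips[1])
	if errA != nil || errB != nil {
		return true // the separate isIP() check reports parse failures
	}
	return a.Is4() != b.Is4()
}

func main() {
	fmt.Println(dualStackOK([]string{"10.0.0.1", "fd00::1"}))  // true
	fmt.Println(dualStackOK([]string{"10.0.0.1", "10.0.0.2"})) // false

	// isIP(self) and isCIDR(self) correspond to these parses succeeding.
	_, ipErr := netip.ParseAddr("fd00::")
	_, cidrErr := netip.ParsePrefix("10.0.0.0/8")
	fmt.Println(ipErr == nil, cidrErr == nil) // true true
}
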
-// ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$ -// -// The right operand of the | is a new pattern that mimics the current API route admission validation on hostname, -// except that it allows hostnames longer than the maximum length: -// ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ -// -// Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname -// was saved via validation by the incorrect left operand of the | operator. -// +// Hostname is a host name as defined by RFC-1123. +// + --- +// + The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it +// + allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric +// + characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. +// + ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$ +// + +// + The right operand of the | is a new pattern that mimics the current API route admission validation on hostname, +// + except that it allows hostnames longer than the maximum length: +// + ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ +// + +// + Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname +// + was saved via validation by the incorrect left operand of the | operator. +// + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$` type Hostname string diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index 794f3db7b..1eeae69da 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -1,6 +1,9 @@ package v1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +genclient:nonNamespaced @@ -10,7 +13,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Please view network.spec for an explanation on what applies when configuring this resource. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 // +openshift:compatibility-gen:level=1 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=networks,scope=Cluster +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Network struct { metav1.TypeMeta `json:",inline"` @@ -34,21 +42,24 @@ type Network struct { // As a general rule, this SHOULD NOT be read directly. Instead, you should // consume the NetworkStatus, as it indicates the currently deployed configuration. 
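Before moving on to the network types: the Hostname documentation above describes two OR-ed patterns. A small sketch follows, under the assumption that Go's RE2 engine is close enough to the API server's matcher for illustration; the variable name and sample hostnames are invented for the example.

```go
package main

import (
	"fmt"
	"regexp"
)

// rfc1123ish is the stricter right-hand operand of the combined pattern:
// lower-case DNS labels of 1-63 characters joined by dots, each starting and
// ending in an alphanumeric. The legacy left-hand operand additionally
// accepts upper-case letters, which is why both remain OR-ed together.
var rfc1123ish = regexp.MustCompile(`^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$`)

func main() {
	for _, h := range []string{"apps.example.com", "Apps.Example.Com", "-bad-.example.com"} {
		fmt.Printf("%-20s %v\n", h, rfc1123ish.MatchString(h))
	}
}
```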
// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkDiagnosticsConfig,rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled" type NetworkSpec struct { // IP address pool to use for pod IPs. // This field is immutable after installation. + // +listType=atomic ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` // IP address pool for services. // Currently, we only support a single entry here. // This field is immutable after installation. + // +listType=atomic ServiceNetwork []string `json:"serviceNetwork"` - // NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). + // NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes). // This should match a value that the cluster-network-operator understands, // or else no networking will be installed. // Currently supported values are: - // - OpenShiftSDN + // - OVNKubernetes // This field is immutable after installation. NetworkType string `json:"networkType"` @@ -66,18 +77,31 @@ type NetworkSpec struct { // installed. // +kubebuilder:validation:Pattern=`^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$` ServiceNodePortRange string `json:"serviceNodePortRange,omitempty"` + + // networkDiagnostics defines network diagnostics configuration. + // + // Takes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. + // If networkDiagnostics is not specified or is empty, + // and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, + // the network diagnostics feature will be disabled. + // + // +optional + // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig + NetworkDiagnostics NetworkDiagnostics `json:"networkDiagnostics"` } // NetworkStatus is the current network configuration. type NetworkStatus struct { // IP address pool to use for pod IPs. + // +listType=atomic ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` // IP address pool for services. // Currently, we only support a single entry here. + // +listType=atomic ServiceNetwork []string `json:"serviceNetwork,omitempty"` - // NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + // NetworkType is the plugin that is deployed (e.g. OVNKubernetes). NetworkType string `json:"networkType,omitempty"` // ClusterNetworkMTU is the MTU for inter-pod networking. @@ -87,14 +111,13 @@ type NetworkStatus struct { Migration *NetworkMigration `json:"migration,omitempty"` // conditions represents the observations of a network.config current state. 
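To make the new networkDiagnostics knob concrete, here is a short sketch built only from the types and constants introduced in this diff (the vendored configv1 package and corev1 tolerations); the selector values mirror the documented defaults rather than anything mandatory.

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Enable diagnostics explicitly, pin the source deployment to Linux nodes,
	// and keep the tolerate-everything default on the target daemonset.
	spec := configv1.NetworkSpec{
		NetworkType: "OVNKubernetes",
		NetworkDiagnostics: configv1.NetworkDiagnostics{
			Mode: configv1.NetworkDiagnosticsAll,
			SourcePlacement: configv1.NetworkDiagnosticsSourcePlacement{
				NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
			},
			TargetPlacement: configv1.NetworkDiagnosticsTargetPlacement{
				Tolerations: []corev1.Toleration{{Operator: corev1.TolerationOpExists}},
			},
		},
	}
	// Per the CEL rule on NetworkSpec, setting Mode to Disabled while also
	// setting either placement would be rejected by the API server.
	fmt.Printf("%+v\n", spec.NetworkDiagnostics)
}
```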
- // Known .status.conditions.type are: "NetworkTypeMigrationInProgress", "NetworkTypeMigrationMTUReady", - // "NetworkTypeMigrationTargetCNIAvailable", "NetworkTypeMigrationTargetCNIInUse" - // and "NetworkTypeMigrationOriginalCNIPurged" + // Known .status.conditions.type are: "NetworkDiagnosticsAvailable" // +optional // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type + // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } @@ -127,6 +150,7 @@ type ExternalIPConfig struct { // ExternalIPPolicy rules. // Currently, only one entry may be provided. // +optional + // +listType=atomic AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"` } @@ -135,11 +159,13 @@ type ExternalIPConfig struct { // The policy controller always allows automatically assigned external IPs. type ExternalIPPolicy struct { // allowedCIDRs is the list of allowed CIDRs. + // +listType=atomic AllowedCIDRs []string `json:"allowedCIDRs,omitempty"` // rejectedCIDRs is the list of disallowed CIDRs. These take precedence // over allowedCIDRs. // +optional + // +listType=atomic RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"` } @@ -157,15 +183,15 @@ type NetworkList struct { Items []Network `json:"items"` } -// NetworkMigration represents the cluster network configuration. +// NetworkMigration represents the network migration status. type NetworkMigration struct { - // NetworkType is the target plugin that is to be deployed. - // Currently supported values are: OpenShiftSDN, OVNKubernetes - // +kubebuilder:validation:Enum={"OpenShiftSDN","OVNKubernetes"} + // NetworkType is the target plugin that is being deployed. + // DEPRECATED: network type migration is no longer supported, + // so this should always be unset. // +optional NetworkType string `json:"networkType,omitempty"` - // MTU contains the MTU migration configuration. + // MTU is the MTU configuration that is being deployed. // +optional MTU *MTUMigration `json:"mtu,omitempty"` } @@ -192,3 +218,89 @@ type MTUMigrationValues struct { // +optional From *uint32 `json:"from,omitempty"` } + +// NetworkDiagnosticsMode is an enumeration of the available network diagnostics modes +// Valid values are "", "All", "Disabled". +// +kubebuilder:validation:Enum:="";All;Disabled +type NetworkDiagnosticsMode string + +const ( + // NetworkDiagnosticsNoOpinion means that the user has no opinion and the platform is left + // to choose a reasonable default. The current default is All and is subject to change over time. + NetworkDiagnosticsNoOpinion NetworkDiagnosticsMode = "" + // NetworkDiagnosticsAll means that all network diagnostics checks are enabled + NetworkDiagnosticsAll NetworkDiagnosticsMode = "All" + // NetworkDiagnosticsDisabled means that network diagnostics is disabled + NetworkDiagnosticsDisabled NetworkDiagnosticsMode = "Disabled" +) + +// NetworkDiagnostics defines network diagnostics configuration + +type NetworkDiagnostics struct { + // mode controls the network diagnostics mode + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is All. + // + // +optional + Mode NetworkDiagnosticsMode `json:"mode"` + + // sourcePlacement controls the scheduling of network diagnostics source deployment + // + // See NetworkDiagnosticsSourcePlacement for more details about default values.
+ // + // +optional + SourcePlacement NetworkDiagnosticsSourcePlacement `json:"sourcePlacement"` + + // targetPlacement controls the scheduling of network diagnostics target daemonset + // + // See NetworkDiagnosticsTargetPlacement for more details about default values. + // + // +optional + TargetPlacement NetworkDiagnosticsTargetPlacement `json:"targetPlacement"` +} + +// NetworkDiagnosticsSourcePlacement defines node scheduling configuration for network diagnostics source components +type NetworkDiagnosticsSourcePlacement struct { + // nodeSelector is the node selector applied to network diagnostics components + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `kubernetes.io/os: linux`. + // + // +optional + NodeSelector map[string]string `json:"nodeSelector"` + + // tolerations is a list of tolerations applied to network diagnostics components + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is an empty list. + // + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations"` +} + +// NetworkDiagnosticsTargetPlacement defines node scheduling configuration for network diagnostics target components +type NetworkDiagnosticsTargetPlacement struct { + // nodeSelector is the node selector applied to network diagnostics components + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `kubernetes.io/os: linux`. + // + // +optional + NodeSelector map[string]string `json:"nodeSelector"` + + // tolerations is a list of tolerations applied to network diagnostics components + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is `- operator: "Exists"` which means that all taints are tolerated. + // + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go index 233c89d9c..b3b1b62c4 100644 --- a/vendor/github.com/openshift/api/config/v1/types_node.go +++ b/vendor/github.com/openshift/api/config/v1/types_node.go @@ -14,8 +14,12 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1107 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true // +kubebuilder:resource:path=nodes,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Node struct { metav1.TypeMeta `json:",inline"` @@ -44,7 +48,15 @@ type NodeSpec struct { WorkerLatencyProfile WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` } -type NodeStatus struct{} +type NodeStatus struct { + // conditions contain the details and the current state of the nodes.config object + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} // +kubebuilder:validation:Enum=v1;v2;"" type CgroupMode string diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go index 451a5ec38..dce08a17f 100644 --- a/vendor/github.com/openshift/api/config/v1/types_oauth.go +++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -14,6 +14,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=oauths,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type OAuth struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go index ba2c96343..1fddfa51e 100644 --- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -38,9 +38,14 @@ type OperatorHubStatus struct { // enabled to disabled and vice versa. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=operatorhubs,scope=Cluster // +kubebuilder:subresource:status // +genclient // +genclient:nonNamespaced +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=marketplace,operatorOrdering=01 +// +openshift:capability=marketplace // +openshift:compatibility-gen:level=1 type OperatorHub struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go index 85afb90c2..78fd3f41a 100644 --- a/vendor/github.com/openshift/api/config/v1/types_project.go +++ b/vendor/github.com/openshift/api/config/v1/types_project.go @@ -10,6 +10,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
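Looking back at the NodeStatus change just above: nodes.config gains a standard Conditions list. A minimal sketch of writing to it with apimachinery's condition helper; the condition type and reason are invented here, since the API change only adds the list itself.

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	node := configv1.Node{}
	// SetStatusCondition fills LastTransitionTime when unset and
	// de-duplicates entries by Type.
	meta.SetStatusCondition(&node.Status.Conditions, metav1.Condition{
		Type:    "Reconciled",          // hypothetical condition type
		Status:  metav1.ConditionTrue,
		Reason:  "AsExpected",          // hypothetical reason
		Message: "worker latency profile applied",
	})
	fmt.Println(node.Status.Conditions[0].Type, node.Status.Conditions[0].Status)
}
```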
// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=projects,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Project struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go index 40ed296d6..2dfc66b1c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -12,6 +12,12 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=proxies,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Proxy struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go index 07c4d2e42..2749f4f70 100644 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -11,6 +11,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=schedulers,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type Scheduler struct { metav1.TypeMeta `json:",inline"` @@ -43,7 +49,7 @@ type SchedulerSpec struct { // +optional Profile SchedulerProfile `json:"profile,omitempty"` // profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles. - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=DynamicResourceAllocation // +optional ProfileCustomizations ProfileCustomizations `json:"profileCustomizations"` // defaultNodeSelector helps set the cluster-wide default node selector to diff --git a/vendor/github.com/openshift/api/config/v1/types_testreporting.go b/vendor/github.com/openshift/api/config/v1/types_testreporting.go new file mode 100644 index 000000000..4d642e060 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_testreporting.go @@ -0,0 +1,46 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into +// the payload for later analysis on a per-payload basis. +// This doesn't need any CRD because it's never stored in the cluster. 
+// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:internal +type TestReporting struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec TestReportingSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status TestReportingStatus `json:"status"` +} + +type TestReportingSpec struct { + // TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing. + TestsForFeatureGates []FeatureGateTests `json:"testsForFeatureGates"` +} + +type FeatureGateTests struct { + // FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance. + FeatureGate string `json:"featureGate"` + + // Tests contains an item for every TestName + Tests []TestDetails `json:"tests"` +} + +type TestDetails struct { + // TestName is the name of the test as it appears in junit XMLs. + // It does not include the suite name since the same test can be executed in many suites. + TestName string `json:"testName"` +} + +type TestReportingStatus struct { +} diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go index c5dea1a03..b18ef647c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go +++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go @@ -211,6 +211,7 @@ type TLSProfileSpec struct { // ciphers: // - DES-CBC3-SHA // + // +listType=atomic Ciphers []string `json:"ciphers"` // minTLSVersion is used to specify the minimal version of the TLS protocol // that is negotiated during the TLS handshake. For example, to use TLS diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index ff9409905..069346998 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -2039,23 +2039,6 @@ func (in *FeatureGateAttributes) DeepCopy() *FeatureGateAttributes { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureGateDescription) DeepCopyInto(out *FeatureGateDescription) { - *out = *in - out.FeatureGateAttributes = in.FeatureGateAttributes - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateDescription. -func (in *FeatureGateDescription) DeepCopy() *FeatureGateDescription { - if in == nil { - return nil - } - out := new(FeatureGateDescription) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FeatureGateDetails) DeepCopyInto(out *FeatureGateDetails) { *out = *in @@ -2082,32 +2065,6 @@ func (in *FeatureGateDetails) DeepCopy() *FeatureGateDetails { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
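A brief aside on the TestReporting types added a little earlier: they are plain Go structs that never become a CRD, so the only contract is their shape. A sketch of how they compose; the gate and test names are placeholders, since a real payload would map each FeatureGate to the junit test names that exercise it.

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	tr := configv1.TestReporting{
		Spec: configv1.TestReportingSpec{
			TestsForFeatureGates: []configv1.FeatureGateTests{{
				FeatureGate: "NetworkDiagnosticsConfig", // placeholder gate
				Tests: []configv1.TestDetails{
					{TestName: "network diagnostics reports NetworkDiagnosticsAvailable"}, // placeholder name
				},
			}},
		},
	}
	fmt.Println(len(tr.Spec.TestsForFeatureGates), "gate(s) reported")
}
```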
-func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]FeatureGateDescription, len(*in)) - copy(*out, *in) - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]FeatureGateDescription, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled. -func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled { - if in == nil { - return nil - } - out := new(FeatureGateEnabledDisabled) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) { *out = *in @@ -2209,6 +2166,27 @@ func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateTests) DeepCopyInto(out *FeatureGateTests) { + *out = *in + if in.Tests != nil { + in, out := &in.Tests, &out.Tests + *out = make([]TestDetails, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateTests. +func (in *FeatureGateTests) DeepCopy() *FeatureGateTests { + if in == nil { + return nil + } + out := new(FeatureGateTests) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) { *out = *in @@ -3597,6 +3575,84 @@ func (in *Network) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDiagnostics) DeepCopyInto(out *NetworkDiagnostics) { + *out = *in + in.SourcePlacement.DeepCopyInto(&out.SourcePlacement) + in.TargetPlacement.DeepCopyInto(&out.TargetPlacement) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnostics. +func (in *NetworkDiagnostics) DeepCopy() *NetworkDiagnostics { + if in == nil { + return nil + } + out := new(NetworkDiagnostics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDiagnosticsSourcePlacement) DeepCopyInto(out *NetworkDiagnosticsSourcePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsSourcePlacement. +func (in *NetworkDiagnosticsSourcePlacement) DeepCopy() *NetworkDiagnosticsSourcePlacement { + if in == nil { + return nil + } + out := new(NetworkDiagnosticsSourcePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkDiagnosticsTargetPlacement) DeepCopyInto(out *NetworkDiagnosticsTargetPlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsTargetPlacement. +func (in *NetworkDiagnosticsTargetPlacement) DeepCopy() *NetworkDiagnosticsTargetPlacement { + if in == nil { + return nil + } + out := new(NetworkDiagnosticsTargetPlacement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NetworkList) DeepCopyInto(out *NetworkList) { *out = *in @@ -3669,6 +3725,7 @@ func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { *out = new(ExternalIPConfig) (*in).DeepCopyInto(*out) } + in.NetworkDiagnostics.DeepCopyInto(&out.NetworkDiagnostics) return } @@ -3726,7 +3783,7 @@ func (in *Node) DeepCopyInto(out *Node) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -3800,6 +3857,13 @@ func (in *NodeSpec) DeepCopy() *NodeSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -5494,6 +5558,81 @@ func (in *TemplateReference) DeepCopy() *TemplateReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestDetails) DeepCopyInto(out *TestDetails) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestDetails. +func (in *TestDetails) DeepCopy() *TestDetails { + if in == nil { + return nil + } + out := new(TestDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestReporting) DeepCopyInto(out *TestReporting) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReporting. +func (in *TestReporting) DeepCopy() *TestReporting { + if in == nil { + return nil + } + out := new(TestReporting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
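For readers skimming the generated deepcopy functions above: the distinction between a flat copy() and element-wise DeepCopyInto matters for aliasing. A short sketch of the behavior the generated code guarantees, using the placement type introduced in this diff:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	src := configv1.NetworkDiagnosticsSourcePlacement{
		NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
		Tolerations:  []corev1.Toleration{{Operator: corev1.TolerationOpExists}},
	}
	cp := src.DeepCopy()
	cp.NodeSelector["kubernetes.io/os"] = "windows"
	// The generated code copies the map key-by-key and each Toleration via its
	// own DeepCopyInto (Toleration carries a *int64 TolerationSeconds), so the
	// original is not aliased by the copy.
	fmt.Println(src.NodeSelector["kubernetes.io/os"]) // still "linux"
}
```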
+func (in *TestReportingSpec) DeepCopyInto(out *TestReportingSpec) { + *out = *in + if in.TestsForFeatureGates != nil { + in, out := &in.TestsForFeatureGates, &out.TestsForFeatureGates + *out = make([]FeatureGateTests, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingSpec. +func (in *TestReportingSpec) DeepCopy() *TestReportingSpec { + if in == nil { + return nil + } + out := new(TestReportingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestReportingStatus) DeepCopyInto(out *TestReportingStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingStatus. +func (in *TestReportingStatus) DeepCopy() *TestReportingStatus { + if in == nil { + return nil + } + out := new(TestReportingStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenClaimMapping) DeepCopyInto(out *TokenClaimMapping) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 000000000..fa5dd4e31 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,509 @@ +apiservers.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: apiservers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: APIServer + Labels: {} + PluralName: apiservers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +authentications.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: authentications.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ExternalOIDC + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Authentication + Labels: {} + PluralName: authentications + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +builds.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: builds.config.openshift.io + Capability: Build + Category: "" + FeatureGates: [] + FilenameOperatorName: openshift-controller-manager + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Build + Labels: {} + PluralName: builds + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +clusteroperators.config.openshift.io: + Annotations: + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/497 + CRDName: 
clusteroperators.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: cluster-version-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_00" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterOperator + Labels: {} + PluralName: clusteroperators + PrinterColumns: + - description: The version the operator is at. + jsonPath: .status.versions[?(@.name=="operator")].version + name: Version + type: string + - description: Whether the operator is running and stable. + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Whether the operator is processing changes. + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Whether the operator is degraded. + jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: The time the operator's Available status last changed. + jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime + name: Since + type: date + Scope: Cluster + ShortNames: + - co + TopLevelFeatureGates: [] + Version: v1 + +clusterversions.config.openshift.io: + Annotations: + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/495 + CRDName: clusterversions.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SignatureStores + FilenameOperatorName: cluster-version-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_00" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterVersion + Labels: {} + PluralName: clusterversions + PrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +consoles.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: consoles.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Console + Labels: {} + PluralName: consoles + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +dnses.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: dnses.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: DNS + Labels: {} + PluralName: dnses + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +featuregates.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: 
https://github.com/openshift/api/pull/470 + CRDName: featuregates.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: FeatureGate + Labels: {} + PluralName: featuregates + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +images.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: images.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ImageStreamImportMode + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Image + Labels: {} + PluralName: images + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +imagecontentpolicies.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/874 + CRDName: imagecontentpolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageContentPolicy + Labels: {} + PluralName: imagecontentpolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +imagedigestmirrorsets.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1126 + CRDName: imagedigestmirrorsets.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageDigestMirrorSet + Labels: {} + PluralName: imagedigestmirrorsets + PrinterColumns: [] + Scope: Cluster + ShortNames: + - idms + TopLevelFeatureGates: [] + Version: v1 + +imagetagmirrorsets.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1126 + CRDName: imagetagmirrorsets.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageTagMirrorSet + Labels: {} + PluralName: imagetagmirrorsets + PrinterColumns: [] + Scope: Cluster + ShortNames: + - itms + TopLevelFeatureGates: [] + Version: v1 + +infrastructures.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: infrastructures.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - BareMetalLoadBalancer + - GCPClusterHostedDNS + - GCPLabelsTags + - VSphereControlPlaneMachineSet + - VSphereMultiNetworks + - VSphereMultiVCenters + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Infrastructure + Labels: {} + PluralName: infrastructures + PrinterColumns: [] + Scope: Cluster + ShortNames: null + 
TopLevelFeatureGates: [] + Version: v1 + +ingresses.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: ingresses.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Ingress + Labels: {} + PluralName: ingresses + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +networks.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: networks.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - NetworkDiagnosticsConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: false + KindName: Network + Labels: {} + PluralName: networks + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +nodes.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1107 + CRDName: nodes.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Node + Labels: {} + PluralName: nodes + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +oauths.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: oauths.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: OAuth + Labels: {} + PluralName: oauths + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +operatorhubs.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: operatorhubs.config.openshift.io + Capability: marketplace + Category: "" + FeatureGates: [] + FilenameOperatorName: marketplace + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: config.openshift.io + HasStatus: true + KindName: OperatorHub + Labels: {} + PluralName: operatorhubs + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +projects.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: projects.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Project + Labels: {} + PluralName: projects + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +proxies.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: 
proxies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: config.openshift.io + HasStatus: true + KindName: Proxy + Labels: {} + PluralName: proxies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +schedulers.config.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: schedulers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - DynamicResourceAllocation + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Scheduler + Labels: {} + PluralName: schedulers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index f751368b3..c580bd834 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -1016,6 +1016,7 @@ var map_ImageSpec = map[string]string{ "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.", "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.", + "imageStreamImportMode": "imageStreamImportMode controls the import mode behaviour of imagestreams. It can be set to `Legacy` or `PreserveOriginal` or the empty string. If this value is specified, this setting is applied to all newly created imagestreams which do not have the value set. `Legacy` indicates that the legacy behaviour should be used. For manifest lists, the legacy behaviour will discard the manifest list and import a single sub-manifest. In this case, the platform is chosen in the following order of priority: 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists, the manifest list and all its sub-manifests will be imported. 
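One observation on the zz_generated.featuregated-crd-manifests.yaml entries above: the FilenameRunLevel, FilenameOperatorName, and FilenameOperatorOrdering fields appear to drive the names of the generated CRD manifests. A sketch of that composition follows; the exact format string is inferred from the field values, not taken from the generator's source.

```go
package main

import "fmt"

// manifestName sketches how the Filename* fields may compose into a generated
// CRD manifest name. This is an assumption for illustration only.
func manifestName(runLevel, operatorName, ordering, plural string) string {
	return fmt.Sprintf("%s_%s_%s_%s.crd.yaml", runLevel, operatorName, ordering, plural)
}

func main() {
	fmt.Println(manifestName("0000_10", "config-operator", "01", "networks"))
	// 0000_10_config-operator_01_networks.crd.yaml
}
```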
When empty, the behaviour will be decided based on the payload type advertised by the ClusterVersion status, i.e single arch payload implies the import mode is Legacy and multi payload implies PreserveOriginal.", } func (ImageSpec) SwaggerDoc() map[string]string { @@ -1025,6 +1026,7 @@ func (ImageSpec) SwaggerDoc() map[string]string { var map_ImageStatus = map[string]string{ "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname.", "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "imageStreamImportMode": "imageStreamImportMode controls the import mode behaviour of imagestreams. It can be `Legacy` or `PreserveOriginal`. `Legacy` indicates that the legacy behaviour should be used. For manifest lists, the legacy behaviour will discard the manifest list and import a single sub-manifest. In this case, the platform is chosen in the following order of priority: 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. `PreserveOriginal` indicates that the original manifest will be preserved. For manifest lists, the manifest list and all its sub-manifests will be imported. This value will be reconciled based on either the spec value or if no spec value is specified, the image registry operator would look at the ClusterVersion status to determine the payload type and set the import mode accordingly, i.e single arch payload implies the import mode is Legacy and multi payload implies PreserveOriginal.", } func (ImageStatus) SwaggerDoc() map[string]string { @@ -1439,7 +1441,7 @@ func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { var map_IBMCloudServiceEndpoint = map[string]string{ "": "IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services.", - "name": "name is the name of the IBM Cloud service. Possible values are: CIS, COS, DNSServices, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`", + "name": "name is the name of the IBM Cloud service. Possible values are: CIS, COS, COSConfig, DNSServices, GlobalCatalog, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`", "url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. 
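The IBMCloudServiceEndpoint doc just above is concrete enough to demonstrate. A minimal sketch, assuming the vendored configv1 package exposes the two fields named in the swagger map (Name and URL); the values are lifted directly from the doc text:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// "IAM" must be one of the listed service names and the URL must use the
	// https scheme, per the documentation above.
	ep := configv1.IBMCloudServiceEndpoint{
		Name: "IAM",
		URL:  "https://private.iam.cloud.ibm.com",
	}
	fmt.Printf("override %s -> %s\n", ep.Name, ep.URL)
}
```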
This must be provided and cannot be empty.", } @@ -1778,7 +1780,7 @@ func (VSpherePlatformNodeNetworkingSpec) SwaggerDoc() map[string]string { var map_VSpherePlatformSpec = map[string]string{ "": "VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. In the future the cloud provider operator, storage operator and machine operator will use these fields for configuration.", - "vcenters": "vcenters holds the connection details for services to communicate with vCenter. Currently, only a single vCenter is supported.", + "vcenters": "vcenters holds the connection details for services to communicate with vCenter. Currently, only a single vCenter is supported, but in tech preview 3 vCenters are supported. Once the cluster has been installed, you are unable to change the current number of defined vCenters except in the case where the cluster has been upgraded from a version of OpenShift where the vsphere platform spec was not present. You may make modifications to the existing vCenters that are defined in the vcenters list in order to match with any added or modified failure domains.", "failureDomains": "failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used.", "nodeNetworking": "nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found.", "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).", @@ -1809,7 +1811,7 @@ var map_VSpherePlatformTopology = map[string]string{ "": "VSpherePlatformTopology holds the required and optional vCenter objects - datacenter, computeCluster, networks, datastore and resourcePool - to provision virtual machines.", "datacenter": "datacenter is the name of vCenter datacenter in which virtual machines will be located. The maximum length of the datacenter name is 80 characters.", "computeCluster": "computeCluster the absolute path of the vCenter cluster in which virtual machine will be located. The absolute path is of the form //host/. The maximum length of the path is 2048 characters.", - "networks": "networks is the list of port group network names within this failure domain. Currently, we only support a single interface per RHCOS virtual machine. The available networks (port groups) can be listed using `govc ls 'network/*'` The single interface should be the absolute path of the form //network/.", + "networks": "networks is the list of port group network names within this failure domain. If feature gate VSphereMultiNetworks is enabled, up to 10 network adapters may be defined. 
10 is the maximum number of virtual network devices which may be attached to a VM as defined by: https://configmax.esp.vmware.com/guest?vmwareproduct=vSphere&release=vSphere%208.0&categories=1-0 The available networks (port groups) can be listed using `govc ls 'network/*'` Networks should be in the form of an absolute path: //network/.", "datastore": "datastore is the absolute path of the datastore in which the virtual machine is located. The absolute path is of the form //datastore/ The maximum length of the path is 2048 characters.", "resourcePool": "resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form //host//Resources/. The maximum length of the path is 2048 characters.", "folder": "folder is the absolute path of the folder where virtual machines are located. The absolute path is of the form //vm/. The maximum length of the path is 2048 characters.", @@ -1987,6 +1989,36 @@ func (Network) SwaggerDoc() map[string]string { return map_Network } +var map_NetworkDiagnostics = map[string]string{ + "mode": "mode controls the network diagnostics mode\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is All.", + "sourcePlacement": "sourcePlacement controls the scheduling of network diagnostics source deployment\n\nSee NetworkDiagnosticsSourcePlacement for more details about default values.", + "targetPlacement": "targetPlacement controls the scheduling of network diagnostics target daemonset\n\nSee NetworkDiagnosticsTargetPlacement for more details about default values.", +} + +func (NetworkDiagnostics) SwaggerDoc() map[string]string { + return map_NetworkDiagnostics +} + +var map_NetworkDiagnosticsSourcePlacement = map[string]string{ + "": "NetworkDiagnosticsSourcePlacement defines node scheduling configuration for network diagnostics source components", + "nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.", + "tolerations": "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is an empty list.", +} + +func (NetworkDiagnosticsSourcePlacement) SwaggerDoc() map[string]string { + return map_NetworkDiagnosticsSourcePlacement +} + +var map_NetworkDiagnosticsTargetPlacement = map[string]string{ + "": "NetworkDiagnosticsTargetPlacement defines node scheduling configuration for network diagnostics target components", + "nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.", + "tolerations": "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.
The current default is `- operator: \"Exists\"` which means that all taints are tolerated.", +} + +func (NetworkDiagnosticsTargetPlacement) SwaggerDoc() map[string]string { + return map_NetworkDiagnosticsTargetPlacement +} + var map_NetworkList = map[string]string{ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -1997,9 +2029,9 @@ func (NetworkList) SwaggerDoc() map[string]string { } var map_NetworkMigration = map[string]string{ - "": "NetworkMigration represents the cluster network configuration.", - "networkType": "NetworkType is the target plugin that is to be deployed. Currently supported values are: OpenShiftSDN, OVNKubernetes", - "mtu": "MTU contains the MTU migration configuration.", + "": "NetworkMigration represents the network migration status.", + "networkType": "NetworkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.", + "mtu": "MTU is the MTU configuration that is being deployed.", } func (NetworkMigration) SwaggerDoc() map[string]string { @@ -2010,9 +2042,10 @@ var map_NetworkSpec = map[string]string{ "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.", "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.", - "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.", + "networkType": "NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.", "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.", "serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed.", + "networkDiagnostics": "networkDiagnostics defines network diagnostics configuration.\n\nTakes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. 
If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, the network diagnostics feature will be disabled.", } func (NetworkSpec) SwaggerDoc() map[string]string { @@ -2023,10 +2056,10 @@ var map_NetworkStatus = map[string]string{ "": "NetworkStatus is the current network configuration.", "clusterNetwork": "IP address pool to use for pod IPs.", "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.", - "networkType": "NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).", + "networkType": "NetworkType is the plugin that is deployed (e.g. OVNKubernetes).", "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.", "migration": "Migration contains the cluster network migration configuration.", - "conditions": "conditions represents the observations of a network.config current state. Known .status.conditions.type are: \"NetworkTypeMigrationInProgress\", \"NetworkTypeMigrationMTUReady\", \"NetworkTypeMigrationTargetCNIAvailable\", \"NetworkTypeMigrationTargetCNIInUse\" and \"NetworkTypeMigrationOriginalCNIPurged\"", + "conditions": "conditions represents the observations of a network.config current state. Known .status.conditions.type are: \"NetworkDiagnosticsAvailable\"", } func (NetworkStatus) SwaggerDoc() map[string]string { @@ -2062,6 +2095,14 @@ func (NodeSpec) SwaggerDoc() map[string]string { return map_NodeSpec } +var map_NodeStatus = map[string]string{ + "conditions": "conditions contain the details and the current state of the nodes.config object", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + var map_BasicAuthIdentityProvider = map[string]string{ "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials", } @@ -2476,6 +2517,41 @@ func (SchedulerSpec) SwaggerDoc() map[string]string { return map_SchedulerSpec } +var map_FeatureGateTests = map[string]string{ + "featureGate": "FeatureGate is the name of the FeatureGate as it appears in the FeatureGate CR instance.", + "tests": "Tests contains an item for every TestName", +} + +func (FeatureGateTests) SwaggerDoc() map[string]string { + return map_FeatureGateTests +} + +var map_TestDetails = map[string]string{ + "testName": "TestName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.", +} + +func (TestDetails) SwaggerDoc() map[string]string { + return map_TestDetails +} + +var map_TestReporting = map[string]string{ + "": "TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into the payload for later analysis on a per-payload basis. This doesn't need any CRD because it's never stored in the cluster.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "status holds observed values from the cluster. They may not be overridden.", } + +func (TestReporting) SwaggerDoc() map[string]string { + return map_TestReporting +} + +var map_TestReportingSpec = map[string]string{ + "testsForFeatureGates": "TestsForFeatureGates is a list, indexed by FeatureGate, that includes information about testing.", +} + +func (TestReportingSpec) SwaggerDoc() map[string]string { + return map_TestReportingSpec +} + var map_CustomTLSProfile = map[string]string{ "": "CustomTLSProfile is a user-defined TLS security profile. Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.", }
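The networkDiagnostics field documented in the hunk above spreads its defaults across several descriptions. As a rough, non-authoritative sketch of how those pieces fit together on the cluster-scoped Network config (the `config.openshift.io/v1` apiVersion and the `cluster` resource name are assumptions based on the usual convention for these singleton config resources; the field values simply restate the defaults documented above):

```
apiVersion: config.openshift.io/v1
kind: Network
metadata:
  name: cluster        # assumed: config.openshift.io resources are conventionally named "cluster"
spec:
  networkDiagnostics:
    mode: All                      # documented current default when the field is omitted
    sourcePlacement:
      nodeSelector:
        kubernetes.io/os: linux    # documented default node selector
    targetPlacement:
      tolerations:
        - operator: Exists         # documented default: all taints are tolerated
```

Per the NetworkSpec description above, this stanza takes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io; leaving networkDiagnostics empty while that flag is true disables the feature.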
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 697eb7332..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,142 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1482 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: backups.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "Backup provides configuration for performing backups of the - openshift cluster. \n Compatibility level 4: No compatibility is provided, - the API can change at any point for any reason. These capabilities should - not be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - etcd: - description: etcd specifies the configuration for periodic backups - of the etcd cluster - properties: - pvcName: - description: PVCName specifies the name of the PersistentVolumeClaim - (PVC) which binds a PersistentVolume where the etcd backup files - would be saved The PVC itself must always be created in the - "openshift-etcd" namespace If the PVC is left unspecified "" - then the platform will choose a reasonable default location - to save the backup. In the future this would be backups saved - across the control-plane master nodes. - type: string - retentionPolicy: - description: RetentionPolicy defines the retention policy for - retaining and deleting existing backups.
- properties: - retentionNumber: - description: RetentionNumber configures the retention policy - based on the number of backups - properties: - maxNumberOfBackups: - description: MaxNumberOfBackups defines the maximum number - of backups to retain. If the existing number of backups - saved is equal to MaxNumberOfBackups then the oldest - backup will be removed before a new backup is initiated. - minimum: 1 - type: integer - required: - - maxNumberOfBackups - type: object - retentionSize: - description: RetentionSize configures the retention policy - based on the size of backups - properties: - maxSizeOfBackupsGb: - description: MaxSizeOfBackupsGb defines the total size - in GB of backups to retain. If the current total size - backups exceeds MaxSizeOfBackupsGb then the oldest backup - will be removed before a new backup is initiated. - minimum: 1 - type: integer - required: - - maxSizeOfBackupsGb - type: object - retentionType: - allOf: - - enum: - - RetentionNumber - - RetentionSize - - enum: - - "" - - RetentionNumber - - RetentionSize - description: RetentionType sets the type of retention policy. - Currently, the only valid policies are retention by number - of backups (RetentionNumber), by the size of backups (RetentionSize). - More policies or types may be added in the future. Empty - string means no opinion and the platform is left to choose - a reasonable default which is subject to change without - notice. The current default is RetentionNumber with 15 backups - kept. - type: string - required: - - retentionType - type: object - schedule: - description: 'Schedule defines the recurring backup schedule in - Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 - * * * Empty string means no opinion and the platform is left - to choose a reasonable default which is subject to change without - notice. The current default is "no backups", but will change - in the future.' - pattern: ^(@(annually|yearly|monthly|weekly|daily|hourly))|(\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) - (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) - (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) - (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) - (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*))$ - type: string - timeZone: - description: The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. - If not specified, this will default to the time zone of the - kube-controller-manager process. See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - pattern: ^([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(\/[A-Za-z_]+){1,2})(\/GMT[+-]\d{1,2})?$ - type: string - type: object - required: - - etcd - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. 
- type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-CustomNoUpgrade.crd.yaml deleted file mode 100644 index 69dbe3a2f..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,398 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1457 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: clusterimagepolicies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterImagePolicy - listKind: ClusterImagePolicyList - plural: clusterimagepolicies - singular: clusterimagepolicy - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "ClusterImagePolicy holds cluster-wide configuration for image - signature verification \n Compatibility level 4: No compatibility is provided, - the API can change at any point for any reason. These capabilities should - not be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec contains the configuration for the cluster image policy. - properties: - policy: - description: policy contains configuration to allow scopes to be verified, - and defines how images not matching the verification policy will - be treated. - properties: - rootOfTrust: - description: rootOfTrust specifies the root of trust for the policy. - properties: - fulcioCAWithRekor: - description: 'fulcioCAWithRekor defines the root of trust - based on the Fulcio certificate and the Rekor public key. - For more information about Fulcio and Rekor, please refer - to the document at: https://github.com/sigstore/fulcio and - https://github.com/sigstore/rekor' - properties: - fulcioCAData: - description: fulcioCAData contains inline base64-encoded - data for the PEM format fulcio CA. fulcioCAData must - be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - fulcioSubject: - description: fulcioSubject specifies OIDC issuer and the - email of the Fulcio authentication configuration. - properties: - oidcIssuer: - description: 'oidcIssuer contains the expected OIDC - issuer. It will be verified that the Fulcio-issued - certificate contains a (Fulcio-defined) certificate - extension pointing at this OIDC issuer URL. 
When - Fulcio issues certificates, it includes a value - based on an URL inside the client-provided ID token. - Example: "https://expected.OIDC.issuer/"' - type: string - x-kubernetes-validations: - - message: oidcIssuer must be a valid URL - rule: isURL(self) - signedEmail: - description: 'signedEmail holds the email address - the the Fulcio certificate is issued for. Example: - "expected-signing-user@example.com"' - type: string - x-kubernetes-validations: - - message: invalid email address - rule: self.matches('^\\S+@\\S+$') - required: - - oidcIssuer - - signedEmail - type: object - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - fulcioCAData - - fulcioSubject - - rekorKeyData - type: object - policyType: - description: policyType serves as the union's discriminator. - Users are required to assign a value to this field, choosing - one of the policy types that define the root of trust. "PublicKey" - indicates that the policy relies on a sigstore publicKey - and may optionally use a Rekor verification. "FulcioCAWithRekor" - indicates that the policy is based on the Fulcio certification - and incorporates a Rekor verification. - enum: - - PublicKey - - FulcioCAWithRekor - type: string - publicKey: - description: publicKey defines the root of trust based on - a sigstore public key. - properties: - keyData: - description: keyData contains inline base64-encoded data - for the PEM format public key. KeyData must be at most - 8192 characters. - format: byte - maxLength: 8192 - type: string - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - keyData - type: object - required: - - policyType - type: object - x-kubernetes-validations: - - message: publicKey is required when policyType is PublicKey, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''PublicKey'' - ? has(self.publicKey) : !has(self.publicKey)' - - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' - ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' - signedIdentity: - description: signedIdentity specifies what image identity the - signature claims about the image. The required matchPolicy field - specifies the approach used in the verification process to verify - the identity in the signature and the actual image identity, - the default matchPolicy is "MatchRepoDigestOrExact". - properties: - exactRepository: - description: exactRepository is required if matchPolicy is - set to "ExactRepository". - properties: - repository: - description: repository is the reference of the image - identity to be matched. The value should be a repository - name (by omitting the tag or digest) in a registry implementing - the "Docker Registry HTTP API V2". For example, docker.io/library/busybox - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? 
- self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - repository - type: object - matchPolicy: - description: matchPolicy sets the type of matching to be used. - Valid values are "MatchRepoDigestOrExact", "MatchRepository", - "ExactRepository", "RemapIdentity". When omitted, the default - value is "MatchRepoDigestOrExact". If set matchPolicy to - ExactRepository, then the exactRepository must be specified. - If set matchPolicy to RemapIdentity, then the remapIdentity - must be specified. "MatchRepoDigestOrExact" means that the - identity in the signature must be in the same repository - as the image identity if the image identity is referenced - by a digest. Otherwise, the identity in the signature must - be the same as the image identity. "MatchRepository" means - that the identity in the signature must be in the same repository - as the image identity. "ExactRepository" means that the - identity in the signature must be in the same repository - as a specific identity specified by "repository". "RemapIdentity" - means that the signature must be in the same as the remapped - image identity. Remapped image identity is obtained by replacing - the "prefix" with the specified “signedPrefix” if the the - image identity matches the specified remapPrefix. - enum: - - MatchRepoDigestOrExact - - MatchRepository - - ExactRepository - - RemapIdentity - type: string - remapIdentity: - description: remapIdentity is required if matchPolicy is set - to "RemapIdentity". - properties: - prefix: - description: prefix is the prefix of the image identity - to be matched. If the image identity matches the specified - prefix, that prefix is replaced by the specified “signedPrefix” - (otherwise it is used as unchanged and no remapping - takes place). This useful when verifying signatures - for a mirror of some other repository namespace that - preserves the vendor’s repository structure. The prefix - and signedPrefix values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - signedPrefix: - description: signedPrefix is the prefix of the image identity - to be matched in the signature. The format is the same - as "prefix". The values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. 
they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - prefix - - signedPrefix - type: object - required: - - matchPolicy - type: object - x-kubernetes-validations: - - message: exactRepository is required when matchPolicy is ExactRepository, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') - ? has(self.exactRepository) : !has(self.exactRepository)' - - message: remapIdentity is required when matchPolicy is RemapIdentity, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') - ? has(self.remapIdentity) : !has(self.remapIdentity)' - required: - - rootOfTrust - type: object - scopes: - description: 'scopes defines the list of image identities assigned - to a policy. Each item refers to a scope in a registry implementing - the "Docker Registry HTTP API V2". Scopes matching individual images - are named Docker references in the fully expanded form, either using - a tag or digest. For example, docker.io/library/busybox:latest (not - busybox:latest). More general scopes are prefixes of individual-image - scopes, and specify a repository (by omitting the tag or digest), - a repository namespace, or a registry host (by only specifying the - host name and possibly a port number) or a wildcard expression starting - with `*.`, for matching all subdomains (not including a port number). - Wildcards are only supported for subdomain matching, and may not - be used in the middle of the host, i.e. *.example.com is a valid - case, but example*.*.com is not. Please be aware that the scopes - should not be nested under the repositories of OpenShift Container - Platform images. If configured, the policies for OpenShift Container - Platform repositories will not be in effect. For additional details - about the format, please refer to the document explaining the docker - transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker' - items: - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid image scope format, scope must contain a fully - qualified domain name or 'localhost' - rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] - == ''localhost'' : true' - - message: invalid image scope with wildcard, a wildcard can only - be at the start of the domain and is only supported for subdomain - matching, not path matching - rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') - : true' - - message: invalid repository namespace or image specification in - the image scope - rule: '!self.contains(''*'') ? 
self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') - : true' - maxItems: 256 - type: array - x-kubernetes-list-type: set - required: - - policy - - scopes - type: object - status: - description: status contains the observed state of the resource. - properties: - conditions: - description: conditions provide details on the status of this API - Resource. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 834c03ae1..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,398 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1457 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: clusterimagepolicies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterImagePolicy - listKind: ClusterImagePolicyList - plural: clusterimagepolicies - singular: clusterimagepolicy - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "ClusterImagePolicy holds cluster-wide configuration for image - signature verification \n Compatibility level 4: No compatibility is provided, - the API can change at any point for any reason. These capabilities should - not be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec contains the configuration for the cluster image policy. - properties: - policy: - description: policy contains configuration to allow scopes to be verified, - and defines how images not matching the verification policy will - be treated. - properties: - rootOfTrust: - description: rootOfTrust specifies the root of trust for the policy. - properties: - fulcioCAWithRekor: - description: 'fulcioCAWithRekor defines the root of trust - based on the Fulcio certificate and the Rekor public key. - For more information about Fulcio and Rekor, please refer - to the document at: https://github.com/sigstore/fulcio and - https://github.com/sigstore/rekor' - properties: - fulcioCAData: - description: fulcioCAData contains inline base64-encoded - data for the PEM format fulcio CA. fulcioCAData must - be at most 8192 characters. 
- format: byte - maxLength: 8192 - type: string - fulcioSubject: - description: fulcioSubject specifies OIDC issuer and the - email of the Fulcio authentication configuration. - properties: - oidcIssuer: - description: 'oidcIssuer contains the expected OIDC - issuer. It will be verified that the Fulcio-issued - certificate contains a (Fulcio-defined) certificate - extension pointing at this OIDC issuer URL. When - Fulcio issues certificates, it includes a value - based on an URL inside the client-provided ID token. - Example: "https://expected.OIDC.issuer/"' - type: string - x-kubernetes-validations: - - message: oidcIssuer must be a valid URL - rule: isURL(self) - signedEmail: - description: 'signedEmail holds the email address - the the Fulcio certificate is issued for. Example: - "expected-signing-user@example.com"' - type: string - x-kubernetes-validations: - - message: invalid email address - rule: self.matches('^\\S+@\\S+$') - required: - - oidcIssuer - - signedEmail - type: object - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - fulcioCAData - - fulcioSubject - - rekorKeyData - type: object - policyType: - description: policyType serves as the union's discriminator. - Users are required to assign a value to this field, choosing - one of the policy types that define the root of trust. "PublicKey" - indicates that the policy relies on a sigstore publicKey - and may optionally use a Rekor verification. "FulcioCAWithRekor" - indicates that the policy is based on the Fulcio certification - and incorporates a Rekor verification. - enum: - - PublicKey - - FulcioCAWithRekor - type: string - publicKey: - description: publicKey defines the root of trust based on - a sigstore public key. - properties: - keyData: - description: keyData contains inline base64-encoded data - for the PEM format public key. KeyData must be at most - 8192 characters. - format: byte - maxLength: 8192 - type: string - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - keyData - type: object - required: - - policyType - type: object - x-kubernetes-validations: - - message: publicKey is required when policyType is PublicKey, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''PublicKey'' - ? has(self.publicKey) : !has(self.publicKey)' - - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' - ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' - signedIdentity: - description: signedIdentity specifies what image identity the - signature claims about the image. The required matchPolicy field - specifies the approach used in the verification process to verify - the identity in the signature and the actual image identity, - the default matchPolicy is "MatchRepoDigestOrExact". - properties: - exactRepository: - description: exactRepository is required if matchPolicy is - set to "ExactRepository". - properties: - repository: - description: repository is the reference of the image - identity to be matched. 
The value should be a repository - name (by omitting the tag or digest) in a registry implementing - the "Docker Registry HTTP API V2". For example, docker.io/library/busybox - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - repository - type: object - matchPolicy: - description: matchPolicy sets the type of matching to be used. - Valid values are "MatchRepoDigestOrExact", "MatchRepository", - "ExactRepository", "RemapIdentity". When omitted, the default - value is "MatchRepoDigestOrExact". If set matchPolicy to - ExactRepository, then the exactRepository must be specified. - If set matchPolicy to RemapIdentity, then the remapIdentity - must be specified. "MatchRepoDigestOrExact" means that the - identity in the signature must be in the same repository - as the image identity if the image identity is referenced - by a digest. Otherwise, the identity in the signature must - be the same as the image identity. "MatchRepository" means - that the identity in the signature must be in the same repository - as the image identity. "ExactRepository" means that the - identity in the signature must be in the same repository - as a specific identity specified by "repository". "RemapIdentity" - means that the signature must be in the same as the remapped - image identity. Remapped image identity is obtained by replacing - the "prefix" with the specified “signedPrefix” if the the - image identity matches the specified remapPrefix. - enum: - - MatchRepoDigestOrExact - - MatchRepository - - ExactRepository - - RemapIdentity - type: string - remapIdentity: - description: remapIdentity is required if matchPolicy is set - to "RemapIdentity". - properties: - prefix: - description: prefix is the prefix of the image identity - to be matched. If the image identity matches the specified - prefix, that prefix is replaced by the specified “signedPrefix” - (otherwise it is used as unchanged and no remapping - takes place). This useful when verifying signatures - for a mirror of some other repository namespace that - preserves the vendor’s repository structure. The prefix - and signedPrefix values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? 
- self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - signedPrefix: - description: signedPrefix is the prefix of the image identity - to be matched in the signature. The format is the same - as "prefix". The values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - prefix - - signedPrefix - type: object - required: - - matchPolicy - type: object - x-kubernetes-validations: - - message: exactRepository is required when matchPolicy is ExactRepository, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') - ? has(self.exactRepository) : !has(self.exactRepository)' - - message: remapIdentity is required when matchPolicy is RemapIdentity, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') - ? has(self.remapIdentity) : !has(self.remapIdentity)' - required: - - rootOfTrust - type: object - scopes: - description: 'scopes defines the list of image identities assigned - to a policy. Each item refers to a scope in a registry implementing - the "Docker Registry HTTP API V2". Scopes matching individual images - are named Docker references in the fully expanded form, either using - a tag or digest. For example, docker.io/library/busybox:latest (not - busybox:latest). More general scopes are prefixes of individual-image - scopes, and specify a repository (by omitting the tag or digest), - a repository namespace, or a registry host (by only specifying the - host name and possibly a port number) or a wildcard expression starting - with `*.`, for matching all subdomains (not including a port number). - Wildcards are only supported for subdomain matching, and may not - be used in the middle of the host, i.e. *.example.com is a valid - case, but example*.*.com is not. Please be aware that the scopes - should not be nested under the repositories of OpenShift Container - Platform images. If configured, the policies for OpenShift Container - Platform repositories will not be in effect. 
For additional details - about the format, please refer to the document explaining the docker - transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker' - items: - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid image scope format, scope must contain a fully - qualified domain name or 'localhost' - rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] - == ''localhost'' : true' - - message: invalid image scope with wildcard, a wildcard can only - be at the start of the domain and is only supported for subdomain - matching, not path matching - rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') - : true' - - message: invalid repository namespace or image specification in - the image scope - rule: '!self.contains(''*'') ? self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') - : true' - maxItems: 256 - type: array - x-kubernetes-list-type: set - required: - - policy - - scopes - type: object - status: - description: status contains the observed state of the resource. - properties: - conditions: - description: conditions provide details on the status of this API - Resource. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. 
- maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-CustomNoUpgrade.crd.yaml deleted file mode 100644 index 642a19f78..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,398 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1457 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: imagepolicies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImagePolicy - listKind: ImagePolicyList - plural: imagepolicies - singular: imagepolicy - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "ImagePolicy holds namespace-wide configuration for image signature - verification \n Compatibility level 4: No compatibility is provided, the - API can change at any point for any reason. These capabilities should not - be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - policy: - description: policy contains configuration to allow scopes to be verified, - and defines how images not matching the verification policy will - be treated. - properties: - rootOfTrust: - description: rootOfTrust specifies the root of trust for the policy. 
- properties: - fulcioCAWithRekor: - description: 'fulcioCAWithRekor defines the root of trust - based on the Fulcio certificate and the Rekor public key. - For more information about Fulcio and Rekor, please refer - to the document at: https://github.com/sigstore/fulcio and - https://github.com/sigstore/rekor' - properties: - fulcioCAData: - description: fulcioCAData contains inline base64-encoded - data for the PEM format fulcio CA. fulcioCAData must - be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - fulcioSubject: - description: fulcioSubject specifies OIDC issuer and the - email of the Fulcio authentication configuration. - properties: - oidcIssuer: - description: 'oidcIssuer contains the expected OIDC - issuer. It will be verified that the Fulcio-issued - certificate contains a (Fulcio-defined) certificate - extension pointing at this OIDC issuer URL. When - Fulcio issues certificates, it includes a value - based on an URL inside the client-provided ID token. - Example: "https://expected.OIDC.issuer/"' - type: string - x-kubernetes-validations: - - message: oidcIssuer must be a valid URL - rule: isURL(self) - signedEmail: - description: 'signedEmail holds the email address - the the Fulcio certificate is issued for. Example: - "expected-signing-user@example.com"' - type: string - x-kubernetes-validations: - - message: invalid email address - rule: self.matches('^\\S+@\\S+$') - required: - - oidcIssuer - - signedEmail - type: object - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - fulcioCAData - - fulcioSubject - - rekorKeyData - type: object - policyType: - description: policyType serves as the union's discriminator. - Users are required to assign a value to this field, choosing - one of the policy types that define the root of trust. "PublicKey" - indicates that the policy relies on a sigstore publicKey - and may optionally use a Rekor verification. "FulcioCAWithRekor" - indicates that the policy is based on the Fulcio certification - and incorporates a Rekor verification. - enum: - - PublicKey - - FulcioCAWithRekor - type: string - publicKey: - description: publicKey defines the root of trust based on - a sigstore public key. - properties: - keyData: - description: keyData contains inline base64-encoded data - for the PEM format public key. KeyData must be at most - 8192 characters. - format: byte - maxLength: 8192 - type: string - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - keyData - type: object - required: - - policyType - type: object - x-kubernetes-validations: - - message: publicKey is required when policyType is PublicKey, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''PublicKey'' - ? has(self.publicKey) : !has(self.publicKey)' - - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' - ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' - signedIdentity: - description: signedIdentity specifies what image identity the - signature claims about the image. 
The required matchPolicy field - specifies the approach used in the verification process to verify - the identity in the signature and the actual image identity, - the default matchPolicy is "MatchRepoDigestOrExact". - properties: - exactRepository: - description: exactRepository is required if matchPolicy is - set to "ExactRepository". - properties: - repository: - description: repository is the reference of the image - identity to be matched. The value should be a repository - name (by omitting the tag or digest) in a registry implementing - the "Docker Registry HTTP API V2". For example, docker.io/library/busybox - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - repository - type: object - matchPolicy: - description: matchPolicy sets the type of matching to be used. - Valid values are "MatchRepoDigestOrExact", "MatchRepository", - "ExactRepository", "RemapIdentity". When omitted, the default - value is "MatchRepoDigestOrExact". If set matchPolicy to - ExactRepository, then the exactRepository must be specified. - If set matchPolicy to RemapIdentity, then the remapIdentity - must be specified. "MatchRepoDigestOrExact" means that the - identity in the signature must be in the same repository - as the image identity if the image identity is referenced - by a digest. Otherwise, the identity in the signature must - be the same as the image identity. "MatchRepository" means - that the identity in the signature must be in the same repository - as the image identity. "ExactRepository" means that the - identity in the signature must be in the same repository - as a specific identity specified by "repository". "RemapIdentity" - means that the signature must be in the same as the remapped - image identity. Remapped image identity is obtained by replacing - the "prefix" with the specified “signedPrefix” if the the - image identity matches the specified remapPrefix. - enum: - - MatchRepoDigestOrExact - - MatchRepository - - ExactRepository - - RemapIdentity - type: string - remapIdentity: - description: remapIdentity is required if matchPolicy is set - to "RemapIdentity". - properties: - prefix: - description: prefix is the prefix of the image identity - to be matched. If the image identity matches the specified - prefix, that prefix is replaced by the specified “signedPrefix” - (otherwise it is used as unchanged and no remapping - takes place). This useful when verifying signatures - for a mirror of some other repository namespace that - preserves the vendor’s repository structure. The prefix - and signedPrefix values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. 
- maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - signedPrefix: - description: signedPrefix is the prefix of the image identity - to be matched in the signature. The format is the same - as "prefix". The values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - prefix - - signedPrefix - type: object - required: - - matchPolicy - type: object - x-kubernetes-validations: - - message: exactRepository is required when matchPolicy is ExactRepository, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') - ? has(self.exactRepository) : !has(self.exactRepository)' - - message: remapIdentity is required when matchPolicy is RemapIdentity, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') - ? has(self.remapIdentity) : !has(self.remapIdentity)' - required: - - rootOfTrust - type: object - scopes: - description: 'scopes defines the list of image identities assigned - to a policy. Each item refers to a scope in a registry implementing - the "Docker Registry HTTP API V2". Scopes matching individual images - are named Docker references in the fully expanded form, either using - a tag or digest. For example, docker.io/library/busybox:latest (not - busybox:latest). More general scopes are prefixes of individual-image - scopes, and specify a repository (by omitting the tag or digest), - a repository namespace, or a registry host (by only specifying the - host name and possibly a port number) or a wildcard expression starting - with `*.`, for matching all subdomains (not including a port number). - Wildcards are only supported for subdomain matching, and may not - be used in the middle of the host, i.e. *.example.com is a valid - case, but example*.*.com is not. Please be aware that the scopes - should not be nested under the repositories of OpenShift Container - Platform images. If configured, the policies for OpenShift Container - Platform repositories will not be in effect. 
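# Editor's note: illustrative scope values for the formats enumerated above, from most
# to least specific; hosts and repository names are hypothetical, and the digest is the
# placeholder used elsewhere in this patch.
scopes:
  - example.com/ns/app@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13  # one image, by digest
  - example.com/ns/app:latest    # one image, by tag
  - example.com/ns/app           # a repository
  - example.com/ns               # a repository namespace
  - example.com:5000             # a registry host (port optional)
  - "*.example.com"              # every subdomain; the wildcard may only lead the host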
For additional details - about the format, please refer to the document explaining the docker - transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker' - items: - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid image scope format, scope must contain a fully - qualified domain name or 'localhost' - rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] - == ''localhost'' : true' - - message: invalid image scope with wildcard, a wildcard can only - be at the start of the domain and is only supported for subdomain - matching, not path matching - rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') - : true' - - message: invalid repository namespace or image specification in - the image scope - rule: '!self.contains(''*'') ? self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') - : true' - maxItems: 256 - type: array - x-kubernetes-list-type: set - required: - - policy - - scopes - type: object - status: - description: status contains the observed state of the resource. - properties: - conditions: - description: conditions provide details on the status of this API - Resource. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. 
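# Editor's note: a sketch of a single status condition as constrained by the schema
# around this point; the type and reason values are hypothetical, and
# observedGeneration: 12 echoes the example in the field documentation above.
conditions:
  - type: Ready
    status: "True"
    reason: PolicyApplied
    message: the image policy has been applied
    observedGeneration: 12
    lastTransitionTime: "2024-01-01T00:00:00Z"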
- maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 2f5ea8863..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,398 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1457 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: imagepolicies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImagePolicy - listKind: ImagePolicyList - plural: imagepolicies - singular: imagepolicy - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "ImagePolicy holds namespace-wide configuration for image signature - verification \n Compatibility level 4: No compatibility is provided, the - API can change at any point for any reason. These capabilities should not - be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - policy: - description: policy contains configuration to allow scopes to be verified, - and defines how images not matching the verification policy will - be treated. - properties: - rootOfTrust: - description: rootOfTrust specifies the root of trust for the policy. 
- properties: - fulcioCAWithRekor: - description: 'fulcioCAWithRekor defines the root of trust - based on the Fulcio certificate and the Rekor public key. - For more information about Fulcio and Rekor, please refer - to the document at: https://github.com/sigstore/fulcio and - https://github.com/sigstore/rekor' - properties: - fulcioCAData: - description: fulcioCAData contains inline base64-encoded - data for the PEM format fulcio CA. fulcioCAData must - be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - fulcioSubject: - description: fulcioSubject specifies OIDC issuer and the - email of the Fulcio authentication configuration. - properties: - oidcIssuer: - description: 'oidcIssuer contains the expected OIDC - issuer. It will be verified that the Fulcio-issued - certificate contains a (Fulcio-defined) certificate - extension pointing at this OIDC issuer URL. When - Fulcio issues certificates, it includes a value - based on a URL inside the client-provided ID token. - Example: "https://expected.OIDC.issuer/"' - type: string - x-kubernetes-validations: - - message: oidcIssuer must be a valid URL - rule: isURL(self) - signedEmail: - description: 'signedEmail holds the email address - that the Fulcio certificate is issued for. Example: - "expected-signing-user@example.com"' - type: string - x-kubernetes-validations: - - message: invalid email address - rule: self.matches('^\\S+@\\S+$') - required: - - oidcIssuer - - signedEmail - type: object - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - fulcioCAData - - fulcioSubject - - rekorKeyData - type: object - policyType: - description: policyType serves as the union's discriminator. - Users are required to assign a value to this field, choosing - one of the policy types that define the root of trust. "PublicKey" - indicates that the policy relies on a sigstore publicKey - and may optionally use a Rekor verification. "FulcioCAWithRekor" - indicates that the policy is based on the Fulcio certificate - and incorporates a Rekor verification. - enum: - - PublicKey - - FulcioCAWithRekor - type: string - publicKey: - description: publicKey defines the root of trust based on - a sigstore public key. - properties: - keyData: - description: keyData contains inline base64-encoded data - for the PEM format public key. keyData must be at most - 8192 characters. - format: byte - maxLength: 8192 - type: string - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - keyData - type: object - required: - - policyType - type: object - x-kubernetes-validations: - - message: publicKey is required when policyType is PublicKey, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''PublicKey'' - ? has(self.publicKey) : !has(self.publicKey)' - - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' - ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' - signedIdentity: - description: signedIdentity specifies what image identity the - signature claims about the image.
The required matchPolicy field - specifies the approach used in the verification process to match - the identity in the signature against the actual image identity; - the default matchPolicy is "MatchRepoDigestOrExact". - properties: - exactRepository: - description: exactRepository is required if matchPolicy is - set to "ExactRepository". - properties: - repository: - description: repository is the reference of the image - identity to be matched. The value should be a repository - name (by omitting the tag or digest) in a registry implementing - the "Docker Registry HTTP API V2". For example, docker.io/library/busybox - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - repository - type: object - matchPolicy: - description: matchPolicy sets the type of matching to be used. - Valid values are "MatchRepoDigestOrExact", "MatchRepository", - "ExactRepository", "RemapIdentity". When omitted, the default - value is "MatchRepoDigestOrExact". If matchPolicy is set to - ExactRepository, then exactRepository must be specified. - If matchPolicy is set to RemapIdentity, then remapIdentity - must be specified. "MatchRepoDigestOrExact" means that the - identity in the signature must be in the same repository - as the image identity if the image identity is referenced - by a digest. Otherwise, the identity in the signature must - be the same as the image identity. "MatchRepository" means - that the identity in the signature must be in the same repository - as the image identity. "ExactRepository" means that the - identity in the signature must be in the same repository - as a specific identity specified by "repository". "RemapIdentity" - means that the identity in the signature must be the same as the remapped - image identity. The remapped image identity is obtained by replacing - the "prefix" with the specified "signedPrefix" if the - image identity matches the specified prefix. - enum: - - MatchRepoDigestOrExact - - MatchRepository - - ExactRepository - - RemapIdentity - type: string - remapIdentity: - description: remapIdentity is required if matchPolicy is set - to "RemapIdentity". - properties: - prefix: - description: prefix is the prefix of the image identity - to be matched. If the image identity matches the specified - prefix, that prefix is replaced by the specified "signedPrefix" - (otherwise it is used unchanged and no remapping - takes place). This is useful when verifying signatures - for a mirror of some other repository namespace that - preserves the vendor's repository structure. The prefix - and signedPrefix values can be either host[:port] values - (matching exactly the same host[:port] string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox.
- maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - signedPrefix: - description: signedPrefix is the prefix of the image identity - to be matched in the signature. The format is the same - as "prefix". The values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - prefix - - signedPrefix - type: object - required: - - matchPolicy - type: object - x-kubernetes-validations: - - message: exactRepository is required when matchPolicy is ExactRepository, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') - ? has(self.exactRepository) : !has(self.exactRepository)' - - message: remapIdentity is required when matchPolicy is RemapIdentity, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') - ? has(self.remapIdentity) : !has(self.remapIdentity)' - required: - - rootOfTrust - type: object - scopes: - description: 'scopes defines the list of image identities assigned - to a policy. Each item refers to a scope in a registry implementing - the "Docker Registry HTTP API V2". Scopes matching individual images - are named Docker references in the fully expanded form, either using - a tag or digest. For example, docker.io/library/busybox:latest (not - busybox:latest). More general scopes are prefixes of individual-image - scopes, and specify a repository (by omitting the tag or digest), - a repository namespace, or a registry host (by only specifying the - host name and possibly a port number) or a wildcard expression starting - with `*.`, for matching all subdomains (not including a port number). - Wildcards are only supported for subdomain matching, and may not - be used in the middle of the host, i.e. *.example.com is a valid - case, but example*.*.com is not. Please be aware that the scopes - should not be nested under the repositories of OpenShift Container - Platform images. If configured, the policies for OpenShift Container - Platform repositories will not be in effect. 
For additional details - about the format, please refer to the document explaining the docker - transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker' - items: - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid image scope format, scope must contain a fully - qualified domain name or 'localhost' - rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] - == ''localhost'' : true' - - message: invalid image scope with wildcard, a wildcard can only - be at the start of the domain and is only supported for subdomain - matching, not path matching - rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') - : true' - - message: invalid repository namespace or image specification in - the image scope - rule: '!self.contains(''*'') ? self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') - : true' - maxItems: 256 - type: array - x-kubernetes-list-type: set - required: - - policy - - scopes - type: object - status: - description: status contains the observed state of the resource. - properties: - conditions: - description: conditions provide details on the status of this API - Resource. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. 
- maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml deleted file mode 100644 index b23e917c9..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml +++ /dev/null @@ -1,88 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1245 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: insightsdatagathers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: InsightsDataGather - listKind: InsightsDataGatherList - plural: insightsdatagathers - singular: insightsdatagather - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "InsightsDataGather provides data gather configuration options - for the Insights Operator. \n Compatibility level 4: No compatibility - is provided, the API can change at any point for any reason. These capabilities - should not be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - gatherConfig: - description: gatherConfig spec attribute includes all the configuration - options related to gathering of the Insights data and its uploading - to the ingress.
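# Editor's note: an illustrative InsightsDataGather resource using the gatherConfig
# fields documented just below; the resource name is hypothetical and the disabled
# gatherer IDs are the ones quoted in the disabledGatherers documentation.
apiVersion: config.openshift.io/v1alpha1
kind: InsightsDataGather
metadata:
  name: cluster
spec:
  gatherConfig:
    dataPolicy: ObfuscateNetworking   # obfuscate IP addresses and the cluster domain name
    disabledGatherers:
      - clusterconfig/machine_configs
      - workloads/workload_info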
- properties: - dataPolicy: - description: dataPolicy allows user to enable additional global - obfuscation of the IP addresses and base domain in the Insights - archive data. Valid values are "None" and "ObfuscateNetworking". - When set to None the data is not obfuscated. When set to ObfuscateNetworking - the IP addresses and the cluster domain name are obfuscated. - When omitted, this means no opinion and the platform is left - to choose a reasonable default, which is subject to change over - time. The current default is None. - enum: - - "" - - None - - ObfuscateNetworking - type: string - disabledGatherers: - description: 'disabledGatherers is a list of gatherers to be excluded - from the gathering. All the gatherers can be disabled by providing - "all" value. If all the gatherers are disabled, the Insights - operator does not gather any data. The particular gatherers - IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. - Run the following command to get the names of last active gatherers: - "oc get insightsoperators.operator.openshift.io cluster -o json - | jq ''.status.gatherStatus.gatherers[].name''" An example of - disabling gatherers looks like this: `disabledGatherers: ["clusterconfig/machine_configs", - "workloads/workload_info"]`' - items: - type: string - type: array - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/custom.clusterimagepolicy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/custom.clusterimagepolicy.testsuite.yaml deleted file mode 100644 index 232bdf037..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/custom.clusterimagepolicy.testsuite.yaml +++ /dev/null @@ -1,451 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] ClusterImagePolicy" -crd: 0000_10_config-operator_01_clusterimagepolicy-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ImagePolicy with policyType PublicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Should be able to create a minimal ImagePolicy with policyType FulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - - name: Should not allow policyType PublicKey but not set publicKey - 
initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - FulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType FulcioCAData but not set fulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - PublicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow policyType set but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType set FulcioCAWith but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy ExactRepository but not set repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy RemapIdentity but not set prefixes - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" - - name: Test scope should not allow 'busybox' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - busybox - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" - - name: Test scope should not allow start with subnamesapces '*.example.com/test' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com/test" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope with wildcard, a 
wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" - - name: Test scope should not allow invalid digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:12dsdf - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid repository namespace or image specification in the image scope" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow digest in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com/namespace:latest - signedPrefix: example.com/namespace - expectedError: "[spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should allow valid ExactRepository repository - initial: | - 
apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - - name: Test should allow valid signedIdentity prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - - name: Test scope should allow localhost name with port 'localhost:1234/namespace/namespace' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow localhost 'localhost/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow 'example.com/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow tag 'example.com/foo/bar:latest' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow full specification digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - 
example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow '*.example.com' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== diff --git a/vendor/github.com/openshift/api/config/v1alpha1/custom.imagepolicy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/custom.imagepolicy.testsuite.yaml deleted file mode 100644 index 05b1487fa..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/custom.imagepolicy.testsuite.yaml +++ /dev/null @@ -1,451 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] ImagePolicy" -crd: 0000_10_config-operator_01_imagepolicy-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ImagePolicy with policyType PublicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Should be able to create a minimal ImagePolicy with policyType FulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - - name: Should not allow policyType PublicKey but not set publicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - FulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType FulcioCAData but not set fulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: 
FulcioCAWithRekor - PublicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow policyType set but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType set FulcioCAWith but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy ExactRepository but not set repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy RemapIdentity but not set prefixes - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" - - name: Test scope should not allow 'busybox' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - busybox - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" - - name: Test scope should not allow start with subnamesapces '*.example.com/test' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com/test" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" - - name: Test scope should not allow invalid digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:12dsdf - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid repository namespace or image specification in the image scope" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - 
publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow digest in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com/namespace:latest - signedPrefix: example.com/namespace - expectedError: "[spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should allow valid ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - - name: Test should allow valid signedIdentity prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey 
- publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - - name: Test scope should allow localhost name with port 'localhost:1234/namespace/namespace' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow localhost 'localhost/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow 'example.com/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow tag 'example.com/foo/bar:latest' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow full specification digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow '*.example.com' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== diff --git 
a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.backup.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.backup.testsuite.yaml deleted file mode 100644 index 91836dd93..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.backup.testsuite.yaml +++ /dev/null @@ -1,202 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreview] Backup" -crd: 0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a Backup with a valid spec - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* 2 * * *" - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* 2 * * *" - pvcName: etcdbackup-pvc - - name: Should be able to create an EtcdBackup without the pvcName specified - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* 2 * * *" - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* 2 * * *" - - name: Should be able to create a Backup with a valid schedule - At 22:00 on every day-of-week from Monday through Friday - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "0 22 * * 1-5" - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "0 22 * * 1-5" - pvcName: etcdbackup-pvc - - name: Should be able to create a Backup with a valid schedule - At 04:05 on Sunday. - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "5 4 * * SUN" - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "5 4 * * SUN" - pvcName: etcdbackup-pvc - - name: Should be able to create a Backup with a valid schedule - Predefined hourly - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "@hourly" - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "@hourly" - pvcName: etcdbackup-pvc - - name: Should fail to create an EtcdBackup with an invalid schedule - At 04:05 on invalid day FOO. - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "5 4 * * FOO" - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.schedule in body should match" - - name: Should fail to create an EtcdBackup with an invalid schedule - Predefined typo @hourli instead of @hourly. 
- initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "@hourli" - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.schedule in body should match" - - name: Should fail to create an EtcdBackup with an invalid schedule - Non standard L last Friday in month - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* * * * 5L" - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.schedule in body should match" - - name: Should fail to create an EtcdBackup with an invalid schedule - Non standard L 5th day before last day of month - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* * L-5 * *" - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.schedule in body should match" - - name: Should fail to create an EtcdBackup with an invalid schedule - Non standard W closest weekday to 15th of month - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* * 15W * *" - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.schedule in body should match" - - name: Should be able to create a Backup with a valid time zone - Africa/Banjul - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Africa/Banjul - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Africa/Banjul - pvcName: etcdbackup-pvc - - name: Should be able to create a Backup with a valid time zone - Etc/GMT-8 - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Etc/GMT-8 - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Etc/GMT-8 - pvcName: etcdbackup-pvc - - name: Should be able to create a Backup with a valid time zone - Etc/UTC - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Etc/UTC - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Etc/UTC - pvcName: etcdbackup-pvc - - name: Should be able to create a Backup with a valid time zone - America/Argentina/Catamarca - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: America/Argentina/Catamarca - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: America/Argentina/Catamarca - pvcName: etcdbackup-pvc - - name: Should fail to create an EtcdBackup with an invalid time zone - GMT2 - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: GMT2 - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.timeZone in body should match" - - name: Should fail to create an EtcdBackup with an invalid time zone - GMT+3 - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: GMT+3 - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.timeZone in body should match" - diff --git a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.clusterimagepolicy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.clusterimagepolicy.testsuite.yaml deleted file mode 100644 index 625310667..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.clusterimagepolicy.testsuite.yaml +++ /dev/null @@ -1,451 +0,0 @@ -apiVersion: 
apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreviewNoUpgrade] ClusterImagePolicy" -crd: 0000_10_config-operator_01_clusterimagepolicy-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ImagePolicy with policyType PublicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Should be able to create a minimal ImagePolicy with policyType FulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - - name: Should not allow policyType PublicKey but not set publicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - FulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType FulcioCAData but not set fulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - PublicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow policyType set but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType set FulcioCAWith but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy ExactRepository but not set repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - 
rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy RemapIdentity but not set prefixes - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" - - name: Test scope should not allow 'busybox' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - busybox - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" - - name: Test scope should not allow start with subnamespaces '*.example.com/test' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com/test" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" - - name: Test scope should not allow invalid digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:12dsdf - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid repository namespace or image specification in the image scope" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - -
name: Test should not allow digest in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com/namespace:latest - signedPrefix: example.com/namespace - expectedError: "[spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should allow valid ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - - name: Test should allow valid signedIdentity prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - - name: Test scope should allow localhost name with port 'localhost:1234/namespace/namespace' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow localhost 'localhost/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy 
- spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow 'example.com/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow tag 'example.com/foo/bar:latest' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow full specification digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow '*.example.com' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== diff --git a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.imagepolicy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.imagepolicy.testsuite.yaml deleted file mode 100644 index b469d4c52..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.imagepolicy.testsuite.yaml +++ /dev/null @@ -1,451 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreviewNoUpgrade] ImagePolicy" -crd: 0000_10_config-operator_01_imagepolicy-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ImagePolicy with policyType PublicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Should be able to create a 
minimal ImagePolicy with policyType FulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - - name: Should not allow policyType PublicKey but not set publicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - FulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType FulcioCAData but not set fulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - PublicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow policyType set but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType set FulcioCAWith but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy ExactRepository but not set repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy RemapIdentity but not set prefixes - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" - - name: Test scope should not allow 
'busybox' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - busybox - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" - - name: Test scope should not allow start with subnamespaces '*.example.com/test' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com/test" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" - - name: Test scope should not allow invalid digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:12dsdf - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid repository namespace or image specification in the image scope" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow digest in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in prefix/signedPrefix - initial: | -
apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com/namespace:latest - signedPrefix: example.com/namespace - expectedError: "[spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should allow valid ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - - name: Test should allow valid signedIdentity prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - - name: Test scope should allow localhost name with port 'localhost:1234/namespace/namespace' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow localhost 'localhost/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow 'example.com/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow tag 'example.com/foo/bar:latest' - initial: | - apiVersion: 
config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow full specification digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow '*.example.com' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== diff --git a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml deleted file mode 100644 index f73792738..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] InsightsDataGather" -crd: 0000_10_config-operator_01_insightsdatagather.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal InsightsDataGather - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: InsightsDataGather - spec: {} # No spec is required for a InsightsDataGather - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: InsightsDataGather - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go index 9af55b540..65eb5c1f7 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go @@ -9,6 +9,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Backup provides configuration for performing backups of the openshift cluster. // // Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
+// +kubebuilder:object:root=true +// +kubebuilder:resource:path=backups,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1482 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=AutomatedEtcdBackup // +openshift:compatibility-gen:level=4 type Backup struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go index 5c44e0e74..e3670f03e 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go @@ -9,6 +9,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // ClusterImagePolicy holds cluster-wide configuration for image signature verification // // Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusterimagepolicies,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1457 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=SigstoreImageVerification // +openshift:compatibility-gen:level=4 type ClusterImagePolicy struct { metav1.TypeMeta `json:",inline"` @@ -32,8 +38,9 @@ type ClusterImagePolicySpec struct { // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. - // Please be aware that the scopes should not be nested under the repositories of OpenShift Container Platform images. - // If configured, the policies for OpenShift Container Platform repositories will not be in effect. + // If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. + // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories + // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. 
// For additional details about the format, please refer to the document explaining the docker transport field, // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker // +kubebuilder:validation:Required diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go index e9ec90902..7031110ff 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go @@ -8,6 +8,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // ImagePolicy holds namespace-wide configuration for image signature verification // // Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagepolicies,scope=Namespaced +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1457 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=SigstoreImageVerification // +openshift:compatibility-gen:level=4 type ImagePolicy struct { metav1.TypeMeta `json:",inline"` @@ -31,8 +37,9 @@ type ImagePolicySpec struct { // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. - // Please be aware that the scopes should not be nested under the repositories of OpenShift Container Platform images. - // If configured, the policies for OpenShift Container Platform repositories will not be in effect. + // If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. + // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories + // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. // For additional details about the format, please refer to the document explaining the docker transport field, // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker // +kubebuilder:validation:Required diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go index 4dcdb2ec4..171e96d5b 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go @@ -9,6 +9,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // InsightsDataGather provides data gather configuration options for the Insights Operator.
// // Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=insightsdatagathers,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1245 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=InsightsConfig // +openshift:compatibility-gen:level=4 type InsightsDataGather struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 000000000..393365b41 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,92 @@ +backups.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1482 + CRDName: backups.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - AutomatedEtcdBackup + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Backup + Labels: {} + PluralName: backups + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - AutomatedEtcdBackup + Version: v1alpha1 + +clusterimagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1457 + CRDName: clusterimagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterImagePolicy + Labels: {} + PluralName: clusterimagepolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1alpha1 + +imagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1457 + CRDName: imagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImagePolicy + Labels: {} + PluralName: imagepolicies + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1alpha1 + +insightsdatagathers.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1245 + CRDName: insightsdatagathers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - InsightsConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: InsightsDataGather + Labels: {} + PluralName: insightsdatagathers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - InsightsConfig + Version: v1alpha1 + diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go 
b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go index efaac4fa2..9da086efc 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -102,7 +102,7 @@ func (ClusterImagePolicyList) SwaggerDoc() map[string]string { var map_ClusterImagePolicySpec = map[string]string{ "": "CLusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.", - "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. Please be aware that the scopes should not be nested under the repositories of OpenShift Container Platform images. If configured, the policies for OpenShift Container Platform repositories will not be in effect. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. 
For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", "policy": "policy contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", } @@ -151,7 +151,7 @@ func (ImagePolicyList) SwaggerDoc() map[string]string { var map_ImagePolicySpec = map[string]string{ "": "ImagePolicySpec is the specification of the ImagePolicy CRD.", - "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. Please be aware that the scopes should not be nested under the repositories of OpenShift Container Platform images. If configured, the policies for OpenShift Container Platform repositories will not be in effect. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. 
For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", "policy": "policy contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", } diff --git a/vendor/github.com/openshift/api/image/v1/generated.proto b/vendor/github.com/openshift/api/image/v1/generated.proto index 0b7ae7182..6b5f24cb2 100644 --- a/vendor/github.com/openshift/api/image/v1/generated.proto +++ b/vendor/github.com/openshift/api/image/v1/generated.proto @@ -45,7 +45,7 @@ message DockerImageReference { message Image { // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DockerImageReference is the string that can be used to pull this image. optional string dockerImageReference = 2; @@ -53,7 +53,7 @@ message Image { // DockerImageMetadata contains metadata about this image // +patchStrategy=replace // +kubebuilder:pruning:PreserveUnknownFields - optional k8s.io.apimachinery.pkg.runtime.RawExtension dockerImageMetadata = 3; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension dockerImageMetadata = 3; // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" optional string dockerImageMetadataVersion = 4; @@ -115,10 +115,10 @@ message ImageBlobReferences { // ImageImportSpec describes a request to import a specific image. message ImageImportSpec { // From is the source of an image to import; only kind DockerImage is allowed - optional k8s.io.api.core.v1.ObjectReference from = 1; + optional .k8s.io.api.core.v1.ObjectReference from = 1; // To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used - optional k8s.io.api.core.v1.LocalObjectReference to = 2; + optional .k8s.io.api.core.v1.LocalObjectReference to = 2; // ImportPolicy is the policy controlling how the image is imported optional TagImportPolicy importPolicy = 3; @@ -133,7 +133,7 @@ message ImageImportSpec { // ImageImportStatus describes the result of an image import. message ImageImportStatus { // Status is the status of the image import, including errors encountered while retrieving the image - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; // Image is the metadata of that image, if the image was located optional Image image = 2; @@ -174,7 +174,7 @@ message ImageLayerData { message ImageList { // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of images repeated Image items = 2; @@ -226,7 +226,7 @@ message ImageManifest { message ImageSignature { // metadata is the standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Required: Describes a type of stored blob. optional string type = 2; @@ -247,7 +247,7 @@ message ImageSignature { map signedClaims = 6; // If specified, it is the time of signature's creation. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 7; // If specified, it holds information about an issuer of signing certificate or key (a person or entity // who signed the signing certificate or key). @@ -278,7 +278,7 @@ message ImageSignature { message ImageStream { // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec describes the desired state of this stream // +optional @@ -307,7 +307,7 @@ message ImageStream { message ImageStreamImage { // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Image associated with the ImageStream and image name. optional Image image = 2; @@ -327,7 +327,7 @@ message ImageStreamImage { message ImageStreamImport { // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is a description of the images that the user wishes to import optional ImageStreamImportSpec spec = 2; @@ -370,7 +370,7 @@ message ImageStreamImportStatus { message ImageStreamLayers { // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // blobs is a map of blob name to metadata about the blob. map blobs = 2; @@ -387,7 +387,7 @@ message ImageStreamLayers { message ImageStreamList { // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of imageStreams repeated ImageStream items = 2; @@ -407,7 +407,7 @@ message ImageStreamList { message ImageStreamMapping { // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Image is a container image. optional Image image = 2; @@ -464,7 +464,7 @@ message ImageStreamStatus { message ImageStreamTag { // metadata is the standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // tag is the spec tag associated with this image stream tag, and it may be null // if only pushes have occurred to this image stream. @@ -494,7 +494,7 @@ message ImageStreamTag { message ImageStreamTagList { // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of image stream tags repeated ImageStreamTag items = 2; @@ -514,7 +514,7 @@ message ImageStreamTagList { message ImageTag { // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the spec tag associated with this image stream tag, and it may be null // if only pushes have occurred to this image stream. @@ -541,7 +541,7 @@ message ImageTag { message ImageTagList { // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of image stream tags repeated ImageTag items = 2; @@ -562,7 +562,7 @@ message NamedTagEventList { // RepositoryImportSpec describes a request to import images from a container image repository. message RepositoryImportSpec { // From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed - optional k8s.io.api.core.v1.ObjectReference from = 1; + optional .k8s.io.api.core.v1.ObjectReference from = 1; // ImportPolicy is the policy controlling how the image is imported optional TagImportPolicy importPolicy = 2; @@ -577,7 +577,7 @@ message RepositoryImportSpec { // RepositoryImportStatus describes the result of an image repository import message RepositoryImportStatus { // Status reflects whether any failure occurred during import - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; // Images is a list of images successfully retrieved by the import of the repository. repeated ImageImportStatus images = 2; @@ -593,11 +593,11 @@ message SecretList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of secret objects. // More info: https://kubernetes.io/docs/concepts/configuration/secret - repeated k8s.io.api.core.v1.Secret items = 2; + repeated .k8s.io.api.core.v1.Secret items = 2; } // SignatureCondition describes an image signature condition of particular kind at particular probe time. @@ -609,10 +609,10 @@ message SignatureCondition { optional string status = 2; // Last time the condition was checked. 
- optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transit from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // (brief) reason for the condition's last transition. optional string reason = 5; @@ -649,7 +649,7 @@ message SignatureSubject { // TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag. message TagEvent { // Created holds the time the TagEvent was created - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 1; // DockerImageReference is the string that can be used to pull this image optional string dockerImageReference = 2; @@ -670,7 +670,7 @@ message TagEventCondition { optional string status = 2; // LastTransitionTIme is the time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // Reason is a brief machine readable explanation for the condition's last transition. optional string reason = 4; @@ -706,7 +706,7 @@ message TagReference { // Optional; if specified, a reference to another image that this tag should point to. Valid values // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references // can only reference a tag within this same ImageStream. - optional k8s.io.api.core.v1.ObjectReference from = 3; + optional .k8s.io.api.core.v1.ObjectReference from = 3; // Reference states if the tag will be imported. Default value is false, which means the tag will // be imported. 
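Every hunk in the image/v1 generated.proto diff above makes the same mechanical change: cross-package type references such as `k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta` gain a leading dot. In protobuf, a type name without a leading dot is resolved relative to the current scope, searching from the innermost enclosing message or package outward, so a sibling package or message whose name begins with `k8s` could shadow the intended type; a name that starts with `.` is fully qualified and always resolved from the root of the type namespace. Field numbers and wire types are untouched, so the leading-dot rewrite itself has no wire-format impact. A minimal sketch of the two reference styles follows; the package, message, and field names here are illustrative only and are not part of the vendored API:

```proto
syntax = "proto2";

package github.com.openshift.api.image.v1;

import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";

// Hypothetical message contrasting the two reference styles this vendor
// bump switches between. Both fields name the same type and encode
// identically on the wire; only source-level name resolution differs.
message Example {
  // Relative reference: resolved from the innermost enclosing scope
  // outward, so it can be shadowed by a local package or message whose
  // name starts with "k8s".
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta relative = 1;

  // Leading-dot reference: resolution starts at the root of the proto
  // namespace, so the target is unambiguous regardless of siblings.
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta qualified = 2;
}
```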
diff --git a/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml b/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml deleted file mode 100644 index a533efbc1..000000000 --- a/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml +++ /dev/null @@ -1,365 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: securitycontextconstraints.security.openshift.io -spec: - group: security.openshift.io - names: - kind: SecurityContextConstraints - listKind: SecurityContextConstraintsList - plural: securitycontextconstraints - singular: securitycontextconstraints - scope: Cluster - versions: - - additionalPrinterColumns: - - description: Determines if a container can request to be run as privileged - jsonPath: .allowPrivilegedContainer - name: Priv - type: string - - description: A list of capabilities that can be requested to add to the container - jsonPath: .allowedCapabilities - name: Caps - type: string - - description: Strategy that will dictate what labels will be set in the SecurityContext - jsonPath: .seLinuxContext.type - name: SELinux - type: string - - description: Strategy that will dictate what RunAsUser is used in the SecurityContext - jsonPath: .runAsUser.type - name: RunAsUser - type: string - - description: Strategy that will dictate what fs group is used by the SecurityContext - jsonPath: .fsGroup.type - name: FSGroup - type: string - - description: Strategy that will dictate what supplemental groups are used by - the SecurityContext - jsonPath: .supplementalGroups.type - name: SupGroup - type: string - - description: Sort order of SCCs - jsonPath: .priority - name: Priority - type: string - - description: Force containers to run with a read only root file system - jsonPath: .readOnlyRootFilesystem - name: ReadOnlyRootFS - type: string - - description: White list of allowed volume plugins - jsonPath: .volumes - name: Volumes - type: string - name: v1 - schema: - openAPIV3Schema: - description: "SecurityContextConstraints governs the ability to make requests - that affect the SecurityContext that will be applied to a container. For - historical reasons SCC was exposed under the core Kubernetes API group. - That exposure is deprecated and will be removed in a future release - users - should instead use the security.openshift.io group to manage SecurityContextConstraints. - \n Compatibility level 1: Stable within a major release for a minimum of - 12 months or 3 minor releases (whichever is longer)." - properties: - allowHostDirVolumePlugin: - description: AllowHostDirVolumePlugin determines if the policy allow containers - to use the HostDir volume plugin - type: boolean - allowHostIPC: - description: AllowHostIPC determines if the policy allows host ipc in - the containers. - type: boolean - allowHostNetwork: - description: AllowHostNetwork determines if the policy allows the use - of HostNetwork in the pod spec. - type: boolean - allowHostPID: - description: AllowHostPID determines if the policy allows host pid in - the containers. - type: boolean - allowHostPorts: - description: AllowHostPorts determines if the policy allows host ports - in the containers. 
- type: boolean - allowPrivilegeEscalation: - description: AllowPrivilegeEscalation determines if a pod can request - to allow privilege escalation. If unspecified, defaults to true. - nullable: true - type: boolean - allowPrivilegedContainer: - description: AllowPrivilegedContainer determines if a container can request - to be run as privileged. - type: boolean - allowedCapabilities: - description: AllowedCapabilities is a list of capabilities that can be - requested to add to the container. Capabilities in this field maybe - added at the pod author's discretion. You must not list a capability - in both AllowedCapabilities and RequiredDropCapabilities. To allow all - capabilities you may use '*'. - items: - description: Capability represent POSIX capabilities type - type: string - nullable: true - type: array - allowedFlexVolumes: - description: AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty - or nil indicates that all Flexvolumes may be used. This parameter is - effective only when the usage of the Flexvolumes is allowed in the "Volumes" - field. - items: - description: AllowedFlexVolume represents a single Flexvolume that is - allowed to be used. - properties: - driver: - description: Driver is the name of the Flexvolume driver. - type: string - required: - - driver - type: object - nullable: true - type: array - allowedUnsafeSysctls: - description: "AllowedUnsafeSysctls is a list of explicitly allowed unsafe - sysctls, defaults to none. Each entry is either a plain sysctl name - or ends in \"*\" in which case it is considered as a prefix of allowed - sysctls. Single * means all unsafe sysctls are allowed. Kubelet has - to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - \n Examples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. - \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc." - items: - type: string - nullable: true - type: array - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - defaultAddCapabilities: - description: DefaultAddCapabilities is the default set of capabilities - that will be added to the container unless the pod spec specifically - drops the capability. You may not list a capabiility in both DefaultAddCapabilities - and RequiredDropCapabilities. - items: - description: Capability represent POSIX capabilities type - type: string - nullable: true - type: array - defaultAllowPrivilegeEscalation: - description: DefaultAllowPrivilegeEscalation controls the default setting - for whether a process can gain more privileges than its parent process. - nullable: true - type: boolean - forbiddenSysctls: - description: "ForbiddenSysctls is a list of explicitly forbidden sysctls, - defaults to none. Each entry is either a plain sysctl name or ends in - \"*\" in which case it is considered as a prefix of forbidden sysctls. - Single * means all sysctls are forbidden. \n Examples: e.g. \"foo/*\" - forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", - \"foo.baz\", etc." - items: - type: string - nullable: true - type: array - fsGroup: - description: FSGroup is the strategy that will dictate what fs group is - used by the SecurityContext. 
- nullable: true - properties: - ranges: - description: Ranges are the allowed ranges of fs groups. If you would - like to force a single fs group then supply a single range with - the same start and end. - items: - description: 'IDRange provides a min/max of an allowed range of - IDs. TODO: this could be reused for UIDs.' - properties: - max: - description: Max is the end of the range, inclusive. - format: int64 - type: integer - min: - description: Min is the start of the range, inclusive. - format: int64 - type: integer - type: object - type: array - type: - description: Type is the strategy that will dictate what FSGroup is - used in the SecurityContext. - type: string - type: object - groups: - description: The groups that have permission to use this security context - constraints - items: - type: string - nullable: true - type: array - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - priority: - description: Priority influences the sort order of SCCs when evaluating - which SCCs to try first for a given pod request based on access in the - Users and Groups fields. The higher the int, the higher priority. An - unset value is considered a 0 priority. If scores for multiple SCCs - are equal they will be sorted from most restrictive to least restrictive. - If both priorities and restrictions are equal the SCCs will be sorted - by name. - format: int32 - nullable: true - type: integer - readOnlyRootFilesystem: - description: ReadOnlyRootFilesystem when set to true will force containers - to run with a read only root file system. If the container specifically - requests to run with a non-read only root file system the SCC should - deny the pod. If set to false the container may run with a read only - root file system if it wishes but it will not be forced to. - type: boolean - requiredDropCapabilities: - description: RequiredDropCapabilities are the capabilities that will be - dropped from the container. These are required to be dropped and cannot - be added. - items: - description: Capability represent POSIX capabilities type - type: string - nullable: true - type: array - runAsUser: - description: RunAsUser is the strategy that will dictate what RunAsUser - is used in the SecurityContext. - nullable: true - properties: - type: - description: Type is the strategy that will dictate what RunAsUser - is used in the SecurityContext. - type: string - uid: - description: UID is the user id that containers must run as. Required - for the MustRunAs strategy if not using namespace/service account - allocated uids. - format: int64 - type: integer - uidRangeMax: - description: UIDRangeMax defines the max value for a strategy that - allocates by range. - format: int64 - type: integer - uidRangeMin: - description: UIDRangeMin defines the min value for a strategy that - allocates by range. - format: int64 - type: integer - type: object - seLinuxContext: - description: SELinuxContext is the strategy that will dictate what labels - will be set in the SecurityContext. - nullable: true - properties: - seLinuxOptions: - description: seLinuxOptions required to run as; required for MustRunAs - properties: - level: - description: Level is SELinux level label that applies to the - container. 
- type: string - role: - description: Role is a SELinux role label that applies to the - container. - type: string - type: - description: Type is a SELinux type label that applies to the - container. - type: string - user: - description: User is a SELinux user label that applies to the - container. - type: string - type: object - type: - description: Type is the strategy that will dictate what SELinux context - is used in the SecurityContext. - type: string - type: object - seccompProfiles: - description: "SeccompProfiles lists the allowed profiles that may be set - for the pod or container's seccomp annotations. An unset (nil) or empty - value means that no profiles may be specifid by the pod or container.\tThe - wildcard '*' may be used to allow all profiles. When used to generate - a value for a pod the first non-wildcard profile will be used as the - default." - items: - type: string - nullable: true - type: array - supplementalGroups: - description: SupplementalGroups is the strategy that will dictate what - supplemental groups are used by the SecurityContext. - nullable: true - properties: - ranges: - description: Ranges are the allowed ranges of supplemental groups. If - you would like to force a single supplemental group then supply - a single range with the same start and end. - items: - description: 'IDRange provides a min/max of an allowed range of - IDs. TODO: this could be reused for UIDs.' - properties: - max: - description: Max is the end of the range, inclusive. - format: int64 - type: integer - min: - description: Min is the start of the range, inclusive. - format: int64 - type: integer - type: object - type: array - type: - description: Type is the strategy that will dictate what supplemental - groups is used in the SecurityContext. - type: string - type: object - users: - description: The users who have permissions to use this security context - constraints - items: - type: string - nullable: true - type: array - volumes: - description: Volumes is a white list of allowed volume plugins. FSType - corresponds directly with the field names of a VolumeSource (azureFile, - configMap, emptyDir). To allow all volumes you may use "*". To allow - no volumes, set to ["none"]. - items: - description: FS Type gives strong typing to different file systems that - are used by volumes. - type: string - nullable: true - type: array - required: - - allowHostDirVolumePlugin - - allowHostIPC - - allowHostNetwork - - allowHostPID - - allowHostPorts - - allowPrivilegedContainer - - allowedCapabilities - - defaultAddCapabilities - - priority - - readOnlyRootFilesystem - - requiredDropCapabilities - - volumes - type: object - served: true - storage: true diff --git a/vendor/github.com/openshift/api/security/v1/consts.go b/vendor/github.com/openshift/api/security/v1/consts.go index 3b686c31d..7e8adf6e6 100644 --- a/vendor/github.com/openshift/api/security/v1/consts.go +++ b/vendor/github.com/openshift/api/security/v1/consts.go @@ -10,4 +10,7 @@ const ( // This annotation pins required SCCs for core OpenShift workloads to prevent preemption of custom SCCs. // It is being used in the SCC admission plugin. RequiredSCCAnnotation = "openshift.io/required-scc" + + // MinimallySufficientPodSecurityStandard indicates the PodSecurityStandard that matched the SCCs available to the users of the namespace. 
+ MinimallySufficientPodSecurityStandard = "security.openshift.io/MinimallySufficientPodSecurityStandard" ) diff --git a/vendor/github.com/openshift/api/security/v1/generated.pb.go b/vendor/github.com/openshift/api/security/v1/generated.pb.go index d57b162c4..e28b59584 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/security/v1/generated.pb.go @@ -10,7 +10,7 @@ import ( proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" math "math" math_bits "math/bits" @@ -592,117 +592,120 @@ func init() { } var fileDescriptor_af65d9655aa67551 = []byte{ - // 1750 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0x1c, 0x49, - 0x15, 0x77, 0x7b, 0xfc, 0x35, 0x65, 0xc7, 0x1f, 0x65, 0xc7, 0xe9, 0x35, 0xeb, 0x19, 0xd3, 0x36, - 0xab, 0x08, 0xd8, 0x19, 0x12, 0x2d, 0x6c, 0xd0, 0xb2, 0xd1, 0x4e, 0x7b, 0xd6, 0x59, 0xaf, 0x9c, - 0x64, 0xb6, 0x66, 0xbd, 0x42, 0xab, 0x15, 0xa2, 0xdc, 0x53, 0x33, 0xae, 0xb8, 0xbf, 0xe8, 0xaa, - 0x76, 0x3c, 0xe2, 0x12, 0x89, 0x0b, 0x47, 0x24, 0xae, 0x88, 0x33, 0xfc, 0x03, 0x5c, 0x10, 0x70, - 0x8d, 0x04, 0x12, 0x39, 0xa1, 0x9c, 0x46, 0x64, 0x10, 0x27, 0x8e, 0xdc, 0x72, 0x42, 0x55, 0x53, - 0xf3, 0xd1, 0x3d, 0xdd, 0xe3, 0x4e, 0x48, 0xa2, 0xbd, 0x4d, 0xbf, 0x8f, 0xdf, 0xef, 0xbd, 0xd7, - 0xaf, 0x5f, 0xbd, 0x1a, 0xf0, 0x5e, 0x8b, 0xf2, 0xd3, 0xf0, 0xa4, 0x64, 0x79, 0x4e, 0xd9, 0xf3, - 0x89, 0xcb, 0x4e, 0x69, 0x93, 0x97, 0xb1, 0x4f, 0xcb, 0x8c, 0x58, 0x61, 0x40, 0x79, 0xbb, 0x7c, - 0x7e, 0xa3, 0xdc, 0x22, 0x2e, 0x09, 0x30, 0x27, 0x8d, 0x92, 0x1f, 0x78, 0xdc, 0x83, 0x7b, 0x43, - 0xaf, 0xd2, 0xc0, 0xab, 0x84, 0x7d, 0x5a, 0xea, 0x7b, 0x95, 0xce, 0x6f, 0x6c, 0xbd, 0x3b, 0x82, - 0xdd, 0xf2, 0x5a, 0x5e, 0x59, 0x3a, 0x9f, 0x84, 0x4d, 0xf9, 0x24, 0x1f, 0xe4, 0xaf, 0x1e, 0xe8, - 0x96, 0x71, 0x76, 0x8b, 0x95, 0xa8, 0x27, 0xc9, 0x2d, 0x2f, 0x20, 0x09, 0xc4, 0x5b, 0xef, 0x0d, - 0x6d, 0x1c, 0x6c, 0x9d, 0x52, 0x97, 0x04, 0xed, 0xb2, 0x7f, 0xd6, 0x12, 0x02, 0x56, 0x76, 0x08, - 0xc7, 0x49, 0x5e, 0x3f, 0x48, 0xf3, 0x0a, 0x42, 0x97, 0x53, 0x87, 0x94, 0x99, 0x75, 0x4a, 0x1c, - 0x1c, 0xf7, 0x33, 0x3e, 0x00, 0x6b, 0x15, 0xdb, 0xf6, 0x1e, 0x92, 0xc6, 0x81, 0x4d, 0x2e, 0xbe, - 0xf0, 0xec, 0xd0, 0x21, 0xf0, 0x1d, 0x30, 0xd7, 0x08, 0xe8, 0x39, 0x09, 0x74, 0x6d, 0x47, 0xbb, - 0x9e, 0x37, 0x97, 0x1f, 0x77, 0x8a, 0x53, 0xdd, 0x4e, 0x71, 0xae, 0x2a, 0xa5, 0x48, 0x69, 0x8d, - 0xdf, 0x69, 0x60, 0xf3, 0xa0, 0x7e, 0x27, 0xf0, 0x42, 0xbf, 0xce, 0x05, 0x6a, 0xab, 0x7d, 0xdf, - 0xe7, 0xd4, 0x73, 0x19, 0x7c, 0x1f, 0xcc, 0xf0, 0xb6, 0x4f, 0x14, 0xc0, 0xae, 0x02, 0x98, 0xf9, - 0xbc, 0xed, 0x93, 0xe7, 0x9d, 0xe2, 0x7a, 0xcc, 0x4b, 0x88, 0x91, 0x74, 0x80, 0xc7, 0x60, 0x2e, - 0xc0, 0x6e, 0x8b, 0x30, 0x7d, 0x7a, 0x27, 0x77, 0x7d, 0xf1, 0xe6, 0xbb, 0xa5, 0x2c, 0x2f, 0xa2, - 0x74, 0x58, 0x45, 0xc2, 0x6b, 0x18, 0xaa, 0x7c, 0x64, 0x48, 0x81, 0x19, 0x77, 0xc0, 0xbc, 0x32, - 0x81, 0xdb, 0x20, 0xe7, 0x50, 0x57, 0x46, 0x96, 0x33, 0x17, 0x95, 0x7d, 0xee, 0x2e, 0x75, 0x91, - 0x90, 0x4b, 0x35, 0xbe, 0xd0, 0xa7, 0x63, 0x6a, 0x7c, 0x81, 0x84, 0xdc, 0xf8, 0x8f, 0x06, 0xae, - 0xd5, 0xbc, 0x46, 0x5d, 0x71, 0xd7, 0x3c, 0x9b, 0x5a, 0x6d, 0x44, 0xce, 0x29, 0x79, 0x08, 0x2d, - 0x30, 0xc3, 0x7c, 0x62, 0x49, 0xe8, 0xc5, 0x9b, 0x95, 0x6c, 0x91, 0xa7, 0x80, 0xd5, 0x7d, 0x62, - 0x99, 0x4b, 0xfd, 0xba, 0x89, 0x27, 0x24, 0xc1, 0xe1, 0x19, 0x98, 0x63, 0x1c, 0xf3, 0x90, 0xc9, - 0x10, 0x17, 0x6f, 0xee, 0xff, 0x7f, 0x34, 0x12, 0x6a, 0x58, 0xb6, 0xde, 
0x33, 0x52, 0x14, 0xc6, - 0x1f, 0x35, 0xf0, 0x8d, 0x09, 0x01, 0xc2, 0xcf, 0xc0, 0x02, 0x27, 0x8e, 0x6f, 0x63, 0x4e, 0x54, - 0xd6, 0xbb, 0xa5, 0x5e, 0x27, 0xca, 0x00, 0x44, 0x8f, 0x2b, 0xf2, 0xcf, 0x95, 0x99, 0xcc, 0x6b, - 0x55, 0xd1, 0x2d, 0xf4, 0xa5, 0x68, 0x00, 0x03, 0x0f, 0xc1, 0x3a, 0x23, 0xc1, 0x39, 0xb5, 0x48, - 0xc5, 0xb2, 0xbc, 0xd0, 0xe5, 0xf7, 0xb0, 0xa3, 0xba, 0x21, 0x6f, 0x5e, 0xeb, 0x76, 0x8a, 0xeb, - 0xf5, 0x71, 0x35, 0x4a, 0xf2, 0x31, 0xfe, 0xaa, 0x81, 0xed, 0x89, 0x79, 0xc3, 0xdf, 0x6b, 0x60, - 0x13, 0xf7, 0xfa, 0x3f, 0x8a, 0xca, 0x74, 0x4d, 0xb6, 0xdf, 0x67, 0xd9, 0xaa, 0x1b, 0x75, 0x9e, - 0x5c, 0xeb, 0x82, 0x4a, 0x7e, 0xb3, 0x92, 0x48, 0x8c, 0x52, 0x02, 0x32, 0x7e, 0x39, 0x0d, 0x8c, - 0x31, 0xe4, 0x3a, 0xb1, 0x9b, 0xf5, 0xf0, 0xe4, 0x01, 0xb1, 0xb8, 0x6a, 0x42, 0x37, 0xd2, 0x84, - 0x47, 0x2f, 0xd9, 0x1d, 0x63, 0xb8, 0xa9, 0xfd, 0x18, 0xc4, 0xfa, 0xf1, 0xd3, 0x97, 0x65, 0x8c, - 0xb0, 0x4d, 0x6e, 0xcb, 0x9f, 0x83, 0x77, 0xb2, 0x45, 0xfc, 0x1a, 0x1a, 0xd4, 0x78, 0x34, 0x0d, - 0x0a, 0x93, 0xa3, 0x87, 0x0f, 0x22, 0xef, 0xe0, 0x93, 0x57, 0x52, 0x91, 0xaf, 0x53, 0xfd, 0xff, - 0xa4, 0x25, 0xb5, 0xe2, 0x1b, 0x28, 0x3e, 0xdc, 0x01, 0x33, 0x21, 0x23, 0x81, 0xcc, 0x35, 0x3f, - 0xac, 0xc7, 0x31, 0x23, 0x01, 0x92, 0x1a, 0x68, 0x80, 0xb9, 0x96, 0x38, 0x5b, 0x98, 0x9e, 0x93, - 0x23, 0x03, 0x88, 0xf8, 0xe5, 0x69, 0xc3, 0x90, 0xd2, 0x18, 0xff, 0xd5, 0xc0, 0x5e, 0x96, 0x02, - 0xc0, 0x1a, 0xc8, 0xab, 0xaf, 0xd1, 0x6c, 0x4f, 0x4a, 0xe1, 0xbe, 0x72, 0x6d, 0x92, 0x80, 0xb8, - 0x16, 0x31, 0xaf, 0x74, 0x3b, 0xc5, 0x7c, 0xa5, 0xef, 0x89, 0x86, 0x20, 0xe2, 0x6c, 0x0d, 0x08, - 0x66, 0x9e, 0xab, 0x52, 0x18, 0x1e, 0x58, 0x52, 0x8a, 0x94, 0x36, 0x52, 0xbb, 0xdc, 0xab, 0x69, - 0xdc, 0x3f, 0x68, 0x60, 0x45, 0x1e, 0x81, 0x22, 0x30, 0x0b, 0x8b, 0x83, 0x1a, 0xfe, 0x14, 0x2c, - 0x88, 0x95, 0xa2, 0x81, 0x39, 0x56, 0xf9, 0x7d, 0x6f, 0x84, 0x66, 0xb0, 0x4a, 0x94, 0xfc, 0xb3, - 0x96, 0x10, 0xb0, 0x92, 0xb0, 0x1e, 0x66, 0x7c, 0x97, 0x70, 0x6c, 0x42, 0xc5, 0x09, 0x86, 0x32, - 0x34, 0x40, 0x85, 0xbb, 0x60, 0x56, 0x9e, 0xc1, 0x2a, 0xdf, 0x2b, 0xca, 0x78, 0x56, 0x46, 0x82, - 0x7a, 0x3a, 0xf8, 0x36, 0x98, 0x91, 0x21, 0x88, 0x4c, 0x97, 0xcc, 0x05, 0xf1, 0x4a, 0xab, 0x98, - 0x63, 0x24, 0xa5, 0xc6, 0xdf, 0x35, 0xb0, 0x1e, 0x0b, 0xfc, 0x88, 0x32, 0x0e, 0xbf, 0x1a, 0x0b, - 0xbe, 0x94, 0x2d, 0x78, 0xe1, 0x2d, 0x43, 0x1f, 0x94, 0xab, 0x2f, 0x19, 0x09, 0xfc, 0x4b, 0x30, - 0x4b, 0x39, 0x71, 0xfa, 0x8b, 0xc8, 0xf7, 0xb3, 0x7d, 0x57, 0xb1, 0x38, 0x87, 0xf9, 0x1e, 0x0a, - 0x2c, 0xd4, 0x83, 0x34, 0xfe, 0xa1, 0x01, 0x1d, 0x85, 0x6e, 0x85, 0x89, 0xc6, 0x8d, 0xef, 0x4e, - 0x3f, 0x8c, 0xec, 0x4e, 0xdf, 0x8a, 0xed, 0x4e, 0x57, 0xc7, 0xfc, 0x46, 0xb6, 0xa7, 0xb7, 0x40, - 0x2e, 0xa4, 0x0d, 0xb5, 0xbc, 0xcc, 0x8b, 0xc5, 0xe5, 0xf8, 0xb0, 0x8a, 0x84, 0x0c, 0xde, 0x00, - 0x8b, 0x21, 0x6d, 0xc8, 0xf0, 0xee, 0x52, 0x57, 0x56, 0x3a, 0x67, 0xae, 0x74, 0x3b, 0xc5, 0xc5, - 0x63, 0xb5, 0x19, 0x89, 0x15, 0x68, 0xd4, 0x26, 0xe2, 0x82, 0x2f, 0xf4, 0x99, 0x04, 0x17, 0x7c, - 0x81, 0x46, 0x6d, 0x8c, 0xbf, 0x68, 0x60, 0xbb, 0xfe, 0xf1, 0x11, 0x75, 0xc3, 0x8b, 0x7d, 0xcf, - 0xe5, 0xe4, 0x82, 0xc7, 0xb3, 0xbb, 0x1d, 0xc9, 0xee, 0xdb, 0xb1, 0xec, 0xb6, 0x92, 0x9d, 0x47, - 0x52, 0xfc, 0x09, 0x58, 0x66, 0x44, 0xda, 0x28, 0x44, 0x35, 0xf7, 0x8c, 0xa4, 0xcf, 0x43, 0xa1, - 0x29, 0x4b, 0x13, 0x76, 0x3b, 0xc5, 0xe5, 0xa8, 0x0c, 0xc5, 0xd0, 0x8c, 0xdf, 0xac, 0x81, 0xad, - 0xfe, 0x60, 0x50, 0x51, 0xec, 0x7b, 0x2e, 0xe3, 0x01, 0xa6, 0x2e, 0x67, 0x6f, 0xe0, 0x83, 0xb9, - 0x0e, 0x16, 0xfc, 0x80, 0x7a, 0x82, 0x5f, 0xa6, 0x36, 0x6b, 0x2e, 0x89, 0x0e, 0xad, 0x29, 0x19, - 
0x1a, 0x68, 0xe1, 0x57, 0x40, 0x97, 0x83, 0xa5, 0x16, 0xd0, 0x73, 0x6a, 0x93, 0x16, 0x69, 0x88, - 0x80, 0xb1, 0x08, 0x40, 0xbe, 0xdf, 0x05, 0x73, 0x47, 0x31, 0xe9, 0x95, 0x14, 0x3b, 0x94, 0x8a, - 0x00, 0x19, 0xd8, 0x6c, 0x90, 0x26, 0x0e, 0x6d, 0x5e, 0x69, 0x34, 0xf6, 0xb1, 0x8f, 0x4f, 0xa8, - 0x4d, 0x39, 0x25, 0x4c, 0x9f, 0x91, 0x83, 0xf5, 0x03, 0xb1, 0xc3, 0x54, 0x13, 0x2d, 0x9e, 0x77, - 0x8a, 0xdb, 0xe3, 0x57, 0x9d, 0xd2, 0xc0, 0xa4, 0x8d, 0x52, 0xa0, 0x61, 0x1b, 0xe8, 0x01, 0xf9, - 0x59, 0x48, 0x03, 0xd2, 0xa8, 0x06, 0x9e, 0x1f, 0xa1, 0x9d, 0x95, 0xb4, 0x1f, 0x8a, 0x74, 0x50, - 0x8a, 0xcd, 0xe5, 0xc4, 0xa9, 0xf0, 0xf0, 0x01, 0x58, 0x57, 0x63, 0x3a, 0xc2, 0x3a, 0x27, 0x59, - 0x6f, 0x89, 0xc5, 0xb3, 0x32, 0xae, 0xbe, 0x9c, 0x30, 0x09, 0x74, 0xf0, 0xe6, 0x3e, 0xf1, 0x18, - 0xaf, 0xd2, 0xa0, 0x77, 0xef, 0xaa, 0xd9, 0x61, 0x8b, 0xba, 0xfa, 0x7c, 0xc2, 0x9b, 0x4b, 0xb0, - 0x43, 0xa9, 0x08, 0xb0, 0x0c, 0xe6, 0xcf, 0xe5, 0x33, 0xd3, 0x17, 0x64, 0xf4, 0x57, 0xbb, 0x9d, - 0xe2, 0x7c, 0xcf, 0x44, 0x44, 0x3c, 0x77, 0x50, 0x97, 0x1f, 0x54, 0xdf, 0x0a, 0xfe, 0x42, 0x03, - 0x10, 0xc7, 0xaf, 0x81, 0x4c, 0xbf, 0x2a, 0x07, 0xdf, 0xfb, 0xd9, 0x06, 0xdf, 0xd8, 0x35, 0xd2, - 0xdc, 0x52, 0x29, 0xc0, 0x31, 0x15, 0x43, 0x09, 0x74, 0xb0, 0x0a, 0x56, 0x07, 0x29, 0xdd, 0x23, - 0xfc, 0xa1, 0x17, 0x9c, 0xe9, 0x79, 0x59, 0x0c, 0x5d, 0x21, 0xad, 0x56, 0x62, 0x7a, 0x34, 0xe6, - 0x01, 0x6f, 0x83, 0xe5, 0x81, 0xac, 0xe6, 0x05, 0x9c, 0xe9, 0x40, 0x62, 0x6c, 0x2a, 0x8c, 0xe5, - 0x4a, 0x44, 0x8b, 0x62, 0xd6, 0xf0, 0x16, 0x58, 0x1a, 0x4a, 0x0e, 0xab, 0xfa, 0xa2, 0xf4, 0xde, - 0x50, 0xde, 0x4b, 0x95, 0x11, 0x1d, 0x8a, 0x58, 0x46, 0x3c, 0x0f, 0x6b, 0xfb, 0xfa, 0x52, 0x8a, - 0xe7, 0x61, 0x6d, 0x1f, 0x45, 0x2c, 0xa1, 0x03, 0x8a, 0xfd, 0xef, 0x21, 0xf2, 0x35, 0x7e, 0xcc, - 0x2c, 0x6c, 0xcb, 0x73, 0x44, 0xdf, 0x94, 0x60, 0xbb, 0xdd, 0x4e, 0xb1, 0x58, 0x9d, 0x6c, 0x8a, - 0x2e, 0xc3, 0x82, 0x3f, 0x8e, 0xcf, 0x8d, 0x11, 0x9e, 0x6b, 0x92, 0xe7, 0xed, 0xf1, 0x99, 0x31, - 0x42, 0x90, 0xea, 0x2d, 0x1a, 0xa9, 0x3f, 0x4f, 0xd5, 0xec, 0xd4, 0xaf, 0xbc, 0xc8, 0x2d, 0x75, - 0xe2, 0xd1, 0x31, 0x7c, 0x85, 0x51, 0x33, 0x14, 0xa3, 0x84, 0x1e, 0xc8, 0x07, 0xfd, 0x43, 0x52, - 0x5f, 0x96, 0xfc, 0xb7, 0x33, 0x9e, 0xde, 0x29, 0x67, 0xb2, 0xb9, 0xa6, 0xa8, 0xf3, 0x03, 0x0b, - 0x34, 0xe4, 0x80, 0xbf, 0xd6, 0x00, 0x64, 0xa1, 0xef, 0xdb, 0xc4, 0x21, 0x2e, 0xc7, 0x76, 0x6f, - 0xdd, 0xd4, 0x57, 0x24, 0xf5, 0x9d, 0x8c, 0xa9, 0x8f, 0xf9, 0xc7, 0x63, 0x18, 0x7c, 0x4f, 0xe3, - 0xa6, 0x28, 0x81, 0x1e, 0xb6, 0xc0, 0x7c, 0x93, 0xc9, 0xdf, 0xfa, 0xaa, 0x8c, 0xe4, 0x47, 0xd9, - 0x22, 0x49, 0xfe, 0x4b, 0xc7, 0x5c, 0x51, 0xf4, 0xf3, 0x4a, 0x8f, 0xfa, 0xe8, 0xf0, 0x0b, 0xb0, - 0x19, 0x10, 0xdc, 0xb8, 0xef, 0xda, 0x6d, 0xe4, 0x79, 0xfc, 0x80, 0xda, 0x84, 0xb5, 0x19, 0x27, - 0x8e, 0xbe, 0x26, 0xbb, 0x69, 0x70, 0xe3, 0x45, 0x89, 0x56, 0x28, 0xc5, 0x1b, 0x16, 0xc1, 0xac, - 0x58, 0xe9, 0x99, 0x0e, 0xe5, 0x14, 0xcb, 0x8b, 0x35, 0x4a, 0xd4, 0x9b, 0xa1, 0x9e, 0x7c, 0x64, - 0xd7, 0x5f, 0x4f, 0xdb, 0xf5, 0xe1, 0x87, 0x60, 0x85, 0x11, 0xcb, 0xf2, 0x1c, 0xbf, 0x16, 0x78, - 0x4d, 0x01, 0xae, 0x6f, 0x48, 0xe3, 0xf5, 0x6e, 0xa7, 0xb8, 0x52, 0x8f, 0xaa, 0x50, 0xdc, 0x16, - 0x1e, 0x81, 0x0d, 0x35, 0xaa, 0x8e, 0x5d, 0x86, 0x9b, 0xa4, 0xde, 0x66, 0x16, 0xb7, 0x99, 0xae, - 0x4b, 0x0c, 0xbd, 0xdb, 0x29, 0x6e, 0x54, 0x12, 0xf4, 0x28, 0xd1, 0x0b, 0x7e, 0x04, 0x56, 0x9b, - 0x5e, 0x70, 0x42, 0x1b, 0x0d, 0xe2, 0xf6, 0x91, 0xde, 0x92, 0x48, 0x1b, 0x62, 0xbc, 0x1d, 0xc4, - 0x74, 0x68, 0xcc, 0xda, 0xf8, 0xb7, 0x06, 0x0a, 0xe9, 0xeb, 0xc9, 0x1b, 0x58, 0x8b, 0x49, 0x74, - 0x2d, 0xfe, 0x28, 0xeb, 
0x1f, 0x24, 0x69, 0x21, 0xa7, 0x6c, 0xc8, 0xbf, 0x9d, 0x06, 0xdf, 0x79, - 0x81, 0x7f, 0x55, 0xe0, 0xdf, 0x34, 0xb0, 0xe7, 0x67, 0xb8, 0xd2, 0xa9, 0x8a, 0xbc, 0xca, 0x5b, - 0xf2, 0x77, 0x55, 0x02, 0x99, 0xae, 0x94, 0x28, 0x53, 0x94, 0xe2, 0x9e, 0xeb, 0x62, 0x87, 0xc4, - 0xef, 0xb9, 0xf7, 0xb0, 0x43, 0x90, 0xd4, 0x18, 0x7f, 0xd6, 0xc0, 0x37, 0x2f, 0x9d, 0x19, 0xd0, - 0x8c, 0x6c, 0xdb, 0xa5, 0xd8, 0xb6, 0x5d, 0x48, 0x07, 0x78, 0xed, 0x7f, 0xc9, 0x9a, 0x9f, 0x3e, - 0x7e, 0x56, 0x98, 0x7a, 0xf2, 0xac, 0x30, 0xf5, 0xf4, 0x59, 0x61, 0xea, 0x51, 0xb7, 0xa0, 0x3d, - 0xee, 0x16, 0xb4, 0x27, 0xdd, 0x82, 0xf6, 0xb4, 0x5b, 0xd0, 0xfe, 0xd9, 0x2d, 0x68, 0xbf, 0xfa, - 0x57, 0x61, 0xea, 0xcb, 0xbd, 0x2c, 0xff, 0xde, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x31, - 0x4b, 0x4e, 0xe4, 0x17, 0x00, 0x00, + // 1803 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x8a, 0xfa, 0xe2, 0x48, 0x96, 0xe4, 0x91, 0x2c, 0x4f, 0xd4, 0x98, 0x54, 0xd7, 0x6e, + 0x60, 0xb4, 0xcd, 0x32, 0x36, 0xd2, 0xc6, 0x45, 0x1a, 0x23, 0x5c, 0x31, 0x72, 0x14, 0xc8, 0x31, + 0x33, 0x8c, 0x82, 0x22, 0x08, 0x8a, 0x8c, 0x96, 0x43, 0x7a, 0xac, 0xe5, 0xee, 0x76, 0x67, 0x56, + 0x16, 0xd1, 0x4b, 0x81, 0xfe, 0x03, 0x05, 0x7a, 0xef, 0xb9, 0xfd, 0x07, 0x7a, 0x29, 0xda, 0x5e, + 0x0d, 0xb4, 0x45, 0x73, 0x2a, 0x72, 0x22, 0x6a, 0x16, 0xbd, 0xf4, 0xda, 0x9b, 0x0f, 0x45, 0x31, + 0xc3, 0xe1, 0xc7, 0x2e, 0x77, 0xe9, 0x4d, 0x6a, 0x0b, 0xbd, 0x69, 0xdf, 0xc7, 0xef, 0xfd, 0xde, + 0xcc, 0xbc, 0x37, 0x6f, 0x28, 0xf0, 0x66, 0x9b, 0x89, 0x87, 0xd1, 0x89, 0xe5, 0xf8, 0x9d, 0x8a, + 0x1f, 0x50, 0x8f, 0x3f, 0x64, 0x2d, 0x51, 0x21, 0x01, 0xab, 0x70, 0xea, 0x44, 0x21, 0x13, 0xdd, + 0xca, 0xd9, 0xad, 0x4a, 0x9b, 0x7a, 0x34, 0x24, 0x82, 0x36, 0xad, 0x20, 0xf4, 0x85, 0x0f, 0x6f, + 0x8c, 0xbd, 0xac, 0x91, 0x97, 0x45, 0x02, 0x66, 0x0d, 0xbd, 0xac, 0xb3, 0x5b, 0xbb, 0xaf, 0x4f, + 0x60, 0xb7, 0xfd, 0xb6, 0x5f, 0x51, 0xce, 0x27, 0x51, 0x4b, 0x7d, 0xa9, 0x0f, 0xf5, 0xd7, 0x00, + 0x74, 0xd7, 0x3c, 0xbd, 0xc3, 0x2d, 0xe6, 0xab, 0xe0, 0x8e, 0x1f, 0xd2, 0x94, 0xc0, 0xbb, 0x6f, + 0x8e, 0x6d, 0x3a, 0xc4, 0x79, 0xc8, 0x3c, 0x1a, 0x76, 0x2b, 0xc1, 0x69, 0x5b, 0x0a, 0x78, 0xa5, + 0x43, 0x05, 0x49, 0xf3, 0xfa, 0x7e, 0x96, 0x57, 0x18, 0x79, 0x82, 0x75, 0x68, 0x85, 0x3b, 0x0f, + 0x69, 0x87, 0x24, 0xfd, 0xcc, 0xb7, 0xc1, 0xe5, 0xaa, 0xeb, 0xfa, 0x8f, 0x69, 0xf3, 0xc0, 0xa5, + 0xe7, 0x9f, 0xf8, 0x6e, 0xd4, 0xa1, 0xf0, 0x35, 0xb0, 0xd4, 0x0c, 0xd9, 0x19, 0x0d, 0x91, 0xb1, + 0x67, 0xdc, 0x2c, 0xda, 0xeb, 0x4f, 0x7a, 0xe5, 0xb9, 0x7e, 0xaf, 0xbc, 0x54, 0x53, 0x52, 0xac, + 0xb5, 0xe6, 0xaf, 0x0d, 0xb0, 0x73, 0xd0, 0xb8, 0x17, 0xfa, 0x51, 0xd0, 0x10, 0x12, 0xb5, 0xdd, + 0x7d, 0x10, 0x08, 0xe6, 0x7b, 0x1c, 0xbe, 0x05, 0x16, 0x44, 0x37, 0xa0, 0x1a, 0xe0, 0xba, 0x06, + 0x58, 0xf8, 0xb8, 0x1b, 0xd0, 0x67, 0xbd, 0xf2, 0x56, 0xc2, 0x4b, 0x8a, 0xb1, 0x72, 0x80, 0xc7, + 0x60, 0x29, 0x24, 0x5e, 0x9b, 0x72, 0x34, 0xbf, 0x57, 0xb8, 0xb9, 0x7a, 0xfb, 0x75, 0x2b, 0xcf, + 0x46, 0x58, 0x87, 0x35, 0x2c, 0xbd, 0xc6, 0x54, 0xd5, 0x27, 0xc7, 0x1a, 0xcc, 0xbc, 0x07, 0x96, + 0xb5, 0x09, 0xbc, 0x06, 0x0a, 0x1d, 0xe6, 0x29, 0x66, 0x05, 0x7b, 0x55, 0xdb, 0x17, 0xee, 0x33, + 0x0f, 0x4b, 0xb9, 0x52, 0x93, 0x73, 0x34, 0x9f, 0x50, 0x93, 0x73, 0x2c, 0xe5, 0xe6, 0x5f, 0xe6, + 0xc1, 0xd5, 0xba, 0xdf, 0x6c, 0xe8, 0xd8, 0x75, 0xdf, 0x65, 0x4e, 0x17, 0xd3, 0x33, 0x46, 0x1f, + 0xc3, 0xcf, 0xc1, 0x8a, 0xdc, 0x9f, 0x26, 0x11, 0x04, 0x15, 0xf6, 0x8c, 0x9b, 0xab, 0xb7, 0xdf, + 0xb0, 0x06, 0xfb, 0x62, 0x4d, 0xee, 0x8b, 0x15, 0x9c, 0xb6, 
0xa5, 0x80, 0x5b, 0xd2, 0x5a, 0xb2, + 0x7f, 0x70, 0xf2, 0x88, 0x3a, 0xe2, 0x3e, 0x15, 0xc4, 0x86, 0x3a, 0x22, 0x18, 0xcb, 0xf0, 0x08, + 0x15, 0x3a, 0x60, 0x81, 0x07, 0xd4, 0x51, 0xe4, 0x57, 0x6f, 0x57, 0xf3, 0xad, 0x4d, 0x06, 0xdd, + 0x46, 0x40, 0x1d, 0x7b, 0x6d, 0xb8, 0x33, 0xf2, 0x0b, 0x2b, 0x70, 0x78, 0x0a, 0x96, 0xb8, 0x20, + 0x22, 0xe2, 0x6a, 0x11, 0x56, 0x6f, 0xef, 0xff, 0x6f, 0x61, 0x14, 0xd4, 0x78, 0x63, 0x06, 0xdf, + 0x58, 0x87, 0x30, 0x7f, 0x67, 0x80, 0x6f, 0xcc, 0x20, 0x08, 0x3f, 0x02, 0x2b, 0x82, 0x76, 0x02, + 0x97, 0x08, 0xaa, 0xb3, 0xbe, 0x3e, 0xb1, 0xa6, 0x96, 0xac, 0x22, 0x1d, 0xfc, 0x63, 0x6d, 0xa6, + 0xf2, 0xda, 0xd4, 0xe1, 0x56, 0x86, 0x52, 0x3c, 0x82, 0x81, 0x87, 0x60, 0x8b, 0xd3, 0xf0, 0x8c, + 0x39, 0xb4, 0xea, 0x38, 0x7e, 0xe4, 0x89, 0x0f, 0x49, 0x47, 0x9f, 0xb7, 0xa2, 0x7d, 0xb5, 0xdf, + 0x2b, 0x6f, 0x35, 0xa6, 0xd5, 0x38, 0xcd, 0xc7, 0xfc, 0x93, 0x01, 0xae, 0xcd, 0xcc, 0x1b, 0xfe, + 0xc6, 0x00, 0x3b, 0x64, 0x50, 0x61, 0x71, 0x54, 0x8e, 0x0c, 0x75, 0xc0, 0x3f, 0xca, 0xb7, 0xba, + 0x71, 0xe7, 0xd9, 0x6b, 0x5d, 0xd2, 0xc9, 0xef, 0x54, 0x53, 0x03, 0xe3, 0x0c, 0x42, 0xe6, 0xbf, + 0xe6, 0x81, 0x39, 0x85, 0xdc, 0xa0, 0x6e, 0xab, 0x11, 0xa9, 0xc3, 0x78, 0x61, 0xc7, 0xdc, 0x8b, + 0x1d, 0xf3, 0xa3, 0xaf, 0x79, 0xfe, 0xa6, 0x98, 0x67, 0x9e, 0xf8, 0x30, 0x71, 0xe2, 0x3f, 0xf8, + 0xba, 0x11, 0x63, 0xd1, 0x66, 0x1f, 0xfc, 0x9f, 0x82, 0xd7, 0xf2, 0x31, 0x7e, 0x09, 0x25, 0x60, + 0xf6, 0xe7, 0x41, 0x69, 0x36, 0xfb, 0x0b, 0xd8, 0xe5, 0x47, 0xb1, 0x5d, 0x7e, 0xff, 0x85, 0xac, + 0xf9, 0xff, 0xd3, 0x0e, 0xff, 0xde, 0x48, 0x2b, 0xa7, 0x0b, 0xd8, 0x5e, 0xb8, 0x07, 0x16, 0x22, + 0x4e, 0x43, 0x95, 0x6b, 0x71, 0xbc, 0x1e, 0xc7, 0x9c, 0x86, 0x58, 0x69, 0xa0, 0x09, 0x96, 0xda, + 0xf2, 0x06, 0xe6, 0xa8, 0xa0, 0xda, 0x1e, 0x90, 0xfc, 0xd5, 0x9d, 0xcc, 0xb1, 0xd6, 0x98, 0xff, + 0x36, 0xc0, 0x8d, 0x3c, 0x0b, 0x00, 0xeb, 0xa0, 0xa8, 0x3b, 0x8a, 0xdd, 0x9d, 0x95, 0xc2, 0x03, + 0xed, 0xda, 0xa2, 0x21, 0xf5, 0x1c, 0x6a, 0x5f, 0xea, 0xf7, 0xca, 0xc5, 0xea, 0xd0, 0x13, 0x8f, + 0x41, 0xe4, 0x04, 0x12, 0x52, 0xc2, 0x7d, 0x4f, 0xa7, 0x30, 0xbe, 0xd6, 0x95, 0x14, 0x6b, 0x6d, + 0x6c, 0xed, 0x0a, 0x2f, 0xa6, 0x34, 0x7e, 0x6b, 0x80, 0x0d, 0x35, 0x28, 0x48, 0x62, 0x0e, 0x91, + 0xe3, 0x4c, 0xac, 0x16, 0x8c, 0x97, 0x52, 0x0b, 0xd7, 0xc1, 0xa2, 0x9a, 0x54, 0x74, 0xbe, 0x97, + 0xb4, 0xf1, 0xa2, 0x62, 0x82, 0x07, 0x3a, 0xf8, 0x2a, 0x58, 0x18, 0x95, 0xe3, 0x9a, 0xbd, 0x22, + 0xb7, 0xb4, 0x46, 0x04, 0xc1, 0x4a, 0x6a, 0xfe, 0xd5, 0x00, 0x5b, 0x09, 0xe2, 0x47, 0x8c, 0x0b, + 0xf8, 0xd9, 0x14, 0x79, 0x2b, 0x1f, 0x79, 0xe9, 0xad, 0xa8, 0x8f, 0x96, 0x6b, 0x28, 0x99, 0x20, + 0xfe, 0x29, 0x58, 0x64, 0x82, 0x76, 0x86, 0xe3, 0xda, 0xf7, 0xf2, 0xd5, 0x55, 0x82, 0xe7, 0x38, + 0xdf, 0x43, 0x89, 0x85, 0x07, 0x90, 0xe6, 0xdf, 0x0c, 0x80, 0x70, 0xe4, 0x55, 0xb9, 0x3c, 0xb8, + 0xc9, 0x09, 0xf3, 0x07, 0xb1, 0x09, 0xf3, 0x5b, 0x89, 0x09, 0xf3, 0xca, 0x94, 0xdf, 0xc4, 0x8c, + 0xf9, 0x0a, 0x28, 0x44, 0xac, 0xa9, 0x47, 0xbc, 0x65, 0x39, 0xde, 0x1d, 0x1f, 0xd6, 0xb0, 0x94, + 0xc1, 0x5b, 0x60, 0x35, 0x62, 0x4d, 0x45, 0xef, 0x3e, 0xf3, 0xd4, 0x4a, 0x17, 0xec, 0x8d, 0x7e, + 0xaf, 0xbc, 0x7a, 0xac, 0xe7, 0x47, 0x39, 0x28, 0x4e, 0xda, 0xc4, 0x5c, 0xc8, 0x39, 0x5a, 0x48, + 0x71, 0x21, 0xe7, 0x78, 0xd2, 0xc6, 0xfc, 0xa3, 0x01, 0xae, 0x35, 0xde, 0x3b, 0x62, 0x5e, 0x74, + 0xbe, 0xef, 0x7b, 0x82, 0x9e, 0x8b, 0x64, 0x76, 0x77, 0x63, 0xd9, 0x7d, 0x3b, 0x91, 0xdd, 0x6e, + 0xba, 0xf3, 0x44, 0x8a, 0x3f, 0x06, 0xeb, 0x9c, 0x2a, 0x1b, 0x8d, 0xa8, 0xfb, 0x9e, 0x99, 0x56, + 0x1e, 0x1a, 0x4d, 0x5b, 0xda, 0xb0, 0xdf, 0x2b, 0xaf, 0xc7, 0x65, 0x38, 0x81, 0x66, 
0xfe, 0xe7, + 0x32, 0xd8, 0x1d, 0x36, 0x06, 0xcd, 0x62, 0xdf, 0xf7, 0xb8, 0x08, 0x09, 0xf3, 0x04, 0xbf, 0x80, + 0x82, 0xb9, 0x09, 0x56, 0x82, 0x90, 0xf9, 0x32, 0xbe, 0x4a, 0x6d, 0xd1, 0x5e, 0x93, 0x27, 0xb4, + 0xae, 0x65, 0x78, 0xa4, 0x85, 0x9f, 0x01, 0xa4, 0x1a, 0x4b, 0x3d, 0x64, 0x67, 0xcc, 0xa5, 0x6d, + 0xda, 0x94, 0x84, 0x89, 0x24, 0xa0, 0xf6, 0x77, 0xc5, 0xde, 0xd3, 0x91, 0x50, 0x35, 0xc3, 0x0e, + 0x67, 0x22, 0x40, 0x0e, 0x76, 0x9a, 0xb4, 0x45, 0x22, 0x57, 0x54, 0x9b, 0xcd, 0x7d, 0x12, 0x90, + 0x13, 0xe6, 0x32, 0xc1, 0x28, 0x47, 0x0b, 0xaa, 0xb1, 0xbe, 0x2d, 0xe7, 0xb0, 0x5a, 0xaa, 0xc5, + 0xb3, 0x5e, 0xf9, 0xda, 0xf4, 0x83, 0xd0, 0x1a, 0x99, 0x74, 0x71, 0x06, 0x34, 0xec, 0x02, 0x14, + 0xd2, 0x9f, 0x44, 0x2c, 0xa4, 0xcd, 0x5a, 0xe8, 0x07, 0xb1, 0xb0, 0x8b, 0x2a, 0xec, 0x3b, 0x32, + 0x1d, 0x9c, 0x61, 0xf3, 0xfc, 0xc0, 0x99, 0xf0, 0xf0, 0x11, 0xd8, 0xd2, 0x6d, 0x3a, 0x16, 0x75, + 0x49, 0x45, 0xbd, 0x23, 0x87, 0xe7, 0xea, 0xb4, 0xfa, 0xf9, 0x01, 0xd3, 0x40, 0x47, 0x3b, 0xf7, + 0xbe, 0xcf, 0x45, 0x8d, 0x85, 0x83, 0xd7, 0x69, 0xdd, 0x8d, 0xda, 0xcc, 0x43, 0xcb, 0x29, 0x3b, + 0x97, 0x62, 0x87, 0x33, 0x11, 0x60, 0x05, 0x2c, 0x9f, 0xa9, 0x6f, 0x8e, 0x56, 0x14, 0xfb, 0x2b, + 0xfd, 0x5e, 0x79, 0x79, 0x60, 0x22, 0x19, 0x2f, 0x1d, 0x34, 0x54, 0x41, 0x0d, 0xad, 0xe0, 0xcf, + 0x0d, 0x00, 0x49, 0xf2, 0xb1, 0xcc, 0xd1, 0x15, 0xd5, 0xf8, 0xde, 0xca, 0xd7, 0xf8, 0xa6, 0x1e, + 0xdb, 0xf6, 0xae, 0x4e, 0x01, 0x4e, 0xa9, 0x38, 0x4e, 0x09, 0x07, 0x6b, 0x60, 0x73, 0x94, 0xd2, + 0x87, 0x54, 0x3c, 0xf6, 0xc3, 0x53, 0x54, 0x54, 0x8b, 0x81, 0x34, 0xd2, 0x66, 0x35, 0xa1, 0xc7, + 0x53, 0x1e, 0xf0, 0x2e, 0x58, 0x1f, 0xc9, 0xea, 0x7e, 0x28, 0x38, 0x02, 0x0a, 0x63, 0x47, 0x63, + 0xac, 0x57, 0x63, 0x5a, 0x9c, 0xb0, 0x86, 0x77, 0xc0, 0xda, 0x58, 0x72, 0x58, 0x43, 0xab, 0xca, + 0x7b, 0x5b, 0x7b, 0xaf, 0x55, 0x27, 0x74, 0x38, 0x66, 0x19, 0xf3, 0x3c, 0xac, 0xef, 0xa3, 0xb5, + 0x0c, 0xcf, 0xc3, 0xfa, 0x3e, 0x8e, 0x59, 0xc2, 0xcf, 0x01, 0x94, 0xb3, 0x8b, 0x7a, 0x79, 0x05, + 0xc4, 0xa1, 0x47, 0xf4, 0x8c, 0xba, 0x68, 0x57, 0x75, 0xc8, 0x37, 0x86, 0xab, 0x78, 0x3c, 0x65, + 0xf1, 0xac, 0x57, 0x86, 0x71, 0x89, 0xda, 0xd6, 0x14, 0x2c, 0xd8, 0x01, 0xe5, 0x61, 0xc5, 0xc5, + 0xea, 0xfd, 0x3d, 0xee, 0x10, 0x57, 0xdd, 0x54, 0x68, 0x47, 0xd1, 0xbd, 0xde, 0xef, 0x95, 0xcb, + 0xb5, 0xd9, 0xa6, 0xf8, 0x79, 0x58, 0xf0, 0x47, 0xc9, 0xce, 0x34, 0x11, 0xe7, 0xaa, 0x8a, 0xf3, + 0xea, 0x74, 0x57, 0x9a, 0x08, 0x90, 0xe9, 0x2d, 0x8f, 0xea, 0xb0, 0x63, 0xeb, 0xee, 0x8c, 0x2e, + 0x7d, 0x95, 0xb7, 0xfc, 0xcc, 0xcb, 0x69, 0x7c, 0x48, 0xe2, 0x66, 0x38, 0x11, 0x12, 0xfa, 0xa0, + 0x18, 0x0e, 0xaf, 0x61, 0xb4, 0xae, 0xe2, 0xdf, 0xcd, 0x39, 0x1f, 0x64, 0xdc, 0xfa, 0xf6, 0x65, + 0x1d, 0xba, 0x38, 0xb2, 0xc0, 0xe3, 0x18, 0xf0, 0x97, 0x06, 0x80, 0x3c, 0x0a, 0x02, 0x97, 0x76, + 0xa8, 0x27, 0x88, 0x3b, 0x18, 0x68, 0xd1, 0x86, 0x0a, 0x7d, 0x2f, 0x67, 0xea, 0x53, 0xfe, 0x49, + 0x0e, 0xa3, 0x8a, 0x9d, 0x36, 0xc5, 0x29, 0xe1, 0x61, 0x1b, 0x2c, 0xb7, 0xb8, 0xfa, 0x1b, 0x6d, + 0x2a, 0x26, 0x3f, 0xcc, 0xc7, 0x24, 0xfd, 0xa7, 0x35, 0x7b, 0x43, 0x87, 0x5f, 0xd6, 0x7a, 0x3c, + 0x44, 0x87, 0x9f, 0x80, 0x9d, 0x90, 0x92, 0xe6, 0x03, 0xcf, 0xed, 0x62, 0xdf, 0x17, 0x07, 0xcc, + 0xa5, 0xbc, 0xcb, 0x05, 0xed, 0xa0, 0xcb, 0xea, 0x34, 0x8d, 0x7e, 0x17, 0xc0, 0xa9, 0x56, 0x38, + 0xc3, 0x1b, 0x96, 0xc1, 0xa2, 0x2c, 0x16, 0x8e, 0xa0, 0xea, 0x93, 0x45, 0x39, 0xa8, 0xc9, 0xf5, + 0xe6, 0x78, 0x20, 0x9f, 0x78, 0x4d, 0x6c, 0x65, 0xbd, 0x26, 0xe0, 0x3b, 0x60, 0x83, 0x53, 0xc7, + 0xf1, 0x3b, 0x41, 0x3d, 0xf4, 0x5b, 0x12, 0x1c, 0x6d, 0x2b, 0xe3, 0xad, 0x7e, 0xaf, 0xbc, 0xd1, + 0x88, 0xab, 
0x70, 0xd2, 0x16, 0x1e, 0x81, 0x6d, 0xdd, 0x0c, 0x8f, 0x3d, 0x4e, 0x5a, 0xb4, 0xd1, + 0xe5, 0x8e, 0x70, 0x39, 0x42, 0x0a, 0x03, 0xf5, 0x7b, 0xe5, 0xed, 0x6a, 0x8a, 0x1e, 0xa7, 0x7a, + 0xc1, 0x77, 0xc1, 0x66, 0xcb, 0x0f, 0x4f, 0x58, 0xb3, 0x49, 0xbd, 0x21, 0xd2, 0x2b, 0x0a, 0x69, + 0x5b, 0x36, 0xd0, 0x83, 0x84, 0x0e, 0x4f, 0x59, 0x9b, 0xff, 0x34, 0x40, 0x29, 0x7b, 0x00, 0xba, + 0x80, 0xc1, 0x9b, 0xc6, 0x07, 0xef, 0x77, 0xf3, 0xfe, 0x8c, 0x94, 0x45, 0x39, 0x63, 0x06, 0xff, + 0xd5, 0x3c, 0xf8, 0xce, 0x57, 0xf8, 0xed, 0x09, 0xfe, 0xd9, 0x00, 0x37, 0x82, 0x1c, 0x8f, 0x46, + 0xbd, 0x22, 0x2f, 0xf2, 0x1d, 0xfe, 0x5d, 0x9d, 0x40, 0xae, 0x47, 0x2b, 0xce, 0xc5, 0x52, 0xbe, + 0xa4, 0x3d, 0xd2, 0xa1, 0xc9, 0x97, 0xb4, 0xbc, 0x37, 0xb0, 0xd2, 0x98, 0x7f, 0x30, 0xc0, 0x37, + 0x9f, 0xdb, 0x33, 0xa0, 0x1d, 0x9b, 0xe7, 0xad, 0xc4, 0x3c, 0x5f, 0xca, 0x06, 0x78, 0xe9, 0x3f, + 0x8d, 0xdb, 0x1f, 0x3c, 0x79, 0x5a, 0x9a, 0xfb, 0xe2, 0x69, 0x69, 0xee, 0xcb, 0xa7, 0xa5, 0xb9, + 0x9f, 0xf5, 0x4b, 0xc6, 0x93, 0x7e, 0xc9, 0xf8, 0xa2, 0x5f, 0x32, 0xbe, 0xec, 0x97, 0x8c, 0xbf, + 0xf7, 0x4b, 0xc6, 0x2f, 0xfe, 0x51, 0x9a, 0xfb, 0xf4, 0x46, 0x9e, 0xff, 0xa2, 0xfc, 0x37, 0x00, + 0x00, 0xff, 0xff, 0xb7, 0xb2, 0xaf, 0x36, 0x6c, 0x19, 0x00, 0x00, } func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { @@ -824,6 +827,16 @@ func (m *PodSecurityPolicyReview) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -946,6 +959,16 @@ func (m *PodSecurityPolicySelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) ( _ = i var l int _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1022,6 +1045,16 @@ func (m *PodSecurityPolicySubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1337,6 +1370,13 @@ func (m *SecurityContextConstraints) MarshalToSizedBuffer(dAtA []byte) (int, err _ = i var l int _ = l + i -= len(m.UserNamespaceLevel) + copy(dAtA[i:], m.UserNamespaceLevel) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserNamespaceLevel))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 if len(m.ForbiddenSysctls) > 0 { for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.ForbiddenSysctls[iNdEx]) @@ -1773,6 +1813,8 @@ func (m *PodSecurityPolicyReview) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1818,6 +1860,8 @@ func (m *PodSecurityPolicySelfSubjectReview) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1842,6 +1886,8 @@ func (m *PodSecurityPolicySubjectReview) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Status.Size() n += 1 + l + 
sovGenerated(uint64(l)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -2042,6 +2088,8 @@ func (m *SecurityContextConstraints) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + l = len(m.UserNamespaceLevel) + n += 2 + l + sovGenerated(uint64(l)) return n } @@ -2142,6 +2190,7 @@ func (this *PodSecurityPolicyReview) String() string { s := strings.Join([]string{`&PodSecurityPolicyReview{`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicyReviewSpec", "PodSecurityPolicyReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSecurityPolicyReviewStatus", "PodSecurityPolicyReviewStatus", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2151,7 +2200,7 @@ func (this *PodSecurityPolicyReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicyReviewSpec{`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `ServiceAccountNames:` + fmt.Sprintf("%v", this.ServiceAccountNames) + `,`, `}`, }, "") @@ -2179,6 +2228,7 @@ func (this *PodSecurityPolicySelfSubjectReview) String() string { s := strings.Join([]string{`&PodSecurityPolicySelfSubjectReview{`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySelfSubjectReviewSpec", "PodSecurityPolicySelfSubjectReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSecurityPolicySubjectReviewStatus", "PodSecurityPolicySubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2188,7 +2238,7 @@ func (this *PodSecurityPolicySelfSubjectReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicySelfSubjectReviewSpec{`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2200,6 +2250,7 @@ func (this *PodSecurityPolicySubjectReview) String() string { s := strings.Join([]string{`&PodSecurityPolicySubjectReview{`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySubjectReviewSpec", "PodSecurityPolicySubjectReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSecurityPolicySubjectReviewStatus", "PodSecurityPolicySubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2209,7 +2260,7 @@ func (this *PodSecurityPolicySubjectReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicySubjectReviewSpec{`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `}`, @@ -2221,9 +2272,9 @@ func (this *PodSecurityPolicySubjectReviewStatus) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicySubjectReviewStatus{`, - `AllowedBy:` + strings.Replace(fmt.Sprintf("%v", this.AllowedBy), "ObjectReference", "v1.ObjectReference", 1) + `,`, + `AllowedBy:` + strings.Replace(fmt.Sprintf("%v", this.AllowedBy), "ObjectReference", "v11.ObjectReference", 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2233,7 +2284,7 @@ func (this *RangeAllocation) String() string { return "nil" } s := strings.Join([]string{`&RangeAllocation{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Range:` + fmt.Sprintf("%v", this.Range) + `,`, `Data:` + valueToStringGenerated(this.Data) + `,`, `}`, @@ -2250,7 +2301,7 @@ func (this *RangeAllocationList) String() string { } repeatedStringForItems += "}" s := strings.Join([]string{`&RangeAllocationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") @@ -2275,7 +2326,7 @@ func (this *SELinuxContextStrategyOptions) String() string { } s := strings.Join([]string{`&SELinuxContextStrategyOptions{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v1.SELinuxOptions", 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, `}`, }, "") return s @@ -2290,7 +2341,7 @@ func (this *SecurityContextConstraints) String() string { } repeatedStringForAllowedFlexVolumes += "}" s := strings.Join([]string{`&SecurityContextConstraints{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Priority:` + valueToStringGenerated(this.Priority) + `,`, `AllowPrivilegedContainer:` + fmt.Sprintf("%v", this.AllowPrivilegedContainer) + `,`, `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, @@ -2315,6 +2366,7 @@ func (this *SecurityContextConstraints) String() string { `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, + `UserNamespaceLevel:` + fmt.Sprintf("%v", this.UserNamespaceLevel) + 
`,`, `}`, }, "") return s @@ -2329,7 +2381,7 @@ func (this *SecurityContextConstraintsList) String() string { } repeatedStringForItems += "}" s := strings.Join([]string{`&SecurityContextConstraintsList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") @@ -2751,6 +2803,39 @@ func (m *PodSecurityPolicyReview) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3066,6 +3151,39 @@ func (m *PodSecurityPolicySelfSubjectReview) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3265,6 +3383,39 @@ func (m *PodSecurityPolicySubjectReview) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3492,7 +3643,7 @@ func (m *PodSecurityPolicySubjectReviewStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.AllowedBy == nil { - m.AllowedBy = &v1.ObjectReference{} + m.AllowedBy = &v11.ObjectReference{} } if err := m.AllowedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4083,7 +4234,7 @@ func (m *SELinuxContextStrategyOptions) Unmarshal(dAtA []byte) 
error { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &v1.SELinuxOptions{} + m.SELinuxOptions = &v11.SELinuxOptions{} } if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4828,6 +4979,38 @@ func (m *SecurityContextConstraints) Unmarshal(dAtA []byte) error { } m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNamespaceLevel", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserNamespaceLevel = NamespaceLevelType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto index d842079a0..fdb879ce0 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.proto +++ b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -25,6 +25,7 @@ message FSGroupStrategyOptions { // Ranges are the allowed ranges of fs groups. If you would like to force a single // fs group then supply a single range with the same start and end. + // +listType=atomic repeated IDRange ranges = 2; } @@ -43,6 +44,10 @@ message IDRange { // Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=2 message PodSecurityPolicyReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; + // spec is the PodSecurityPolicy to check. optional PodSecurityPolicyReviewSpec spec = 1; @@ -56,7 +61,7 @@ message PodSecurityPolicyReviewSpec { // if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty, // in which case "default" is used. // If serviceAccountNames is specified, template.spec.serviceAccountName is ignored. - optional k8s.io.api.core.v1.PodTemplateSpec template = 1; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 1; // serviceAccountNames is an optional set of ServiceAccounts to run the check with. // If serviceAccountNames is empty, the template.spec.serviceAccountName is used, @@ -76,6 +81,10 @@ message PodSecurityPolicyReviewStatus { // Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=2 message PodSecurityPolicySelfSubjectReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; + // spec defines specification the PodSecurityPolicySelfSubjectReview. 
optional PodSecurityPolicySelfSubjectReviewSpec spec = 1; @@ -86,7 +95,7 @@ message PodSecurityPolicySelfSubjectReview { // PodSecurityPolicySelfSubjectReviewSpec contains specification for PodSecurityPolicySelfSubjectReview. message PodSecurityPolicySelfSubjectReviewSpec { // template is the PodTemplateSpec to check. - optional k8s.io.api.core.v1.PodTemplateSpec template = 1; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 1; } // PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec. @@ -94,6 +103,10 @@ message PodSecurityPolicySelfSubjectReviewSpec { // Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=2 message PodSecurityPolicySubjectReview { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; + // spec defines specification for the PodSecurityPolicySubjectReview. optional PodSecurityPolicySubjectReviewSpec spec = 1; @@ -105,7 +118,7 @@ message PodSecurityPolicySubjectReview { message PodSecurityPolicySubjectReviewSpec { // template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty it will not be defaulted. // If its non-empty, it will be checked. - optional k8s.io.api.core.v1.PodTemplateSpec template = 1; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 1; // user is the user you're testing for. // If you specify "user" but not "group", then is it interpreted as "What if user were not a member of any groups. @@ -121,7 +134,7 @@ message PodSecurityPolicySubjectReviewStatus { // allowedBy is a reference to the rule that allows the PodTemplateSpec. // A rule can be a SecurityContextConstraint or a PodSecurityPolicy // A `nil`, indicates that it was denied. - optional k8s.io.api.core.v1.ObjectReference allowedBy = 1; + optional .k8s.io.api.core.v1.ObjectReference allowedBy = 1; // A machine-readable description of why this operation is in the // "Failure" status. If this value is empty there @@ -129,7 +142,7 @@ message PodSecurityPolicySubjectReviewStatus { optional string reason = 2; // template is the PodTemplateSpec after the defaulting is applied. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // RangeAllocation is used so we can easily expose a RangeAllocation typed for security group @@ -139,7 +152,7 @@ message PodSecurityPolicySubjectReviewStatus { message RangeAllocation { // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // range is a string representing a unique label for a range of uids, "1000000000-2000000000/10000". optional string range = 2; @@ -156,7 +169,7 @@ message RangeAllocation { message RangeAllocationList { // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of RangeAllocations. 
repeated RangeAllocation items = 2; @@ -184,7 +197,7 @@ message SELinuxContextStrategyOptions { optional string type = 1; // seLinuxOptions required to run as; required for MustRunAs - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; + optional .k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; } // SecurityContextConstraints governs the ability to make requests that affect the SecurityContext @@ -195,21 +208,26 @@ message SELinuxContextStrategyOptions { // SecurityContextConstraints. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=`.allowPrivilegedContainer`,description="Determines if a container can request to be run as privileged" -// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=`.allowedCapabilities`,description="A list of capabilities that can be requested to add to the container" -// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=`.seLinuxContext.type`,description="Strategy that will dictate what labels will be set in the SecurityContext" -// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=`.runAsUser.type`,description="Strategy that will dictate what RunAsUser is used in the SecurityContext" -// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=`.fsGroup.type`,description="Strategy that will dictate what fs group is used by the SecurityContext" -// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=`.supplementalGroups.type`,description="Strategy that will dictate what supplemental groups are used by the SecurityContext" -// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=`.priority`,description="Sort order of SCCs" -// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=`.readOnlyRootFilesystem`,description="Force containers to run with a read only root file system" -// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=`.volumes`,description="White list of allowed volume plugins" +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=securitycontextconstraints,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=.allowPrivilegedContainer,description="Determines if a container can request to be run as privileged" +// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=.allowedCapabilities,description="A list of capabilities that can be requested to add to the container" +// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=.seLinuxContext.type,description="Strategy that will dictate what labels will be set in the SecurityContext" +// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=.runAsUser.type,description="Strategy that will dictate what RunAsUser is used in the SecurityContext" +// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=.fsGroup.type,description="Strategy that will dictate what fs group is used by the SecurityContext" +// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=.supplementalGroups.type,description="Strategy that will dictate what supplemental groups are used by the SecurityContext" +// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=.priority,description="Sort order of SCCs" +// 
+kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=.readOnlyRootFilesystem,description="Force containers to run with a read only root file system" +// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=.volumes,description="White list of allowed volume plugins" // +kubebuilder:singular=securitycontextconstraint // +openshift:compatibility-gen:level=1 +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true message SecurityContextConstraints { // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Priority influences the sort order of SCCs when evaluating which SCCs to try first for // a given pod request based on access in the Users and Groups fields. The higher the int, the @@ -227,11 +245,13 @@ message SecurityContextConstraints { // unless the pod spec specifically drops the capability. You may not list a capabiility in both // DefaultAddCapabilities and RequiredDropCapabilities. // +nullable + // +listType=atomic repeated string defaultAddCapabilities = 4; // RequiredDropCapabilities are the capabilities that will be dropped from the container. These // are required to be dropped and cannot be added. // +nullable + // +listType=atomic repeated string requiredDropCapabilities = 5; // AllowedCapabilities is a list of capabilities that can be requested to add to the container. @@ -239,6 +259,7 @@ message SecurityContextConstraints { // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. // To allow all capabilities you may use '*'. // +nullable + // +listType=atomic repeated string allowedCapabilities = 6; // AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin @@ -249,6 +270,7 @@ message SecurityContextConstraints { // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". // To allow no volumes, set to ["none"]. // +nullable + // +listType=atomic repeated string volumes = 8; // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all @@ -256,6 +278,7 @@ message SecurityContextConstraints { // is allowed in the "Volumes" field. // +optional // +nullable + // +listType=atomic repeated AllowedFlexVolume allowedFlexVolumes = 21; // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. @@ -270,6 +293,18 @@ message SecurityContextConstraints { // AllowHostIPC determines if the policy allows host ipc in the containers. optional bool allowHostIPC = 12; + // userNamespaceLevel determines if the policy allows host users in containers. + // Valid values are "AllowHostLevel", "RequirePodLevel", and omitted. + // When "AllowHostLevel" is set, a pod author may set `hostUsers` to either `true` or `false`. + // When "RequirePodLevel" is set, a pod author must set `hostUsers` to `false`. + // When omitted, the default value is "AllowHostLevel". 
+ // +openshift:enable:FeatureGate=UserNamespacesPodSecurityStandards + // +kubebuilder:validation:Enum="AllowHostLevel";"RequirePodLevel" + // +kubebuilder:default:="AllowHostLevel" + // +default="AllowHostLevel" + // +optional + optional string userNamespaceLevel = 26; + // DefaultAllowPrivilegeEscalation controls the default setting for whether a // process can gain more privileges than its parent process. // +optional @@ -308,11 +343,13 @@ message SecurityContextConstraints { // The users who have permissions to use this security context constraints // +optional // +nullable + // +listType=atomic repeated string users = 18; // The groups that have permission to use this security context constraints // +optional // +nullable + // +listType=atomic repeated string groups = 19; // SeccompProfiles lists the allowed profiles that may be set for the pod or @@ -321,6 +358,7 @@ message SecurityContextConstraints { // used to generate a value for a pod the first non-wildcard profile will be used as // the default. // +nullable + // +listType=atomic repeated string seccompProfiles = 20; // AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. @@ -333,6 +371,7 @@ message SecurityContextConstraints { // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. // +optional // +nullable + // +listType=atomic repeated string allowedUnsafeSysctls = 24; // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. @@ -344,6 +383,7 @@ message SecurityContextConstraints { // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. // +optional // +nullable + // +listType=atomic repeated string forbiddenSysctls = 25; } @@ -354,7 +394,7 @@ message SecurityContextConstraints { message SecurityContextConstraintsList { // metadata is the standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of security context constraints. repeated SecurityContextConstraints items = 2; @@ -375,6 +415,7 @@ message SupplementalGroupsStrategyOptions { // Ranges are the allowed ranges of supplemental groups. If you would like to force a single // supplemental group then supply a single range with the same start and end. 
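The `userNamespaceLevel` field documented above is the user-facing switch for this change. As a rough sketch of how it is meant to be set (the object name is made up, the Go constant comes from the `types.go` hunk later in this diff, and all other SCC fields are left at zero values for brevity):

```
package sketch

import (
	securityv1 "github.com/openshift/api/security/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// restrictedUserNSSCC returns an illustrative SCC that requires pod-level
// user namespaces, so pod authors must set spec.hostUsers to false.
func restrictedUserNSSCC() *securityv1.SecurityContextConstraints {
	return &securityv1.SecurityContextConstraints{
		ObjectMeta:         metav1.ObjectMeta{Name: "restricted-userns"}, // illustrative name
		UserNamespaceLevel: securityv1.NamespaceLevelRequirePod,
	}
}
```

Because the field is gated by `UserNamespacesPodSecurityStandards` and defaults to `AllowHostLevel`, existing SCC manifests keep their current behavior unless they opt in.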
+ // +listType=atomic repeated IDRange ranges = 2; } diff --git a/vendor/github.com/openshift/api/security/v1/stable.securitycontextconstraints.testsuite.yaml b/vendor/github.com/openshift/api/security/v1/stable.securitycontextconstraints.testsuite.yaml deleted file mode 100644 index d663b94c2..000000000 --- a/vendor/github.com/openshift/api/security/v1/stable.securitycontextconstraints.testsuite.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] SecurityContextConstraints" -crd: 0000_03_security-openshift_01_scc.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal SecurityContextConstraints - initial: | - apiVersion: security.openshift.io/v1 - kind: SecurityContextConstraints - allowHostDirVolumePlugin: false - allowHostIPC: false - allowHostNetwork: false - allowHostPID: false - allowHostPorts: false - allowPrivilegedContainer: false - allowedCapabilities: [] - defaultAddCapabilities: [] - priority: 0 - readOnlyRootFilesystem: false - requiredDropCapabilities: [] - volumes: [] - expected: | - apiVersion: security.openshift.io/v1 - kind: SecurityContextConstraints - allowHostDirVolumePlugin: false - allowHostIPC: false - allowHostNetwork: false - allowHostPID: false - allowHostPorts: false - allowPrivilegedContainer: false - allowedCapabilities: [] - defaultAddCapabilities: [] - priority: 0 - readOnlyRootFilesystem: false - requiredDropCapabilities: [] - volumes: [] diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go index 3e208210c..9d0af5c8d 100644 --- a/vendor/github.com/openshift/api/security/v1/types.go +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -22,17 +22,22 @@ var AllowAllCapabilities corev1.Capability = "*" // SecurityContextConstraints. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
-// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=`.allowPrivilegedContainer`,description="Determines if a container can request to be run as privileged" -// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=`.allowedCapabilities`,description="A list of capabilities that can be requested to add to the container" -// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=`.seLinuxContext.type`,description="Strategy that will dictate what labels will be set in the SecurityContext" -// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=`.runAsUser.type`,description="Strategy that will dictate what RunAsUser is used in the SecurityContext" -// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=`.fsGroup.type`,description="Strategy that will dictate what fs group is used by the SecurityContext" -// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=`.supplementalGroups.type`,description="Strategy that will dictate what supplemental groups are used by the SecurityContext" -// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=`.priority`,description="Sort order of SCCs" -// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=`.readOnlyRootFilesystem`,description="Force containers to run with a read only root file system" -// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=`.volumes`,description="White list of allowed volume plugins" +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=securitycontextconstraints,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=.allowPrivilegedContainer,description="Determines if a container can request to be run as privileged" +// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=.allowedCapabilities,description="A list of capabilities that can be requested to add to the container" +// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=.seLinuxContext.type,description="Strategy that will dictate what labels will be set in the SecurityContext" +// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=.runAsUser.type,description="Strategy that will dictate what RunAsUser is used in the SecurityContext" +// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=.fsGroup.type,description="Strategy that will dictate what fs group is used by the SecurityContext" +// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=.supplementalGroups.type,description="Strategy that will dictate what supplemental groups are used by the SecurityContext" +// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=.priority,description="Sort order of SCCs" +// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=.readOnlyRootFilesystem,description="Force containers to run with a read only root file system" +// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=.volumes,description="White list of allowed volume plugins" // +kubebuilder:singular=securitycontextconstraint // +openshift:compatibility-gen:level=1 +// +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type SecurityContextConstraints struct { metav1.TypeMeta `json:",inline"` @@ -55,16 +60,19 @@ type SecurityContextConstraints struct { // unless the pod spec specifically drops the capability. 
You may not list a capabiility in both // DefaultAddCapabilities and RequiredDropCapabilities. // +nullable + // +listType=atomic DefaultAddCapabilities []corev1.Capability `json:"defaultAddCapabilities" protobuf:"bytes,4,rep,name=defaultAddCapabilities,casttype=Capability"` // RequiredDropCapabilities are the capabilities that will be dropped from the container. These // are required to be dropped and cannot be added. // +nullable + // +listType=atomic RequiredDropCapabilities []corev1.Capability `json:"requiredDropCapabilities" protobuf:"bytes,5,rep,name=requiredDropCapabilities,casttype=Capability"` // AllowedCapabilities is a list of capabilities that can be requested to add to the container. // Capabilities in this field maybe added at the pod author's discretion. // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. // To allow all capabilities you may use '*'. // +nullable + // +listType=atomic AllowedCapabilities []corev1.Capability `json:"allowedCapabilities" protobuf:"bytes,6,rep,name=allowedCapabilities,casttype=Capability"` // AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin // +k8s:conversion-gen=false @@ -73,12 +81,14 @@ type SecurityContextConstraints struct { // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". // To allow no volumes, set to ["none"]. // +nullable + // +listType=atomic Volumes []FSType `json:"volumes" protobuf:"bytes,8,rep,name=volumes,casttype=FSType"` // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes // is allowed in the "Volumes" field. // +optional // +nullable + // +listType=atomic AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,21,rep,name=allowedFlexVolumes"` // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. AllowHostNetwork bool `json:"allowHostNetwork" protobuf:"varint,9,opt,name=allowHostNetwork"` @@ -88,6 +98,17 @@ type SecurityContextConstraints struct { AllowHostPID bool `json:"allowHostPID" protobuf:"varint,11,opt,name=allowHostPID"` // AllowHostIPC determines if the policy allows host ipc in the containers. AllowHostIPC bool `json:"allowHostIPC" protobuf:"varint,12,opt,name=allowHostIPC"` + // userNamespaceLevel determines if the policy allows host users in containers. + // Valid values are "AllowHostLevel", "RequirePodLevel", and omitted. + // When "AllowHostLevel" is set, a pod author may set `hostUsers` to either `true` or `false`. + // When "RequirePodLevel" is set, a pod author must set `hostUsers` to `false`. + // When omitted, the default value is "AllowHostLevel". + // +openshift:enable:FeatureGate=UserNamespacesPodSecurityStandards + // +kubebuilder:validation:Enum="AllowHostLevel";"RequirePodLevel" + // +kubebuilder:default:="AllowHostLevel" + // +default="AllowHostLevel" + // +optional + UserNamespaceLevel NamespaceLevelType `json:"userNamespaceLevel,omitempty" protobuf:"bytes,26,opt,name=userNamespaceLevel"` // DefaultAllowPrivilegeEscalation controls the default setting for whether a // process can gain more privileges than its parent process. 
// +optional @@ -120,10 +141,12 @@ type SecurityContextConstraints struct { // The users who have permissions to use this security context constraints // +optional // +nullable + // +listType=atomic Users []string `json:"users" protobuf:"bytes,18,rep,name=users"` // The groups that have permission to use this security context constraints // +optional // +nullable + // +listType=atomic Groups []string `json:"groups" protobuf:"bytes,19,rep,name=groups"` // SeccompProfiles lists the allowed profiles that may be set for the pod or @@ -132,6 +155,7 @@ type SecurityContextConstraints struct { // used to generate a value for a pod the first non-wildcard profile will be used as // the default. // +nullable + // +listType=atomic SeccompProfiles []string `json:"seccompProfiles,omitempty" protobuf:"bytes,20,opt,name=seccompProfiles"` // AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. @@ -144,6 +168,7 @@ type SecurityContextConstraints struct { // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. // +optional // +nullable + // +listType=atomic AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,24,rep,name=allowedUnsafeSysctls"` // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered @@ -154,6 +179,7 @@ type SecurityContextConstraints struct { // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. // +optional // +nullable + // +listType=atomic ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,25,rep,name=forbiddenSysctls"` } @@ -190,6 +216,7 @@ var ( FSStorageOS FSType = "storageOS" FSTypeCSI FSType = "csi" FSTypeEphemeral FSType = "ephemeral" + FSTypeImage FSType = "image" FSTypeAll FSType = "*" FSTypeNone FSType = "none" ) @@ -227,6 +254,7 @@ type FSGroupStrategyOptions struct { Type FSGroupStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=FSGroupStrategyType"` // Ranges are the allowed ranges of fs groups. If you would like to force a single // fs group then supply a single range with the same start and end. + // +listType=atomic Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } @@ -236,6 +264,7 @@ type SupplementalGroupsStrategyOptions struct { Type SupplementalGroupsStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SupplementalGroupsStrategyType"` // Ranges are the allowed ranges of supplemental groups. If you would like to force a single // supplemental group then supply a single range with the same start and end. + // +listType=atomic Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } @@ -248,6 +277,9 @@ type IDRange struct { Max int64 `json:"max,omitempty" protobuf:"varint,2,opt,name=max"` } +// NamespaceLevelType shows the allowable values for the UserNamespaceLevel field. +type NamespaceLevelType string + // SELinuxContextStrategyType denotes strategy types for generating SELinux options for a // SecurityContext type SELinuxContextStrategyType string @@ -265,6 +297,11 @@ type SupplementalGroupsStrategyType string type FSGroupStrategyType string const ( + // NamespaceLevelAllowHost allows a pod to set `hostUsers` field to either `true` or `false` + NamespaceLevelAllowHost NamespaceLevelType = "AllowHostLevel" + // NamespaceLevelRequirePod requires the `hostUsers` field be `false` in a pod. 
+ NamespaceLevelRequirePod NamespaceLevelType = "RequirePodLevel" + // container must have SELinux labels of X applied. SELinuxStrategyMustRunAs SELinuxContextStrategyType = "MustRunAs" // container may make requests for any SELinux context labels. @@ -318,6 +355,10 @@ type SecurityContextConstraintsList struct { type PodSecurityPolicySubjectReview struct { metav1.TypeMeta `json:",inline"` + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` + // spec defines specification for the PodSecurityPolicySubjectReview. Spec PodSecurityPolicySubjectReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` @@ -367,6 +408,10 @@ type PodSecurityPolicySubjectReviewStatus struct { type PodSecurityPolicySelfSubjectReview struct { metav1.TypeMeta `json:",inline"` + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` + // spec defines specification the PodSecurityPolicySelfSubjectReview. Spec PodSecurityPolicySelfSubjectReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` @@ -391,6 +436,10 @@ type PodSecurityPolicySelfSubjectReviewSpec struct { type PodSecurityPolicyReview struct { metav1.TypeMeta `json:",inline"` + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` + // spec is the PodSecurityPolicy to check. 
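The two `NamespaceLevelType` constants pin down the rule spelled out in the field documentation: `RequirePodLevel` demands an explicit `hostUsers: false`, while `AllowHostLevel` (or an omitted field) accepts either value. A hedged illustration of that reading, not the actual SCC admission code, which lives outside this repository and may differ:

```
package sketch

import (
	securityv1 "github.com/openshift/api/security/v1"
	corev1 "k8s.io/api/core/v1"
)

// hostUsersAllowed applies the documented semantics of userNamespaceLevel
// to a pod. spec.hostUsers is a *bool, so RequirePodLevel needs both a
// non-nil pointer and a false value.
func hostUsersAllowed(scc *securityv1.SecurityContextConstraints, pod *corev1.Pod) bool {
	if scc.UserNamespaceLevel == securityv1.NamespaceLevelRequirePod {
		return pod.Spec.HostUsers != nil && !*pod.Spec.HostUsers
	}
	// AllowHostLevel, or an omitted field: any hostUsers value is acceptable.
	return true
}
```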
Spec PodSecurityPolicyReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go index 26c88f7de..66e8b5a21 100644 --- a/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go @@ -67,6 +67,7 @@ func (in *IDRange) DeepCopy() *IDRange { func (in *PodSecurityPolicyReview) DeepCopyInto(out *PodSecurityPolicyReview) { *out = *in out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return @@ -139,6 +140,7 @@ func (in *PodSecurityPolicyReviewStatus) DeepCopy() *PodSecurityPolicyReviewStat func (in *PodSecurityPolicySelfSubjectReview) DeepCopyInto(out *PodSecurityPolicySelfSubjectReview) { *out = *in out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return @@ -183,6 +185,7 @@ func (in *PodSecurityPolicySelfSubjectReviewSpec) DeepCopy() *PodSecurityPolicyS func (in *PodSecurityPolicySubjectReview) DeepCopyInto(out *PodSecurityPolicySubjectReview) { *out = *in out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 000000000..178c97078 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,60 @@ +securitycontextconstraints.security.openshift.io: + Annotations: + release.openshift.io/bootstrap-required: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: securitycontextconstraints.security.openshift.io + Capability: "" + Category: "" + FeatureGates: + - UserNamespacesPodSecurityStandards + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: security.openshift.io + HasStatus: false + KindName: SecurityContextConstraints + Labels: {} + PluralName: securitycontextconstraints + PrinterColumns: + - description: Determines if a container can request to be run as privileged + jsonPath: .allowPrivilegedContainer + name: Priv + type: string + - description: A list of capabilities that can be requested to add to the container + jsonPath: .allowedCapabilities + name: Caps + type: string + - description: Strategy that will dictate what labels will be set in the SecurityContext + jsonPath: .seLinuxContext.type + name: SELinux + type: string + - description: Strategy that will dictate what RunAsUser is used in the SecurityContext + jsonPath: .runAsUser.type + name: RunAsUser + type: string + - description: Strategy that will dictate what fs group is used by the SecurityContext + jsonPath: .fsGroup.type + name: FSGroup + type: string + - description: Strategy that will dictate what supplemental groups are used by the + SecurityContext + jsonPath: .supplementalGroups.type + name: SupGroup + type: string + - description: Sort order of SCCs + jsonPath: .priority + name: Priority + type: string + - description: Force containers to run with a read only root file system + jsonPath: .readOnlyRootFilesystem + name: ReadOnlyRootFS + type: string + - 
description: White list of allowed volume plugins + jsonPath: .volumes + name: Volumes + type: string + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go index a72b8ecf0..2f242366a 100644 --- a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go @@ -41,9 +41,10 @@ func (IDRange) SwaggerDoc() map[string]string { } var map_PodSecurityPolicyReview = map[string]string{ - "": "PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "spec": "spec is the PodSecurityPolicy to check.", - "status": "status represents the current information/status for the PodSecurityPolicyReview.", + "": "PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the PodSecurityPolicy to check.", + "status": "status represents the current information/status for the PodSecurityPolicyReview.", } func (PodSecurityPolicyReview) SwaggerDoc() map[string]string { @@ -70,9 +71,10 @@ func (PodSecurityPolicyReviewStatus) SwaggerDoc() map[string]string { } var map_PodSecurityPolicySelfSubjectReview = map[string]string{ - "": "PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "spec": "spec defines specification the PodSecurityPolicySelfSubjectReview.", - "status": "status represents the current information/status for the PodSecurityPolicySelfSubjectReview.", + "": "PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines specification the PodSecurityPolicySelfSubjectReview.", + "status": "status represents the current information/status for the PodSecurityPolicySelfSubjectReview.", } func (PodSecurityPolicySelfSubjectReview) SwaggerDoc() map[string]string { @@ -89,9 +91,10 @@ func (PodSecurityPolicySelfSubjectReviewSpec) SwaggerDoc() map[string]string { } var map_PodSecurityPolicySubjectReview = map[string]string{ - "": "PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "spec": "spec defines specification for the PodSecurityPolicySubjectReview.", - "status": "status represents the current information/status for the PodSecurityPolicySubjectReview.", + "": "PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines specification for the PodSecurityPolicySubjectReview.", + "status": "status represents the current information/status for the PodSecurityPolicySubjectReview.", } func (PodSecurityPolicySubjectReview) SwaggerDoc() map[string]string { @@ -178,6 +181,7 @@ var map_SecurityContextConstraints = map[string]string{ "allowHostPorts": "AllowHostPorts determines if the policy allows host ports in the containers.", "allowHostPID": "AllowHostPID determines if the policy allows host pid in the containers.", "allowHostIPC": "AllowHostIPC determines if the policy allows host ipc in the containers.", + "userNamespaceLevel": "userNamespaceLevel determines if the policy allows host users in containers. Valid values are \"AllowHostLevel\", \"RequirePodLevel\", and omitted. When \"AllowHostLevel\" is set, a pod author may set `hostUsers` to either `true` or `false`. When \"RequirePodLevel\" is set, a pod author must set `hostUsers` to `false`. When omitted, the default value is \"AllowHostLevel\".", "defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", "allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. 
If unspecified, defaults to true.", "seLinuxContext": "SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.", diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudplatformstatus.go index d265d749c..e763d14f6 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudplatformstatus.go @@ -2,7 +2,7 @@ package v1 -// AlibabaCloudPlatformStatusApplyConfiguration represents an declarative configuration of the AlibabaCloudPlatformStatus type for use +// AlibabaCloudPlatformStatusApplyConfiguration represents a declarative configuration of the AlibabaCloudPlatformStatus type for use // with apply. type AlibabaCloudPlatformStatusApplyConfiguration struct { Region *string `json:"region,omitempty"` @@ -10,7 +10,7 @@ type AlibabaCloudPlatformStatusApplyConfiguration struct { ResourceTags []AlibabaCloudResourceTagApplyConfiguration `json:"resourceTags,omitempty"` } -// AlibabaCloudPlatformStatusApplyConfiguration constructs an declarative configuration of the AlibabaCloudPlatformStatus type for use with +// AlibabaCloudPlatformStatusApplyConfiguration constructs a declarative configuration of the AlibabaCloudPlatformStatus type for use with // apply. func AlibabaCloudPlatformStatus() *AlibabaCloudPlatformStatusApplyConfiguration { return &AlibabaCloudPlatformStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudresourcetag.go index 740028938..38fef6d50 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudresourcetag.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudresourcetag.go @@ -2,14 +2,14 @@ package v1 -// AlibabaCloudResourceTagApplyConfiguration represents an declarative configuration of the AlibabaCloudResourceTag type for use +// AlibabaCloudResourceTagApplyConfiguration represents a declarative configuration of the AlibabaCloudResourceTag type for use // with apply. type AlibabaCloudResourceTagApplyConfiguration struct { Key *string `json:"key,omitempty"` Value *string `json:"value,omitempty"` } -// AlibabaCloudResourceTagApplyConfiguration constructs an declarative configuration of the AlibabaCloudResourceTag type for use with +// AlibabaCloudResourceTagApplyConfiguration constructs a declarative configuration of the AlibabaCloudResourceTag type for use with // apply. 
func AlibabaCloudResourceTag() *AlibabaCloudResourceTagApplyConfiguration { return &AlibabaCloudResourceTagApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go index 582186356..e10ae5c76 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// APIServerApplyConfiguration represents an declarative configuration of the APIServer type for use +// APIServerApplyConfiguration represents a declarative configuration of the APIServer type for use // with apply. type APIServerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type APIServerApplyConfiguration struct { Status *apiconfigv1.APIServerStatus `json:"status,omitempty"` } -// APIServer constructs an declarative configuration of the APIServer type for use with +// APIServer constructs a declarative configuration of the APIServer type for use with // apply. func APIServer(name string) *APIServerApplyConfiguration { b := &APIServerApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *APIServerApplyConfiguration) WithStatus(value apiconfigv1.APIServerStat b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *APIServerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go index 7e5de50b5..a64f18bb4 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go @@ -6,13 +6,13 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// APIServerEncryptionApplyConfiguration represents an declarative configuration of the APIServerEncryption type for use +// APIServerEncryptionApplyConfiguration represents a declarative configuration of the APIServerEncryption type for use // with apply. type APIServerEncryptionApplyConfiguration struct { Type *v1.EncryptionType `json:"type,omitempty"` } -// APIServerEncryptionApplyConfiguration constructs an declarative configuration of the APIServerEncryption type for use with +// APIServerEncryptionApplyConfiguration constructs a declarative configuration of the APIServerEncryption type for use with // apply. 
func APIServerEncryption() *APIServerEncryptionApplyConfiguration { return &APIServerEncryptionApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiservernamedservingcert.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiservernamedservingcert.go index b55943a41..ae1f76215 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiservernamedservingcert.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiservernamedservingcert.go @@ -2,14 +2,14 @@ package v1 -// APIServerNamedServingCertApplyConfiguration represents an declarative configuration of the APIServerNamedServingCert type for use +// APIServerNamedServingCertApplyConfiguration represents a declarative configuration of the APIServerNamedServingCert type for use // with apply. type APIServerNamedServingCertApplyConfiguration struct { Names []string `json:"names,omitempty"` ServingCertificate *SecretNameReferenceApplyConfiguration `json:"servingCertificate,omitempty"` } -// APIServerNamedServingCertApplyConfiguration constructs an declarative configuration of the APIServerNamedServingCert type for use with +// APIServerNamedServingCertApplyConfiguration constructs a declarative configuration of the APIServerNamedServingCert type for use with // apply. func APIServerNamedServingCert() *APIServerNamedServingCertApplyConfiguration { return &APIServerNamedServingCertApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverservingcerts.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverservingcerts.go index 6a7084248..963bea305 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverservingcerts.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverservingcerts.go @@ -2,13 +2,13 @@ package v1 -// APIServerServingCertsApplyConfiguration represents an declarative configuration of the APIServerServingCerts type for use +// APIServerServingCertsApplyConfiguration represents a declarative configuration of the APIServerServingCerts type for use // with apply. type APIServerServingCertsApplyConfiguration struct { NamedCertificates []APIServerNamedServingCertApplyConfiguration `json:"namedCertificates,omitempty"` } -// APIServerServingCertsApplyConfiguration constructs an declarative configuration of the APIServerServingCerts type for use with +// APIServerServingCertsApplyConfiguration constructs a declarative configuration of the APIServerServingCerts type for use with // apply. func APIServerServingCerts() *APIServerServingCertsApplyConfiguration { return &APIServerServingCertsApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverspec.go index 3e9eaeac0..58f4b0eec 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverspec.go @@ -2,7 +2,7 @@ package v1 -// APIServerSpecApplyConfiguration represents an declarative configuration of the APIServerSpec type for use +// APIServerSpecApplyConfiguration represents a declarative configuration of the APIServerSpec type for use // with apply. 
type APIServerSpecApplyConfiguration struct { ServingCerts *APIServerServingCertsApplyConfiguration `json:"servingCerts,omitempty"` @@ -13,7 +13,7 @@ type APIServerSpecApplyConfiguration struct { Audit *AuditApplyConfiguration `json:"audit,omitempty"` } -// APIServerSpecApplyConfiguration constructs an declarative configuration of the APIServerSpec type for use with +// APIServerSpecApplyConfiguration constructs a declarative configuration of the APIServerSpec type for use with // apply. func APIServerSpec() *APIServerSpecApplyConfiguration { return &APIServerSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go index 8db029e26..49b2e6c79 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go @@ -6,14 +6,14 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// AuditApplyConfiguration represents an declarative configuration of the Audit type for use +// AuditApplyConfiguration represents a declarative configuration of the Audit type for use // with apply. type AuditApplyConfiguration struct { Profile *v1.AuditProfileType `json:"profile,omitempty"` CustomRules []AuditCustomRuleApplyConfiguration `json:"customRules,omitempty"` } -// AuditApplyConfiguration constructs an declarative configuration of the Audit type for use with +// AuditApplyConfiguration constructs a declarative configuration of the Audit type for use with // apply. func Audit() *AuditApplyConfiguration { return &AuditApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go index 80719443e..838f3a22f 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go @@ -6,14 +6,14 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// AuditCustomRuleApplyConfiguration represents an declarative configuration of the AuditCustomRule type for use +// AuditCustomRuleApplyConfiguration represents a declarative configuration of the AuditCustomRule type for use // with apply. type AuditCustomRuleApplyConfiguration struct { Group *string `json:"group,omitempty"` Profile *v1.AuditProfileType `json:"profile,omitempty"` } -// AuditCustomRuleApplyConfiguration constructs an declarative configuration of the AuditCustomRule type for use with +// AuditCustomRuleApplyConfiguration constructs a declarative configuration of the AuditCustomRule type for use with // apply. 
func AuditCustomRule() *AuditCustomRuleApplyConfiguration { return &AuditCustomRuleApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go index 5f5519856..b8fc3dfdf 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AuthenticationApplyConfiguration represents an declarative configuration of the Authentication type for use +// AuthenticationApplyConfiguration represents a declarative configuration of the Authentication type for use // with apply. type AuthenticationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type AuthenticationApplyConfiguration struct { Status *AuthenticationStatusApplyConfiguration `json:"status,omitempty"` } -// Authentication constructs an declarative configuration of the Authentication type for use with +// Authentication constructs a declarative configuration of the Authentication type for use with // apply. func Authentication(name string) *AuthenticationApplyConfiguration { b := &AuthenticationApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *AuthenticationApplyConfiguration) WithStatus(value *AuthenticationStatu b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *AuthenticationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go index f152d261a..27cf2004f 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// AuthenticationSpecApplyConfiguration represents an declarative configuration of the AuthenticationSpec type for use +// AuthenticationSpecApplyConfiguration represents a declarative configuration of the AuthenticationSpec type for use // with apply. type AuthenticationSpecApplyConfiguration struct { Type *v1.AuthenticationType `json:"type,omitempty"` @@ -17,7 +17,7 @@ type AuthenticationSpecApplyConfiguration struct { OIDCProviders []OIDCProviderApplyConfiguration `json:"oidcProviders,omitempty"` } -// AuthenticationSpecApplyConfiguration constructs an declarative configuration of the AuthenticationSpec type for use with +// AuthenticationSpecApplyConfiguration constructs a declarative configuration of the AuthenticationSpec type for use with // apply. 
func AuthenticationSpec() *AuthenticationSpecApplyConfiguration { return &AuthenticationSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationstatus.go index e1bb74c0c..1539f164b 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationstatus.go @@ -2,14 +2,14 @@ package v1 -// AuthenticationStatusApplyConfiguration represents an declarative configuration of the AuthenticationStatus type for use +// AuthenticationStatusApplyConfiguration represents a declarative configuration of the AuthenticationStatus type for use // with apply. type AuthenticationStatusApplyConfiguration struct { IntegratedOAuthMetadata *ConfigMapNameReferenceApplyConfiguration `json:"integratedOAuthMetadata,omitempty"` OIDCClients []OIDCClientStatusApplyConfiguration `json:"oidcClients,omitempty"` } -// AuthenticationStatusApplyConfiguration constructs an declarative configuration of the AuthenticationStatus type for use with +// AuthenticationStatusApplyConfiguration constructs a declarative configuration of the AuthenticationStatus type for use with // apply. func AuthenticationStatus() *AuthenticationStatusApplyConfiguration { return &AuthenticationStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go index 4f7ce43d1..8ad662e23 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go @@ -2,13 +2,13 @@ package v1 -// AWSDNSSpecApplyConfiguration represents an declarative configuration of the AWSDNSSpec type for use +// AWSDNSSpecApplyConfiguration represents a declarative configuration of the AWSDNSSpec type for use // with apply. type AWSDNSSpecApplyConfiguration struct { PrivateZoneIAMRole *string `json:"privateZoneIAMRole,omitempty"` } -// AWSDNSSpecApplyConfiguration constructs an declarative configuration of the AWSDNSSpec type for use with +// AWSDNSSpecApplyConfiguration constructs a declarative configuration of the AWSDNSSpec type for use with // apply. func AWSDNSSpec() *AWSDNSSpecApplyConfiguration { return &AWSDNSSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go index 9a56b68ba..d7fe57934 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go @@ -6,13 +6,13 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// AWSIngressSpecApplyConfiguration represents an declarative configuration of the AWSIngressSpec type for use +// AWSIngressSpecApplyConfiguration represents a declarative configuration of the AWSIngressSpec type for use // with apply. 
type AWSIngressSpecApplyConfiguration struct { Type *v1.AWSLBType `json:"type,omitempty"` } -// AWSIngressSpecApplyConfiguration constructs an declarative configuration of the AWSIngressSpec type for use with +// AWSIngressSpecApplyConfiguration constructs a declarative configuration of the AWSIngressSpec type for use with // apply. func AWSIngressSpec() *AWSIngressSpecApplyConfiguration { return &AWSIngressSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformspec.go index b8132541f..85361e7a2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformspec.go @@ -2,13 +2,13 @@ package v1 -// AWSPlatformSpecApplyConfiguration represents an declarative configuration of the AWSPlatformSpec type for use +// AWSPlatformSpecApplyConfiguration represents a declarative configuration of the AWSPlatformSpec type for use // with apply. type AWSPlatformSpecApplyConfiguration struct { ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` } -// AWSPlatformSpecApplyConfiguration constructs an declarative configuration of the AWSPlatformSpec type for use with +// AWSPlatformSpecApplyConfiguration constructs a declarative configuration of the AWSPlatformSpec type for use with // apply. func AWSPlatformSpec() *AWSPlatformSpecApplyConfiguration { return &AWSPlatformSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go index fb317ba27..e5b1b74ea 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go @@ -2,7 +2,7 @@ package v1 -// AWSPlatformStatusApplyConfiguration represents an declarative configuration of the AWSPlatformStatus type for use +// AWSPlatformStatusApplyConfiguration represents a declarative configuration of the AWSPlatformStatus type for use // with apply. type AWSPlatformStatusApplyConfiguration struct { Region *string `json:"region,omitempty"` @@ -10,7 +10,7 @@ type AWSPlatformStatusApplyConfiguration struct { ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"` } -// AWSPlatformStatusApplyConfiguration constructs an declarative configuration of the AWSPlatformStatus type for use with +// AWSPlatformStatusApplyConfiguration constructs a declarative configuration of the AWSPlatformStatus type for use with // apply. 
 func AWSPlatformStatus() *AWSPlatformStatusApplyConfiguration {
     return &AWSPlatformStatusApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsresourcetag.go
index f9f174fc5..766157a07 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsresourcetag.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsresourcetag.go
@@ -2,14 +2,14 @@

 package v1

-// AWSResourceTagApplyConfiguration represents an declarative configuration of the AWSResourceTag type for use
+// AWSResourceTagApplyConfiguration represents a declarative configuration of the AWSResourceTag type for use
 // with apply.
 type AWSResourceTagApplyConfiguration struct {
     Key   *string `json:"key,omitempty"`
     Value *string `json:"value,omitempty"`
 }

-// AWSResourceTagApplyConfiguration constructs an declarative configuration of the AWSResourceTag type for use with
+// AWSResourceTagApplyConfiguration constructs a declarative configuration of the AWSResourceTag type for use with
 // apply.
 func AWSResourceTag() *AWSResourceTagApplyConfiguration {
     return &AWSResourceTagApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsserviceendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsserviceendpoint.go
index 169e4bb2a..5d4f38882 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsserviceendpoint.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsserviceendpoint.go
@@ -2,14 +2,14 @@

 package v1

-// AWSServiceEndpointApplyConfiguration represents an declarative configuration of the AWSServiceEndpoint type for use
+// AWSServiceEndpointApplyConfiguration represents a declarative configuration of the AWSServiceEndpoint type for use
 // with apply.
 type AWSServiceEndpointApplyConfiguration struct {
     Name *string `json:"name,omitempty"`
     URL  *string `json:"url,omitempty"`
 }

-// AWSServiceEndpointApplyConfiguration constructs an declarative configuration of the AWSServiceEndpoint type for use with
+// AWSServiceEndpointApplyConfiguration constructs a declarative configuration of the AWSServiceEndpoint type for use with
 // apply.
 func AWSServiceEndpoint() *AWSServiceEndpointApplyConfiguration {
     return &AWSServiceEndpointApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go
index 52b291553..442674734 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go
@@ -6,7 +6,7 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// AzurePlatformStatusApplyConfiguration represents an declarative configuration of the AzurePlatformStatus type for use
+// AzurePlatformStatusApplyConfiguration represents a declarative configuration of the AzurePlatformStatus type for use
 // with apply.
 type AzurePlatformStatusApplyConfiguration struct {
     ResourceGroupName *string `json:"resourceGroupName,omitempty"`
@@ -16,7 +16,7 @@ type AzurePlatformStatusApplyConfiguration struct {
     ResourceTags []AzureResourceTagApplyConfiguration `json:"resourceTags,omitempty"`
 }

-// AzurePlatformStatusApplyConfiguration constructs an declarative configuration of the AzurePlatformStatus type for use with
+// AzurePlatformStatusApplyConfiguration constructs a declarative configuration of the AzurePlatformStatus type for use with
 // apply.
 func AzurePlatformStatus() *AzurePlatformStatusApplyConfiguration {
     return &AzurePlatformStatusApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go
index f258f0987..980d2a168 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go
@@ -2,14 +2,14 @@

 package v1

-// AzureResourceTagApplyConfiguration represents an declarative configuration of the AzureResourceTag type for use
+// AzureResourceTagApplyConfiguration represents a declarative configuration of the AzureResourceTag type for use
 // with apply.
 type AzureResourceTagApplyConfiguration struct {
     Key   *string `json:"key,omitempty"`
     Value *string `json:"value,omitempty"`
 }

-// AzureResourceTagApplyConfiguration constructs an declarative configuration of the AzureResourceTag type for use with
+// AzureResourceTagApplyConfiguration constructs a declarative configuration of the AzureResourceTag type for use with
 // apply.
 func AzureResourceTag() *AzureResourceTagApplyConfiguration {
     return &AzureResourceTagApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go
index 7ff5dd99e..a78284764 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go
@@ -6,13 +6,13 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// BareMetalPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the BareMetalPlatformLoadBalancer type for use
+// BareMetalPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the BareMetalPlatformLoadBalancer type for use
 // with apply.
 type BareMetalPlatformLoadBalancerApplyConfiguration struct {
     Type *v1.PlatformLoadBalancerType `json:"type,omitempty"`
 }

-// BareMetalPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the BareMetalPlatformLoadBalancer type for use with
+// BareMetalPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the BareMetalPlatformLoadBalancer type for use with
 // apply.
 func BareMetalPlatformLoadBalancer() *BareMetalPlatformLoadBalancerApplyConfiguration {
     return &BareMetalPlatformLoadBalancerApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go
index d96c5330b..3140b5548 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go
@@ -6,7 +6,7 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// BareMetalPlatformSpecApplyConfiguration represents an declarative configuration of the BareMetalPlatformSpec type for use
+// BareMetalPlatformSpecApplyConfiguration represents a declarative configuration of the BareMetalPlatformSpec type for use
 // with apply.
 type BareMetalPlatformSpecApplyConfiguration struct {
     APIServerInternalIPs []v1.IP `json:"apiServerInternalIPs,omitempty"`
@@ -14,7 +14,7 @@ type BareMetalPlatformSpecApplyConfiguration struct {
     MachineNetworks []v1.CIDR `json:"machineNetworks,omitempty"`
 }

-// BareMetalPlatformSpecApplyConfiguration constructs an declarative configuration of the BareMetalPlatformSpec type for use with
+// BareMetalPlatformSpecApplyConfiguration constructs a declarative configuration of the BareMetalPlatformSpec type for use with
 // apply.
 func BareMetalPlatformSpec() *BareMetalPlatformSpecApplyConfiguration {
     return &BareMetalPlatformSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go
index 87873d49a..55b875c7c 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go
@@ -6,7 +6,7 @@ import (
     configv1 "github.com/openshift/api/config/v1"
 )

-// BareMetalPlatformStatusApplyConfiguration represents an declarative configuration of the BareMetalPlatformStatus type for use
+// BareMetalPlatformStatusApplyConfiguration represents a declarative configuration of the BareMetalPlatformStatus type for use
 // with apply.
 type BareMetalPlatformStatusApplyConfiguration struct {
     APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"`
@@ -18,7 +18,7 @@ type BareMetalPlatformStatusApplyConfiguration struct {
     MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"`
 }

-// BareMetalPlatformStatusApplyConfiguration constructs an declarative configuration of the BareMetalPlatformStatus type for use with
+// BareMetalPlatformStatusApplyConfiguration constructs a declarative configuration of the BareMetalPlatformStatus type for use with
 // apply.
 func BareMetalPlatformStatus() *BareMetalPlatformStatusApplyConfiguration {
     return &BareMetalPlatformStatusApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go
index 9d181ebde..7cbd241d0 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go
@@ -2,13 +2,13 @@

 package v1

-// BasicAuthIdentityProviderApplyConfiguration represents an declarative configuration of the BasicAuthIdentityProvider type for use
+// BasicAuthIdentityProviderApplyConfiguration represents a declarative configuration of the BasicAuthIdentityProvider type for use
 // with apply.
 type BasicAuthIdentityProviderApplyConfiguration struct {
     OAuthRemoteConnectionInfoApplyConfiguration `json:",inline"`
 }

-// BasicAuthIdentityProviderApplyConfiguration constructs an declarative configuration of the BasicAuthIdentityProvider type for use with
+// BasicAuthIdentityProviderApplyConfiguration constructs a declarative configuration of the BasicAuthIdentityProvider type for use with
 // apply.
 func BasicAuthIdentityProvider() *BasicAuthIdentityProviderApplyConfiguration {
     return &BasicAuthIdentityProviderApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go
index 39100461a..06aa12e0f 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go
@@ -11,7 +11,7 @@ import (
     v1 "k8s.io/client-go/applyconfigurations/meta/v1"
 )

-// BuildApplyConfiguration represents an declarative configuration of the Build type for use
+// BuildApplyConfiguration represents a declarative configuration of the Build type for use
 // with apply.
 type BuildApplyConfiguration struct {
     v1.TypeMetaApplyConfiguration `json:",inline"`
@@ -19,7 +19,7 @@ type BuildApplyConfiguration struct {
     Spec *BuildSpecApplyConfiguration `json:"spec,omitempty"`
 }

-// Build constructs an declarative configuration of the Build type for use with
+// Build constructs a declarative configuration of the Build type for use with
 // apply.
 func Build(name string) *BuildApplyConfiguration {
     b := &BuildApplyConfiguration{}
@@ -229,3 +229,9 @@ func (b *BuildApplyConfiguration) WithSpec(value *BuildSpecApplyConfiguration) *
     b.Spec = value
     return b
 }
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *BuildApplyConfiguration) GetName() *string {
+    b.ensureObjectMetaApplyConfigurationExists()
+    return b.Name
+}
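The hunk above is the first of several in this bump (ClusterOperator, ClusterVersion, Console, DNS, and FeatureGate below get the same treatment) that add a `GetName` accessor to the generated top-level apply configurations. A minimal caller-side sketch, assuming only what the generated code shown here provides; the object name is made up for illustration:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// Build(name) seeds ObjectMeta.Name, and GetName calls
	// ensureObjectMetaApplyConfigurationExists before reading it,
	// so the returned pointer is safe to check and dereference.
	b := configv1.Build("cluster")
	if name := b.GetName(); name != nil {
		fmt.Println(*name) // "cluster"
	}
}
```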
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/builddefaults.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/builddefaults.go
index 347906b3b..ece924419 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/builddefaults.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/builddefaults.go
@@ -6,7 +6,7 @@ import (
     corev1 "k8s.io/api/core/v1"
 )

-// BuildDefaultsApplyConfiguration represents an declarative configuration of the BuildDefaults type for use
+// BuildDefaultsApplyConfiguration represents a declarative configuration of the BuildDefaults type for use
 // with apply.
 type BuildDefaultsApplyConfiguration struct {
     DefaultProxy *ProxySpecApplyConfiguration `json:"defaultProxy,omitempty"`
@@ -16,7 +16,7 @@ type BuildDefaultsApplyConfiguration struct {
     Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
 }

-// BuildDefaultsApplyConfiguration constructs an declarative configuration of the BuildDefaults type for use with
+// BuildDefaultsApplyConfiguration constructs a declarative configuration of the BuildDefaults type for use with
 // apply.
 func BuildDefaults() *BuildDefaultsApplyConfiguration {
     return &BuildDefaultsApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildoverrides.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildoverrides.go
index 7ce64634a..948bc9e8a 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildoverrides.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildoverrides.go
@@ -6,7 +6,7 @@ import (
     corev1 "k8s.io/api/core/v1"
 )

-// BuildOverridesApplyConfiguration represents an declarative configuration of the BuildOverrides type for use
+// BuildOverridesApplyConfiguration represents a declarative configuration of the BuildOverrides type for use
 // with apply.
 type BuildOverridesApplyConfiguration struct {
     ImageLabels []ImageLabelApplyConfiguration `json:"imageLabels,omitempty"`
@@ -15,7 +15,7 @@ type BuildOverridesApplyConfiguration struct {
     ForcePull *bool `json:"forcePull,omitempty"`
 }

-// BuildOverridesApplyConfiguration constructs an declarative configuration of the BuildOverrides type for use with
+// BuildOverridesApplyConfiguration constructs a declarative configuration of the BuildOverrides type for use with
 // apply.
 func BuildOverrides() *BuildOverridesApplyConfiguration {
     return &BuildOverridesApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildspec.go
index 521cef0e8..1b8cb7054 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildspec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildspec.go
@@ -2,7 +2,7 @@

 package v1

-// BuildSpecApplyConfiguration represents an declarative configuration of the BuildSpec type for use
+// BuildSpecApplyConfiguration represents a declarative configuration of the BuildSpec type for use
 // with apply.
 type BuildSpecApplyConfiguration struct {
     AdditionalTrustedCA *ConfigMapNameReferenceApplyConfiguration `json:"additionalTrustedCA,omitempty"`
@@ -10,7 +10,7 @@ type BuildSpecApplyConfiguration struct {
     BuildOverrides *BuildOverridesApplyConfiguration `json:"buildOverrides,omitempty"`
 }

-// BuildSpecApplyConfiguration constructs an declarative configuration of the BuildSpec type for use with
+// BuildSpecApplyConfiguration constructs a declarative configuration of the BuildSpec type for use with
 // apply.
 func BuildSpec() *BuildSpecApplyConfiguration {
     return &BuildSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go
index 2d7a55a78..60bf4ed6b 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go
@@ -6,13 +6,13 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// CloudControllerManagerStatusApplyConfiguration represents an declarative configuration of the CloudControllerManagerStatus type for use
+// CloudControllerManagerStatusApplyConfiguration represents a declarative configuration of the CloudControllerManagerStatus type for use
 // with apply.
 type CloudControllerManagerStatusApplyConfiguration struct {
     State *v1.CloudControllerManagerState `json:"state,omitempty"`
 }

-// CloudControllerManagerStatusApplyConfiguration constructs an declarative configuration of the CloudControllerManagerStatus type for use with
+// CloudControllerManagerStatusApplyConfiguration constructs a declarative configuration of the CloudControllerManagerStatus type for use with
 // apply.
 func CloudControllerManagerStatus() *CloudControllerManagerStatusApplyConfiguration {
     return &CloudControllerManagerStatusApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go
index c84f6c776..5be77a3a3 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go
@@ -6,14 +6,14 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// CloudLoadBalancerConfigApplyConfiguration represents an declarative configuration of the CloudLoadBalancerConfig type for use
+// CloudLoadBalancerConfigApplyConfiguration represents a declarative configuration of the CloudLoadBalancerConfig type for use
 // with apply.
 type CloudLoadBalancerConfigApplyConfiguration struct {
     DNSType       *v1.DNSType                             `json:"dnsType,omitempty"`
     ClusterHosted *CloudLoadBalancerIPsApplyConfiguration `json:"clusterHosted,omitempty"`
 }

-// CloudLoadBalancerConfigApplyConfiguration constructs an declarative configuration of the CloudLoadBalancerConfig type for use with
+// CloudLoadBalancerConfigApplyConfiguration constructs a declarative configuration of the CloudLoadBalancerConfig type for use with
 // apply.
 func CloudLoadBalancerConfig() *CloudLoadBalancerConfigApplyConfiguration {
     return &CloudLoadBalancerConfigApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go
index 6480177fe..baef18811 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go
@@ -6,7 +6,7 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// CloudLoadBalancerIPsApplyConfiguration represents an declarative configuration of the CloudLoadBalancerIPs type for use
+// CloudLoadBalancerIPsApplyConfiguration represents a declarative configuration of the CloudLoadBalancerIPs type for use
 // with apply.
 type CloudLoadBalancerIPsApplyConfiguration struct {
     APIIntLoadBalancerIPs []v1.IP `json:"apiIntLoadBalancerIPs,omitempty"`
@@ -14,7 +14,7 @@ type CloudLoadBalancerIPsApplyConfiguration struct {
     IngressLoadBalancerIPs []v1.IP `json:"ingressLoadBalancerIPs,omitempty"`
 }

-// CloudLoadBalancerIPsApplyConfiguration constructs an declarative configuration of the CloudLoadBalancerIPs type for use with
+// CloudLoadBalancerIPsApplyConfiguration constructs a declarative configuration of the CloudLoadBalancerIPs type for use with
 // apply.
 func CloudLoadBalancerIPs() *CloudLoadBalancerIPsApplyConfiguration {
     return &CloudLoadBalancerIPsApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustercondition.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustercondition.go
index 145fa267a..d71c182cf 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustercondition.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustercondition.go
@@ -2,14 +2,14 @@

 package v1

-// ClusterConditionApplyConfiguration represents an declarative configuration of the ClusterCondition type for use
+// ClusterConditionApplyConfiguration represents a declarative configuration of the ClusterCondition type for use
 // with apply.
 type ClusterConditionApplyConfiguration struct {
     Type   *string                                  `json:"type,omitempty"`
     PromQL *PromQLClusterConditionApplyConfiguration `json:"promql,omitempty"`
 }

-// ClusterConditionApplyConfiguration constructs an declarative configuration of the ClusterCondition type for use with
+// ClusterConditionApplyConfiguration constructs a declarative configuration of the ClusterCondition type for use with
 // apply.
 func ClusterCondition() *ClusterConditionApplyConfiguration {
     return &ClusterConditionApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusternetworkentry.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusternetworkentry.go
index fe03d3355..ac180f893 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusternetworkentry.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusternetworkentry.go
@@ -2,14 +2,14 @@

 package v1

-// ClusterNetworkEntryApplyConfiguration represents an declarative configuration of the ClusterNetworkEntry type for use
+// ClusterNetworkEntryApplyConfiguration represents a declarative configuration of the ClusterNetworkEntry type for use
 // with apply.
 type ClusterNetworkEntryApplyConfiguration struct {
     CIDR       *string `json:"cidr,omitempty"`
     HostPrefix *uint32 `json:"hostPrefix,omitempty"`
 }

-// ClusterNetworkEntryApplyConfiguration constructs an declarative configuration of the ClusterNetworkEntry type for use with
+// ClusterNetworkEntryApplyConfiguration constructs a declarative configuration of the ClusterNetworkEntry type for use with
 // apply.
 func ClusterNetworkEntry() *ClusterNetworkEntryApplyConfiguration {
     return &ClusterNetworkEntryApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go
index ab83fa08d..6371179a8 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go
@@ -11,7 +11,7 @@ import (
     v1 "k8s.io/client-go/applyconfigurations/meta/v1"
 )

-// ClusterOperatorApplyConfiguration represents an declarative configuration of the ClusterOperator type for use
+// ClusterOperatorApplyConfiguration represents a declarative configuration of the ClusterOperator type for use
 // with apply.
 type ClusterOperatorApplyConfiguration struct {
     v1.TypeMetaApplyConfiguration `json:",inline"`
@@ -20,7 +20,7 @@ type ClusterOperatorApplyConfiguration struct {
     Status *ClusterOperatorStatusApplyConfiguration `json:"status,omitempty"`
 }

-// ClusterOperator constructs an declarative configuration of the ClusterOperator type for use with
+// ClusterOperator constructs a declarative configuration of the ClusterOperator type for use with
 // apply.
 func ClusterOperator(name string) *ClusterOperatorApplyConfiguration {
     b := &ClusterOperatorApplyConfiguration{}
@@ -238,3 +238,9 @@ func (b *ClusterOperatorApplyConfiguration) WithStatus(value *ClusterOperatorSta
     b.Status = value
     return b
 }
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ClusterOperatorApplyConfiguration) GetName() *string {
+    b.ensureObjectMetaApplyConfigurationExists()
+    return b.Name
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatus.go
index 3fac7d9b6..d5a198965 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatus.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatus.go
@@ -6,7 +6,7 @@ import (
     runtime "k8s.io/apimachinery/pkg/runtime"
 )

-// ClusterOperatorStatusApplyConfiguration represents an declarative configuration of the ClusterOperatorStatus type for use
+// ClusterOperatorStatusApplyConfiguration represents a declarative configuration of the ClusterOperatorStatus type for use
 // with apply.
 type ClusterOperatorStatusApplyConfiguration struct {
     Conditions []ClusterOperatorStatusConditionApplyConfiguration `json:"conditions,omitempty"`
@@ -15,7 +15,7 @@ type ClusterOperatorStatusApplyConfiguration struct {
     Extension *runtime.RawExtension `json:"extension,omitempty"`
 }

-// ClusterOperatorStatusApplyConfiguration constructs an declarative configuration of the ClusterOperatorStatus type for use with
+// ClusterOperatorStatusApplyConfiguration constructs a declarative configuration of the ClusterOperatorStatus type for use with
 // apply.
 func ClusterOperatorStatus() *ClusterOperatorStatusApplyConfiguration {
     return &ClusterOperatorStatusApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go
index 5e52013c8..557f75d2e 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go
@@ -7,7 +7,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

-// ClusterOperatorStatusConditionApplyConfiguration represents an declarative configuration of the ClusterOperatorStatusCondition type for use
+// ClusterOperatorStatusConditionApplyConfiguration represents a declarative configuration of the ClusterOperatorStatusCondition type for use
 // with apply.
 type ClusterOperatorStatusConditionApplyConfiguration struct {
     Type *v1.ClusterStatusConditionType `json:"type,omitempty"`
@@ -17,7 +17,7 @@ type ClusterOperatorStatusConditionApplyConfiguration struct {
     Message *string `json:"message,omitempty"`
 }

-// ClusterOperatorStatusConditionApplyConfiguration constructs an declarative configuration of the ClusterOperatorStatusCondition type for use with
+// ClusterOperatorStatusConditionApplyConfiguration constructs a declarative configuration of the ClusterOperatorStatusCondition type for use with
 // apply.
 func ClusterOperatorStatusCondition() *ClusterOperatorStatusConditionApplyConfiguration {
     return &ClusterOperatorStatusConditionApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go
index 24d5e143c..52f2e5cb8 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go
@@ -11,7 +11,7 @@ import (
     v1 "k8s.io/client-go/applyconfigurations/meta/v1"
 )

-// ClusterVersionApplyConfiguration represents an declarative configuration of the ClusterVersion type for use
+// ClusterVersionApplyConfiguration represents a declarative configuration of the ClusterVersion type for use
 // with apply.
 type ClusterVersionApplyConfiguration struct {
     v1.TypeMetaApplyConfiguration `json:",inline"`
@@ -20,7 +20,7 @@ type ClusterVersionApplyConfiguration struct {
     Status *ClusterVersionStatusApplyConfiguration `json:"status,omitempty"`
 }

-// ClusterVersion constructs an declarative configuration of the ClusterVersion type for use with
+// ClusterVersion constructs a declarative configuration of the ClusterVersion type for use with
 // apply.
 func ClusterVersion(name string) *ClusterVersionApplyConfiguration {
     b := &ClusterVersionApplyConfiguration{}
@@ -238,3 +238,9 @@ func (b *ClusterVersionApplyConfiguration) WithStatus(value *ClusterVersionStatu
     b.Status = value
     return b
 }
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ClusterVersionApplyConfiguration) GetName() *string {
+    b.ensureObjectMetaApplyConfigurationExists()
+    return b.Name
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go
index b4a28f1b2..254eb00e9 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go
@@ -6,14 +6,14 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// ClusterVersionCapabilitiesSpecApplyConfiguration represents an declarative configuration of the ClusterVersionCapabilitiesSpec type for use
+// ClusterVersionCapabilitiesSpecApplyConfiguration represents a declarative configuration of the ClusterVersionCapabilitiesSpec type for use
 // with apply.
 type ClusterVersionCapabilitiesSpecApplyConfiguration struct {
     BaselineCapabilitySet         *v1.ClusterVersionCapabilitySet `json:"baselineCapabilitySet,omitempty"`
     AdditionalEnabledCapabilities []v1.ClusterVersionCapability   `json:"additionalEnabledCapabilities,omitempty"`
 }

-// ClusterVersionCapabilitiesSpecApplyConfiguration constructs an declarative configuration of the ClusterVersionCapabilitiesSpec type for use with
+// ClusterVersionCapabilitiesSpecApplyConfiguration constructs a declarative configuration of the ClusterVersionCapabilitiesSpec type for use with
 // apply.
 func ClusterVersionCapabilitiesSpec() *ClusterVersionCapabilitiesSpecApplyConfiguration {
     return &ClusterVersionCapabilitiesSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go
index 48c4363f1..05350241f 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go
@@ -6,14 +6,14 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// ClusterVersionCapabilitiesStatusApplyConfiguration represents an declarative configuration of the ClusterVersionCapabilitiesStatus type for use
+// ClusterVersionCapabilitiesStatusApplyConfiguration represents a declarative configuration of the ClusterVersionCapabilitiesStatus type for use
 // with apply.
 type ClusterVersionCapabilitiesStatusApplyConfiguration struct {
     EnabledCapabilities []v1.ClusterVersionCapability `json:"enabledCapabilities,omitempty"`
     KnownCapabilities   []v1.ClusterVersionCapability `json:"knownCapabilities,omitempty"`
 }

-// ClusterVersionCapabilitiesStatusApplyConfiguration constructs an declarative configuration of the ClusterVersionCapabilitiesStatus type for use with
+// ClusterVersionCapabilitiesStatusApplyConfiguration constructs a declarative configuration of the ClusterVersionCapabilitiesStatus type for use with
 // apply.
 func ClusterVersionCapabilitiesStatus() *ClusterVersionCapabilitiesStatusApplyConfiguration {
     return &ClusterVersionCapabilitiesStatusApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go
index e1fd4d37d..b440c613b 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go
@@ -6,7 +6,7 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// ClusterVersionSpecApplyConfiguration represents an declarative configuration of the ClusterVersionSpec type for use
+// ClusterVersionSpecApplyConfiguration represents a declarative configuration of the ClusterVersionSpec type for use
 // with apply.
 type ClusterVersionSpecApplyConfiguration struct {
     ClusterID *v1.ClusterID `json:"clusterID,omitempty"`
@@ -18,7 +18,7 @@ type ClusterVersionSpecApplyConfiguration struct {
     Overrides []ComponentOverrideApplyConfiguration `json:"overrides,omitempty"`
 }

-// ClusterVersionSpecApplyConfiguration constructs an declarative configuration of the ClusterVersionSpec type for use with
+// ClusterVersionSpecApplyConfiguration constructs a declarative configuration of the ClusterVersionSpec type for use with
 // apply.
 func ClusterVersionSpec() *ClusterVersionSpecApplyConfiguration {
     return &ClusterVersionSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionstatus.go
index 3e9f45094..e966cf424 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionstatus.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionstatus.go
@@ -2,7 +2,7 @@

 package v1

-// ClusterVersionStatusApplyConfiguration represents an declarative configuration of the ClusterVersionStatus type for use
+// ClusterVersionStatusApplyConfiguration represents a declarative configuration of the ClusterVersionStatus type for use
 // with apply.
 type ClusterVersionStatusApplyConfiguration struct {
     Desired *ReleaseApplyConfiguration `json:"desired,omitempty"`
@@ -15,7 +15,7 @@ type ClusterVersionStatusApplyConfiguration struct {
     ConditionalUpdates []ConditionalUpdateApplyConfiguration `json:"conditionalUpdates,omitempty"`
 }

-// ClusterVersionStatusApplyConfiguration constructs an declarative configuration of the ClusterVersionStatus type for use with
+// ClusterVersionStatusApplyConfiguration constructs a declarative configuration of the ClusterVersionStatus type for use with
 // apply.
 func ClusterVersionStatus() *ClusterVersionStatusApplyConfiguration {
     return &ClusterVersionStatusApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentoverride.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentoverride.go
index 8467acef4..e87332d89 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentoverride.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentoverride.go
@@ -2,7 +2,7 @@

 package v1

-// ComponentOverrideApplyConfiguration represents an declarative configuration of the ComponentOverride type for use
+// ComponentOverrideApplyConfiguration represents a declarative configuration of the ComponentOverride type for use
 // with apply.
 type ComponentOverrideApplyConfiguration struct {
     Kind *string `json:"kind,omitempty"`
@@ -12,7 +12,7 @@ type ComponentOverrideApplyConfiguration struct {
     Unmanaged *bool `json:"unmanaged,omitempty"`
 }

-// ComponentOverrideApplyConfiguration constructs an declarative configuration of the ComponentOverride type for use with
+// ComponentOverrideApplyConfiguration constructs a declarative configuration of the ComponentOverride type for use with
 // apply.
 func ComponentOverride() *ComponentOverrideApplyConfiguration {
     return &ComponentOverrideApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go
index 8e5973d91..f8a2c5e51 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go
@@ -6,7 +6,7 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// ComponentRouteSpecApplyConfiguration represents an declarative configuration of the ComponentRouteSpec type for use
+// ComponentRouteSpecApplyConfiguration represents a declarative configuration of the ComponentRouteSpec type for use
 // with apply.
 type ComponentRouteSpecApplyConfiguration struct {
     Namespace *string `json:"namespace,omitempty"`
@@ -15,7 +15,7 @@ type ComponentRouteSpecApplyConfiguration struct {
     ServingCertKeyPairSecret *SecretNameReferenceApplyConfiguration `json:"servingCertKeyPairSecret,omitempty"`
 }

-// ComponentRouteSpecApplyConfiguration constructs an declarative configuration of the ComponentRouteSpec type for use with
+// ComponentRouteSpecApplyConfiguration constructs a declarative configuration of the ComponentRouteSpec type for use with
 // apply.
 func ComponentRouteSpec() *ComponentRouteSpecApplyConfiguration {
     return &ComponentRouteSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go
index fe8c275de..abf378c84 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go
@@ -4,22 +4,22 @@ package v1

 import (
     v1 "github.com/openshift/api/config/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
 )

-// ComponentRouteStatusApplyConfiguration represents an declarative configuration of the ComponentRouteStatus type for use
+// ComponentRouteStatusApplyConfiguration represents a declarative configuration of the ComponentRouteStatus type for use
 // with apply.
 type ComponentRouteStatusApplyConfiguration struct {
-    Namespace        *string                             `json:"namespace,omitempty"`
-    Name             *string                             `json:"name,omitempty"`
-    DefaultHostname  *v1.Hostname                        `json:"defaultHostname,omitempty"`
-    ConsumingUsers   []v1.ConsumingUser                  `json:"consumingUsers,omitempty"`
-    CurrentHostnames []v1.Hostname                       `json:"currentHostnames,omitempty"`
-    Conditions       []metav1.Condition                  `json:"conditions,omitempty"`
-    RelatedObjects   []ObjectReferenceApplyConfiguration `json:"relatedObjects,omitempty"`
+    Namespace        *string                              `json:"namespace,omitempty"`
+    Name             *string                              `json:"name,omitempty"`
+    DefaultHostname  *v1.Hostname                         `json:"defaultHostname,omitempty"`
+    ConsumingUsers   []v1.ConsumingUser                   `json:"consumingUsers,omitempty"`
+    CurrentHostnames []v1.Hostname                        `json:"currentHostnames,omitempty"`
+    Conditions       []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+    RelatedObjects   []ObjectReferenceApplyConfiguration  `json:"relatedObjects,omitempty"`
 }

-// ComponentRouteStatusApplyConfiguration constructs an declarative configuration of the ComponentRouteStatus type for use with
+// ComponentRouteStatusApplyConfiguration constructs a declarative configuration of the ComponentRouteStatus type for use with
 // apply.
 func ComponentRouteStatus() *ComponentRouteStatusApplyConfiguration {
     return &ComponentRouteStatusApplyConfiguration{}
@@ -72,9 +72,12 @@ func (b *ComponentRouteStatusApplyConfiguration) WithCurrentHostnames(values ...
 // WithConditions adds the given value to the Conditions field in the declarative configuration
 // and returns the receiver, so that objects can be build by chaining "With" function invocations.
 // If called multiple times, values provided by each call will be appended to the Conditions field.
-func (b *ComponentRouteStatusApplyConfiguration) WithConditions(values ...metav1.Condition) *ComponentRouteStatusApplyConfiguration {
+func (b *ComponentRouteStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *ComponentRouteStatusApplyConfiguration {
     for i := range values {
-        b.Conditions = append(b.Conditions, values[i])
+        if values[i] == nil {
+            panic("nil value passed to WithConditions")
+        }
+        b.Conditions = append(b.Conditions, *values[i])
     }
     return b
 }
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdate.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdate.go
index 35205f82e..f183fc6e2 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdate.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdate.go
@@ -3,18 +3,18 @@
 package v1

 import (
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
 )

-// ConditionalUpdateApplyConfiguration represents an declarative configuration of the ConditionalUpdate type for use
+// ConditionalUpdateApplyConfiguration represents a declarative configuration of the ConditionalUpdate type for use
 // with apply.
 type ConditionalUpdateApplyConfiguration struct {
     Release    *ReleaseApplyConfiguration                `json:"release,omitempty"`
     Risks      []ConditionalUpdateRiskApplyConfiguration `json:"risks,omitempty"`
-    Conditions []metav1.Condition                        `json:"conditions,omitempty"`
+    Conditions []metav1.ConditionApplyConfiguration      `json:"conditions,omitempty"`
 }

-// ConditionalUpdateApplyConfiguration constructs an declarative configuration of the ConditionalUpdate type for use with
+// ConditionalUpdateApplyConfiguration constructs a declarative configuration of the ConditionalUpdate type for use with
 // apply.
 func ConditionalUpdate() *ConditionalUpdateApplyConfiguration {
     return &ConditionalUpdateApplyConfiguration{}
@@ -44,9 +44,12 @@ func (b *ConditionalUpdateApplyConfiguration) WithRisks(values ...*ConditionalUp
 // WithConditions adds the given value to the Conditions field in the declarative configuration
 // and returns the receiver, so that objects can be build by chaining "With" function invocations.
 // If called multiple times, values provided by each call will be appended to the Conditions field.
-func (b *ConditionalUpdateApplyConfiguration) WithConditions(values ...metav1.Condition) *ConditionalUpdateApplyConfiguration {
+func (b *ConditionalUpdateApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *ConditionalUpdateApplyConfiguration {
     for i := range values {
-        b.Conditions = append(b.Conditions, values[i])
+        if values[i] == nil {
+            panic("nil value passed to WithConditions")
+        }
+        b.Conditions = append(b.Conditions, *values[i])
     }
     return b
 }
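Both ComponentRouteStatus and ConditionalUpdate above move their `Conditions` field from the concrete `k8s.io/apimachinery` `metav1.Condition` to the `k8s.io/client-go` `metav1.ConditionApplyConfiguration` builder, so `WithConditions` now takes builder pointers and panics on nil. A hedged sketch of the caller-side migration, using only the APIs shown in these hunks; the condition values themselves are made up for illustration:

```go
package main

import (
	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)

func main() {
	// Before this bump: WithConditions(values ...metav1.Condition) took
	// apimachinery structs directly. After it, conditions are built with
	// the meta/v1 apply-configuration helpers and passed as pointers:
	cond := metav1.Condition().
		WithType("Recommended").
		WithStatus("False").
		WithReason("EvaluationFailed")

	_ = configv1.ConditionalUpdate().WithConditions(cond)
}
```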
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdaterisk.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdaterisk.go
index 49ff03f59..6debb6e62 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdaterisk.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdaterisk.go
@@ -2,7 +2,7 @@

 package v1

-// ConditionalUpdateRiskApplyConfiguration represents an declarative configuration of the ConditionalUpdateRisk type for use
+// ConditionalUpdateRiskApplyConfiguration represents a declarative configuration of the ConditionalUpdateRisk type for use
 // with apply.
 type ConditionalUpdateRiskApplyConfiguration struct {
     URL *string `json:"url,omitempty"`
@@ -11,7 +11,7 @@ type ConditionalUpdateRiskApplyConfiguration struct {
     MatchingRules []ClusterConditionApplyConfiguration `json:"matchingRules,omitempty"`
 }

-// ConditionalUpdateRiskApplyConfiguration constructs an declarative configuration of the ConditionalUpdateRisk type for use with
+// ConditionalUpdateRiskApplyConfiguration constructs a declarative configuration of the ConditionalUpdateRisk type for use with
 // apply.
 func ConditionalUpdateRisk() *ConditionalUpdateRiskApplyConfiguration {
     return &ConditionalUpdateRiskApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapfilereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapfilereference.go
index 4f03bf8b1..3c70be2c1 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapfilereference.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapfilereference.go
@@ -2,14 +2,14 @@

 package v1

-// ConfigMapFileReferenceApplyConfiguration represents an declarative configuration of the ConfigMapFileReference type for use
+// ConfigMapFileReferenceApplyConfiguration represents a declarative configuration of the ConfigMapFileReference type for use
 // with apply.
 type ConfigMapFileReferenceApplyConfiguration struct {
     Name *string `json:"name,omitempty"`
     Key  *string `json:"key,omitempty"`
 }

-// ConfigMapFileReferenceApplyConfiguration constructs an declarative configuration of the ConfigMapFileReference type for use with
+// ConfigMapFileReferenceApplyConfiguration constructs a declarative configuration of the ConfigMapFileReference type for use with
 // apply.
 func ConfigMapFileReference() *ConfigMapFileReferenceApplyConfiguration {
     return &ConfigMapFileReferenceApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapnamereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapnamereference.go
index b85607ef4..8236ba123 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapnamereference.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapnamereference.go
@@ -2,13 +2,13 @@

 package v1

-// ConfigMapNameReferenceApplyConfiguration represents an declarative configuration of the ConfigMapNameReference type for use
+// ConfigMapNameReferenceApplyConfiguration represents a declarative configuration of the ConfigMapNameReference type for use
 // with apply.
 type ConfigMapNameReferenceApplyConfiguration struct {
     Name *string `json:"name,omitempty"`
 }

-// ConfigMapNameReferenceApplyConfiguration constructs an declarative configuration of the ConfigMapNameReference type for use with
+// ConfigMapNameReferenceApplyConfiguration constructs a declarative configuration of the ConfigMapNameReference type for use with
 // apply.
 func ConfigMapNameReference() *ConfigMapNameReferenceApplyConfiguration {
     return &ConfigMapNameReferenceApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go
index 64188c220..58bf394da 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go
@@ -11,7 +11,7 @@ import (
     v1 "k8s.io/client-go/applyconfigurations/meta/v1"
 )

-// ConsoleApplyConfiguration represents an declarative configuration of the Console type for use
+// ConsoleApplyConfiguration represents a declarative configuration of the Console type for use
 // with apply.
 type ConsoleApplyConfiguration struct {
     v1.TypeMetaApplyConfiguration `json:",inline"`
@@ -20,7 +20,7 @@ type ConsoleApplyConfiguration struct {
     Status *ConsoleStatusApplyConfiguration `json:"status,omitempty"`
 }

-// Console constructs an declarative configuration of the Console type for use with
+// Console constructs a declarative configuration of the Console type for use with
 // apply.
 func Console(name string) *ConsoleApplyConfiguration {
     b := &ConsoleApplyConfiguration{}
@@ -238,3 +238,9 @@ func (b *ConsoleApplyConfiguration) WithStatus(value *ConsoleStatusApplyConfigur
     b.Status = value
     return b
 }
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ConsoleApplyConfiguration) GetName() *string {
+    b.ensureObjectMetaApplyConfigurationExists()
+    return b.Name
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consoleauthentication.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consoleauthentication.go
index 5c352fb02..cdc3aa732 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consoleauthentication.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consoleauthentication.go
@@ -2,13 +2,13 @@

 package v1

-// ConsoleAuthenticationApplyConfiguration represents an declarative configuration of the ConsoleAuthentication type for use
+// ConsoleAuthenticationApplyConfiguration represents a declarative configuration of the ConsoleAuthentication type for use
 // with apply.
 type ConsoleAuthenticationApplyConfiguration struct {
     LogoutRedirect *string `json:"logoutRedirect,omitempty"`
 }

-// ConsoleAuthenticationApplyConfiguration constructs an declarative configuration of the ConsoleAuthentication type for use with
+// ConsoleAuthenticationApplyConfiguration constructs a declarative configuration of the ConsoleAuthentication type for use with
 // apply.
 func ConsoleAuthentication() *ConsoleAuthenticationApplyConfiguration {
     return &ConsoleAuthenticationApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolespec.go
index ba7697106..0ce163b2b 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolespec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolespec.go
@@ -2,13 +2,13 @@

 package v1

-// ConsoleSpecApplyConfiguration represents an declarative configuration of the ConsoleSpec type for use
+// ConsoleSpecApplyConfiguration represents a declarative configuration of the ConsoleSpec type for use
 // with apply.
 type ConsoleSpecApplyConfiguration struct {
     Authentication *ConsoleAuthenticationApplyConfiguration `json:"authentication,omitempty"`
 }

-// ConsoleSpecApplyConfiguration constructs an declarative configuration of the ConsoleSpec type for use with
+// ConsoleSpecApplyConfiguration constructs a declarative configuration of the ConsoleSpec type for use with
 // apply.
 func ConsoleSpec() *ConsoleSpecApplyConfiguration {
     return &ConsoleSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolestatus.go
index 33e04b37a..f1336def3 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolestatus.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolestatus.go
@@ -2,13 +2,13 @@

 package v1

-// ConsoleStatusApplyConfiguration represents an declarative configuration of the ConsoleStatus type for use
+// ConsoleStatusApplyConfiguration represents a declarative configuration of the ConsoleStatus type for use
 // with apply.
 type ConsoleStatusApplyConfiguration struct {
     ConsoleURL *string `json:"consoleURL,omitempty"`
 }

-// ConsoleStatusApplyConfiguration constructs an declarative configuration of the ConsoleStatus type for use with
+// ConsoleStatusApplyConfiguration constructs a declarative configuration of the ConsoleStatus type for use with
 // apply.
 func ConsoleStatus() *ConsoleStatusApplyConfiguration {
     return &ConsoleStatusApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go
index 0ce419b28..a0a648287 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go
@@ -6,14 +6,14 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// CustomFeatureGatesApplyConfiguration represents an declarative configuration of the CustomFeatureGates type for use
+// CustomFeatureGatesApplyConfiguration represents a declarative configuration of the CustomFeatureGates type for use
 // with apply.
 type CustomFeatureGatesApplyConfiguration struct {
     Enabled  []v1.FeatureGateName `json:"enabled,omitempty"`
     Disabled []v1.FeatureGateName `json:"disabled,omitempty"`
 }

-// CustomFeatureGatesApplyConfiguration constructs an declarative configuration of the CustomFeatureGates type for use with
+// CustomFeatureGatesApplyConfiguration constructs a declarative configuration of the CustomFeatureGates type for use with
 // apply.
 func CustomFeatureGates() *CustomFeatureGatesApplyConfiguration {
     return &CustomFeatureGatesApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go
index cea54d882..f323e11b1 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go
@@ -6,13 +6,13 @@ import (
     configv1 "github.com/openshift/api/config/v1"
 )

-// CustomTLSProfileApplyConfiguration represents an declarative configuration of the CustomTLSProfile type for use
+// CustomTLSProfileApplyConfiguration represents a declarative configuration of the CustomTLSProfile type for use
 // with apply.
 type CustomTLSProfileApplyConfiguration struct {
     TLSProfileSpecApplyConfiguration `json:",inline"`
 }

-// CustomTLSProfileApplyConfiguration constructs an declarative configuration of the CustomTLSProfile type for use with
+// CustomTLSProfileApplyConfiguration constructs a declarative configuration of the CustomTLSProfile type for use with
 // apply.
 func CustomTLSProfile() *CustomTLSProfileApplyConfiguration {
     return &CustomTLSProfileApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/deprecatedwebhooktokenauthenticator.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/deprecatedwebhooktokenauthenticator.go
index bb312e756..20742aec9 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/deprecatedwebhooktokenauthenticator.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/deprecatedwebhooktokenauthenticator.go
@@ -2,13 +2,13 @@

 package v1

-// DeprecatedWebhookTokenAuthenticatorApplyConfiguration represents an declarative configuration of the DeprecatedWebhookTokenAuthenticator type for use
+// DeprecatedWebhookTokenAuthenticatorApplyConfiguration represents a declarative configuration of the DeprecatedWebhookTokenAuthenticator type for use
 // with apply.
 type DeprecatedWebhookTokenAuthenticatorApplyConfiguration struct {
     KubeConfig *SecretNameReferenceApplyConfiguration `json:"kubeConfig,omitempty"`
 }

-// DeprecatedWebhookTokenAuthenticatorApplyConfiguration constructs an declarative configuration of the DeprecatedWebhookTokenAuthenticator type for use with
+// DeprecatedWebhookTokenAuthenticatorApplyConfiguration constructs a declarative configuration of the DeprecatedWebhookTokenAuthenticator type for use with
 // apply.
 func DeprecatedWebhookTokenAuthenticator() *DeprecatedWebhookTokenAuthenticatorApplyConfiguration {
     return &DeprecatedWebhookTokenAuthenticatorApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go
index 2567ddf02..b01e456f5 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go
@@ -11,7 +11,7 @@ import (
     v1 "k8s.io/client-go/applyconfigurations/meta/v1"
 )

-// DNSApplyConfiguration represents an declarative configuration of the DNS type for use
+// DNSApplyConfiguration represents a declarative configuration of the DNS type for use
 // with apply.
 type DNSApplyConfiguration struct {
     v1.TypeMetaApplyConfiguration `json:",inline"`
@@ -20,7 +20,7 @@ type DNSApplyConfiguration struct {
     Status *apiconfigv1.DNSStatus `json:"status,omitempty"`
 }

-// DNS constructs an declarative configuration of the DNS type for use with
+// DNS constructs a declarative configuration of the DNS type for use with
 // apply.
 func DNS(name string) *DNSApplyConfiguration {
     b := &DNSApplyConfiguration{}
@@ -238,3 +238,9 @@ func (b *DNSApplyConfiguration) WithStatus(value apiconfigv1.DNSStatus) *DNSAppl
     b.Status = &value
     return b
 }
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *DNSApplyConfiguration) GetName() *string {
+    b.ensureObjectMetaApplyConfigurationExists()
+    return b.Name
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go
index 8f43c8c5f..fc15db1ef 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go
@@ -6,14 +6,14 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// DNSPlatformSpecApplyConfiguration represents an declarative configuration of the DNSPlatformSpec type for use
+// DNSPlatformSpecApplyConfiguration represents a declarative configuration of the DNSPlatformSpec type for use
 // with apply.
 type DNSPlatformSpecApplyConfiguration struct {
     Type *v1.PlatformType               `json:"type,omitempty"`
     AWS  *AWSDNSSpecApplyConfiguration  `json:"aws,omitempty"`
 }

-// DNSPlatformSpecApplyConfiguration constructs an declarative configuration of the DNSPlatformSpec type for use with
+// DNSPlatformSpecApplyConfiguration constructs a declarative configuration of the DNSPlatformSpec type for use with
 // apply.
 func DNSPlatformSpec() *DNSPlatformSpecApplyConfiguration {
     return &DNSPlatformSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go
index b534ef943..fbc8b60e7 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go
@@ -2,7 +2,7 @@

 package v1

-// DNSSpecApplyConfiguration represents an declarative configuration of the DNSSpec type for use
+// DNSSpecApplyConfiguration represents a declarative configuration of the DNSSpec type for use
 // with apply.
 type DNSSpecApplyConfiguration struct {
     BaseDomain *string `json:"baseDomain,omitempty"`
@@ -11,7 +11,7 @@ type DNSSpecApplyConfiguration struct {
     Platform *DNSPlatformSpecApplyConfiguration `json:"platform,omitempty"`
 }

-// DNSSpecApplyConfiguration constructs an declarative configuration of the DNSSpec type for use with
+// DNSSpecApplyConfiguration constructs a declarative configuration of the DNSSpec type for use with
 // apply.
 func DNSSpec() *DNSSpecApplyConfiguration {
     return &DNSSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnszone.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnszone.go
index 63b8d1fcc..39ef2776e 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnszone.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnszone.go
@@ -2,14 +2,14 @@

 package v1

-// DNSZoneApplyConfiguration represents an declarative configuration of the DNSZone type for use
+// DNSZoneApplyConfiguration represents a declarative configuration of the DNSZone type for use
 // with apply.
 type DNSZoneApplyConfiguration struct {
     ID   *string           `json:"id,omitempty"`
     Tags map[string]string `json:"tags,omitempty"`
 }

-// DNSZoneApplyConfiguration constructs an declarative configuration of the DNSZone type for use with
+// DNSZoneApplyConfiguration constructs a declarative configuration of the DNSZone type for use with
 // apply.
 func DNSZone() *DNSZoneApplyConfiguration {
     return &DNSZoneApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/equinixmetalplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/equinixmetalplatformstatus.go
index 2dbb3c386..8e17df603 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/equinixmetalplatformstatus.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/equinixmetalplatformstatus.go
@@ -2,14 +2,14 @@

 package v1

-// EquinixMetalPlatformStatusApplyConfiguration represents an declarative configuration of the EquinixMetalPlatformStatus type for use
+// EquinixMetalPlatformStatusApplyConfiguration represents a declarative configuration of the EquinixMetalPlatformStatus type for use
 // with apply.
 type EquinixMetalPlatformStatusApplyConfiguration struct {
     APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"`
     IngressIP           *string `json:"ingressIP,omitempty"`
 }

-// EquinixMetalPlatformStatusApplyConfiguration constructs an declarative configuration of the EquinixMetalPlatformStatus type for use with
+// EquinixMetalPlatformStatusApplyConfiguration constructs a declarative configuration of the EquinixMetalPlatformStatus type for use with
 // apply.
 func EquinixMetalPlatformStatus() *EquinixMetalPlatformStatusApplyConfiguration {
     return &EquinixMetalPlatformStatusApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalipconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalipconfig.go
index d9eb037ad..d3b9c1746 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalipconfig.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalipconfig.go
@@ -2,14 +2,14 @@

 package v1

-// ExternalIPConfigApplyConfiguration represents an declarative configuration of the ExternalIPConfig type for use
+// ExternalIPConfigApplyConfiguration represents a declarative configuration of the ExternalIPConfig type for use
 // with apply.
 type ExternalIPConfigApplyConfiguration struct {
     Policy          *ExternalIPPolicyApplyConfiguration `json:"policy,omitempty"`
     AutoAssignCIDRs []string                            `json:"autoAssignCIDRs,omitempty"`
 }

-// ExternalIPConfigApplyConfiguration constructs an declarative configuration of the ExternalIPConfig type for use with
+// ExternalIPConfigApplyConfiguration constructs a declarative configuration of the ExternalIPConfig type for use with
 // apply.
 func ExternalIPConfig() *ExternalIPConfigApplyConfiguration {
     return &ExternalIPConfigApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalippolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalippolicy.go
index c368ffac8..269d934b9 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalippolicy.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalippolicy.go
@@ -2,14 +2,14 @@

 package v1

-// ExternalIPPolicyApplyConfiguration represents an declarative configuration of the ExternalIPPolicy type for use
+// ExternalIPPolicyApplyConfiguration represents a declarative configuration of the ExternalIPPolicy type for use
 // with apply.
 type ExternalIPPolicyApplyConfiguration struct {
     AllowedCIDRs  []string `json:"allowedCIDRs,omitempty"`
     RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"`
 }

-// ExternalIPPolicyApplyConfiguration constructs an declarative configuration of the ExternalIPPolicy type for use with
+// ExternalIPPolicyApplyConfiguration constructs a declarative configuration of the ExternalIPPolicy type for use with
 // apply.
 func ExternalIPPolicy() *ExternalIPPolicyApplyConfiguration {
     return &ExternalIPPolicyApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go
index e9d5ccae5..d7640e142 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go
@@ -2,13 +2,13 @@

 package v1

-// ExternalPlatformSpecApplyConfiguration represents an declarative configuration of the ExternalPlatformSpec type for use
+// ExternalPlatformSpecApplyConfiguration represents a declarative configuration of the ExternalPlatformSpec type for use
 // with apply.
 type ExternalPlatformSpecApplyConfiguration struct {
     PlatformName *string `json:"platformName,omitempty"`
 }

-// ExternalPlatformSpecApplyConfiguration constructs an declarative configuration of the ExternalPlatformSpec type for use with
+// ExternalPlatformSpecApplyConfiguration constructs a declarative configuration of the ExternalPlatformSpec type for use with
 // apply.
 func ExternalPlatformSpec() *ExternalPlatformSpecApplyConfiguration {
     return &ExternalPlatformSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformstatus.go
index 12e246227..65f8f2b10 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformstatus.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformstatus.go
@@ -2,13 +2,13 @@

 package v1

-// ExternalPlatformStatusApplyConfiguration represents an declarative configuration of the ExternalPlatformStatus type for use
+// ExternalPlatformStatusApplyConfiguration represents a declarative configuration of the ExternalPlatformStatus type for use
 // with apply.
 type ExternalPlatformStatusApplyConfiguration struct {
     CloudControllerManager *CloudControllerManagerStatusApplyConfiguration `json:"cloudControllerManager,omitempty"`
 }

-// ExternalPlatformStatusApplyConfiguration constructs an declarative configuration of the ExternalPlatformStatus type for use with
+// ExternalPlatformStatusApplyConfiguration constructs a declarative configuration of the ExternalPlatformStatus type for use with
 // apply.
 func ExternalPlatformStatus() *ExternalPlatformStatusApplyConfiguration {
     return &ExternalPlatformStatusApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go
index 4ba3ab9c5..b17945290 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go
@@ -11,7 +11,7 @@ import (
     v1 "k8s.io/client-go/applyconfigurations/meta/v1"
 )

-// FeatureGateApplyConfiguration represents an declarative configuration of the FeatureGate type for use
+// FeatureGateApplyConfiguration represents a declarative configuration of the FeatureGate type for use
 // with apply.
 type FeatureGateApplyConfiguration struct {
     v1.TypeMetaApplyConfiguration `json:",inline"`
@@ -20,7 +20,7 @@ type FeatureGateApplyConfiguration struct {
     Status *FeatureGateStatusApplyConfiguration `json:"status,omitempty"`
 }

-// FeatureGate constructs an declarative configuration of the FeatureGate type for use with
+// FeatureGate constructs a declarative configuration of the FeatureGate type for use with
 // apply.
 func FeatureGate(name string) *FeatureGateApplyConfiguration {
     b := &FeatureGateApplyConfiguration{}
@@ -238,3 +238,9 @@ func (b *FeatureGateApplyConfiguration) WithStatus(value *FeatureGateStatusApply
     b.Status = value
     return b
 }
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *FeatureGateApplyConfiguration) GetName() *string {
+    b.ensureObjectMetaApplyConfigurationExists()
+    return b.Name
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go
index 817cf44f6..200460a29 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go
@@ -6,13 +6,13 @@ import (
     v1 "github.com/openshift/api/config/v1"
 )

-// FeatureGateAttributesApplyConfiguration represents an declarative configuration of the FeatureGateAttributes type for use
+// FeatureGateAttributesApplyConfiguration represents a declarative configuration of the FeatureGateAttributes type for use
 // with apply.
 type FeatureGateAttributesApplyConfiguration struct {
     Name *v1.FeatureGateName `json:"name,omitempty"`
 }

-// FeatureGateAttributesApplyConfiguration constructs an declarative configuration of the FeatureGateAttributes type for use with
+// FeatureGateAttributesApplyConfiguration constructs a declarative configuration of the FeatureGateAttributes type for use with
 // apply.
 func FeatureGateAttributes() *FeatureGateAttributesApplyConfiguration {
     return &FeatureGateAttributesApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go
index 61bd51ca2..c451f74df 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go
@@ -2,7 +2,7 @@

 package v1

-// FeatureGateDetailsApplyConfiguration represents an declarative configuration of the FeatureGateDetails type for use
+// FeatureGateDetailsApplyConfiguration represents a declarative configuration of the FeatureGateDetails type for use
 // with apply.
 type FeatureGateDetailsApplyConfiguration struct {
     Version *string `json:"version,omitempty"`
@@ -10,7 +10,7 @@ type FeatureGateDetailsApplyConfiguration struct {
     Disabled []FeatureGateAttributesApplyConfiguration `json:"disabled,omitempty"`
 }

-// FeatureGateDetailsApplyConfiguration constructs an declarative configuration of the FeatureGateDetails type for use with
+// FeatureGateDetailsApplyConfiguration constructs a declarative configuration of the FeatureGateDetails type for use with
 // apply.
func FeatureGateDetails() *FeatureGateDetailsApplyConfiguration { return &FeatureGateDetailsApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go index f22ead2c0..2aac4666e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go @@ -6,14 +6,14 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// FeatureGateSelectionApplyConfiguration represents an declarative configuration of the FeatureGateSelection type for use +// FeatureGateSelectionApplyConfiguration represents a declarative configuration of the FeatureGateSelection type for use // with apply. type FeatureGateSelectionApplyConfiguration struct { FeatureSet *v1.FeatureSet `json:"featureSet,omitempty"` CustomNoUpgrade *CustomFeatureGatesApplyConfiguration `json:"customNoUpgrade,omitempty"` } -// FeatureGateSelectionApplyConfiguration constructs an declarative configuration of the FeatureGateSelection type for use with +// FeatureGateSelectionApplyConfiguration constructs a declarative configuration of the FeatureGateSelection type for use with // apply. func FeatureGateSelection() *FeatureGateSelectionApplyConfiguration { return &FeatureGateSelectionApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go index 71a746419..39b85b5dd 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go @@ -6,13 +6,13 @@ import ( configv1 "github.com/openshift/api/config/v1" ) -// FeatureGateSpecApplyConfiguration represents an declarative configuration of the FeatureGateSpec type for use +// FeatureGateSpecApplyConfiguration represents a declarative configuration of the FeatureGateSpec type for use // with apply. type FeatureGateSpecApplyConfiguration struct { FeatureGateSelectionApplyConfiguration `json:",inline"` } -// FeatureGateSpecApplyConfiguration constructs an declarative configuration of the FeatureGateSpec type for use with +// FeatureGateSpecApplyConfiguration constructs a declarative configuration of the FeatureGateSpec type for use with // apply. func FeatureGateSpec() *FeatureGateSpecApplyConfiguration { return &FeatureGateSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go index 9ffe735b7..2cc69267e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go @@ -3,17 +3,17 @@ package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FeatureGateStatusApplyConfiguration represents an declarative configuration of the FeatureGateStatus type for use +// FeatureGateStatusApplyConfiguration represents a declarative configuration of the FeatureGateStatus type for use // with apply. 
type FeatureGateStatusApplyConfiguration struct { - Conditions []v1.Condition `json:"conditions,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` FeatureGates []FeatureGateDetailsApplyConfiguration `json:"featureGates,omitempty"` } -// FeatureGateStatusApplyConfiguration constructs an declarative configuration of the FeatureGateStatus type for use with +// FeatureGateStatusApplyConfiguration constructs a declarative configuration of the FeatureGateStatus type for use with // apply. func FeatureGateStatus() *FeatureGateStatusApplyConfiguration { return &FeatureGateStatusApplyConfiguration{} @@ -22,9 +22,12 @@ func FeatureGateStatus() *FeatureGateStatusApplyConfiguration { // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *FeatureGateStatusApplyConfiguration) WithConditions(values ...v1.Condition) *FeatureGateStatusApplyConfiguration { +func (b *FeatureGateStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *FeatureGateStatusApplyConfiguration { for i := range values { - b.Conditions = append(b.Conditions, values[i]) + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go index 9e35e3c60..9c28888cf 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go @@ -2,7 +2,7 @@ package v1 -// GCPPlatformStatusApplyConfiguration represents an declarative configuration of the GCPPlatformStatus type for use +// GCPPlatformStatusApplyConfiguration represents a declarative configuration of the GCPPlatformStatus type for use // with apply. type GCPPlatformStatusApplyConfiguration struct { ProjectID *string `json:"projectID,omitempty"` @@ -12,7 +12,7 @@ type GCPPlatformStatusApplyConfiguration struct { CloudLoadBalancerConfig *CloudLoadBalancerConfigApplyConfiguration `json:"cloudLoadBalancerConfig,omitempty"` } -// GCPPlatformStatusApplyConfiguration constructs an declarative configuration of the GCPPlatformStatus type for use with +// GCPPlatformStatusApplyConfiguration constructs a declarative configuration of the GCPPlatformStatus type for use with // apply. 
func GCPPlatformStatus() *GCPPlatformStatusApplyConfiguration { return &GCPPlatformStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcelabel.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcelabel.go index 685b14fe1..5d408e45e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcelabel.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcelabel.go @@ -2,14 +2,14 @@ package v1 -// GCPResourceLabelApplyConfiguration represents an declarative configuration of the GCPResourceLabel type for use +// GCPResourceLabelApplyConfiguration represents a declarative configuration of the GCPResourceLabel type for use // with apply. type GCPResourceLabelApplyConfiguration struct { Key *string `json:"key,omitempty"` Value *string `json:"value,omitempty"` } -// GCPResourceLabelApplyConfiguration constructs an declarative configuration of the GCPResourceLabel type for use with +// GCPResourceLabelApplyConfiguration constructs a declarative configuration of the GCPResourceLabel type for use with // apply. func GCPResourceLabel() *GCPResourceLabelApplyConfiguration { return &GCPResourceLabelApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcetag.go index 9611b2853..8f22d3a54 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcetag.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcetag.go @@ -2,7 +2,7 @@ package v1 -// GCPResourceTagApplyConfiguration represents an declarative configuration of the GCPResourceTag type for use +// GCPResourceTagApplyConfiguration represents a declarative configuration of the GCPResourceTag type for use // with apply. type GCPResourceTagApplyConfiguration struct { ParentID *string `json:"parentID,omitempty"` @@ -10,7 +10,7 @@ type GCPResourceTagApplyConfiguration struct { Value *string `json:"value,omitempty"` } -// GCPResourceTagApplyConfiguration constructs an declarative configuration of the GCPResourceTag type for use with +// GCPResourceTagApplyConfiguration constructs a declarative configuration of the GCPResourceTag type for use with // apply. func GCPResourceTag() *GCPResourceTagApplyConfiguration { return &GCPResourceTagApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/githubidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/githubidentityprovider.go index bdaa2c7ac..c797463d3 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/githubidentityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/githubidentityprovider.go @@ -2,7 +2,7 @@ package v1 -// GitHubIdentityProviderApplyConfiguration represents an declarative configuration of the GitHubIdentityProvider type for use +// GitHubIdentityProviderApplyConfiguration represents a declarative configuration of the GitHubIdentityProvider type for use // with apply. 
type GitHubIdentityProviderApplyConfiguration struct { ClientID *string `json:"clientID,omitempty"` @@ -13,7 +13,7 @@ type GitHubIdentityProviderApplyConfiguration struct { CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"` } -// GitHubIdentityProviderApplyConfiguration constructs an declarative configuration of the GitHubIdentityProvider type for use with +// GitHubIdentityProviderApplyConfiguration constructs a declarative configuration of the GitHubIdentityProvider type for use with // apply. func GitHubIdentityProvider() *GitHubIdentityProviderApplyConfiguration { return &GitHubIdentityProviderApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gitlabidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gitlabidentityprovider.go index ece6b0eef..e6a542e1c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gitlabidentityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gitlabidentityprovider.go @@ -2,7 +2,7 @@ package v1 -// GitLabIdentityProviderApplyConfiguration represents an declarative configuration of the GitLabIdentityProvider type for use +// GitLabIdentityProviderApplyConfiguration represents a declarative configuration of the GitLabIdentityProvider type for use // with apply. type GitLabIdentityProviderApplyConfiguration struct { ClientID *string `json:"clientID,omitempty"` @@ -11,7 +11,7 @@ type GitLabIdentityProviderApplyConfiguration struct { CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"` } -// GitLabIdentityProviderApplyConfiguration constructs an declarative configuration of the GitLabIdentityProvider type for use with +// GitLabIdentityProviderApplyConfiguration constructs a declarative configuration of the GitLabIdentityProvider type for use with // apply. func GitLabIdentityProvider() *GitLabIdentityProviderApplyConfiguration { return &GitLabIdentityProviderApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/googleidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/googleidentityprovider.go index 1d38e5845..d82868069 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/googleidentityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/googleidentityprovider.go @@ -2,7 +2,7 @@ package v1 -// GoogleIdentityProviderApplyConfiguration represents an declarative configuration of the GoogleIdentityProvider type for use +// GoogleIdentityProviderApplyConfiguration represents a declarative configuration of the GoogleIdentityProvider type for use // with apply. type GoogleIdentityProviderApplyConfiguration struct { ClientID *string `json:"clientID,omitempty"` @@ -10,7 +10,7 @@ type GoogleIdentityProviderApplyConfiguration struct { HostedDomain *string `json:"hostedDomain,omitempty"` } -// GoogleIdentityProviderApplyConfiguration constructs an declarative configuration of the GoogleIdentityProvider type for use with +// GoogleIdentityProviderApplyConfiguration constructs a declarative configuration of the GoogleIdentityProvider type for use with // apply. 
func GoogleIdentityProvider() *GoogleIdentityProviderApplyConfiguration { return &GoogleIdentityProviderApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/htpasswdidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/htpasswdidentityprovider.go index 719b87435..f5c689bbe 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/htpasswdidentityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/htpasswdidentityprovider.go @@ -2,13 +2,13 @@ package v1 -// HTPasswdIdentityProviderApplyConfiguration represents an declarative configuration of the HTPasswdIdentityProvider type for use +// HTPasswdIdentityProviderApplyConfiguration represents a declarative configuration of the HTPasswdIdentityProvider type for use // with apply. type HTPasswdIdentityProviderApplyConfiguration struct { FileData *SecretNameReferenceApplyConfiguration `json:"fileData,omitempty"` } -// HTPasswdIdentityProviderApplyConfiguration constructs an declarative configuration of the HTPasswdIdentityProvider type for use with +// HTPasswdIdentityProviderApplyConfiguration constructs a declarative configuration of the HTPasswdIdentityProvider type for use with // apply. func HTPasswdIdentityProvider() *HTPasswdIdentityProviderApplyConfiguration { return &HTPasswdIdentityProviderApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsource.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsource.go index 6b0683b9f..333802bfe 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsource.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsource.go @@ -2,14 +2,14 @@ package v1 -// HubSourceApplyConfiguration represents an declarative configuration of the HubSource type for use +// HubSourceApplyConfiguration represents a declarative configuration of the HubSource type for use // with apply. type HubSourceApplyConfiguration struct { Name *string `json:"name,omitempty"` Disabled *bool `json:"disabled,omitempty"` } -// HubSourceApplyConfiguration constructs an declarative configuration of the HubSource type for use with +// HubSourceApplyConfiguration constructs a declarative configuration of the HubSource type for use with // apply. func HubSource() *HubSourceApplyConfiguration { return &HubSourceApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go index 6c466f96e..4a8f0c437 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go @@ -2,7 +2,7 @@ package v1 -// HubSourceStatusApplyConfiguration represents an declarative configuration of the HubSourceStatus type for use +// HubSourceStatusApplyConfiguration represents a declarative configuration of the HubSourceStatus type for use // with apply. 
type HubSourceStatusApplyConfiguration struct { *HubSourceApplyConfiguration `json:"HubSource,omitempty"` @@ -10,7 +10,7 @@ type HubSourceStatusApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// HubSourceStatusApplyConfiguration constructs an declarative configuration of the HubSourceStatus type for use with +// HubSourceStatusApplyConfiguration constructs a declarative configuration of the HubSourceStatus type for use with // apply. func HubSourceStatus() *HubSourceStatusApplyConfiguration { return &HubSourceStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go index 9d1933377..b4b3be2ff 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// IBMCloudPlatformStatusApplyConfiguration represents an declarative configuration of the IBMCloudPlatformStatus type for use +// IBMCloudPlatformStatusApplyConfiguration represents a declarative configuration of the IBMCloudPlatformStatus type for use // with apply. type IBMCloudPlatformStatusApplyConfiguration struct { Location *string `json:"location,omitempty"` @@ -17,7 +17,7 @@ type IBMCloudPlatformStatusApplyConfiguration struct { ServiceEndpoints []IBMCloudServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` } -// IBMCloudPlatformStatusApplyConfiguration constructs an declarative configuration of the IBMCloudPlatformStatus type for use with +// IBMCloudPlatformStatusApplyConfiguration constructs a declarative configuration of the IBMCloudPlatformStatus type for use with // apply. func IBMCloudPlatformStatus() *IBMCloudPlatformStatusApplyConfiguration { return &IBMCloudPlatformStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go index 7fc9f8632..229104061 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go @@ -6,14 +6,14 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// IBMCloudServiceEndpointApplyConfiguration represents an declarative configuration of the IBMCloudServiceEndpoint type for use +// IBMCloudServiceEndpointApplyConfiguration represents a declarative configuration of the IBMCloudServiceEndpoint type for use // with apply. type IBMCloudServiceEndpointApplyConfiguration struct { Name *v1.IBMCloudServiceName `json:"name,omitempty"` URL *string `json:"url,omitempty"` } -// IBMCloudServiceEndpointApplyConfiguration constructs an declarative configuration of the IBMCloudServiceEndpoint type for use with +// IBMCloudServiceEndpointApplyConfiguration constructs a declarative configuration of the IBMCloudServiceEndpoint type for use with // apply. 
func IBMCloudServiceEndpoint() *IBMCloudServiceEndpointApplyConfiguration { return &IBMCloudServiceEndpointApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go index 869d822bb..35edd9dff 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// IdentityProviderApplyConfiguration represents an declarative configuration of the IdentityProvider type for use +// IdentityProviderApplyConfiguration represents a declarative configuration of the IdentityProvider type for use // with apply. type IdentityProviderApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -14,7 +14,7 @@ type IdentityProviderApplyConfiguration struct { IdentityProviderConfigApplyConfiguration `json:",inline"` } -// IdentityProviderApplyConfiguration constructs an declarative configuration of the IdentityProvider type for use with +// IdentityProviderApplyConfiguration constructs a declarative configuration of the IdentityProvider type for use with // apply. func IdentityProvider() *IdentityProviderApplyConfiguration { return &IdentityProviderApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go index e87c12287..208d23d56 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// IdentityProviderConfigApplyConfiguration represents an declarative configuration of the IdentityProviderConfig type for use +// IdentityProviderConfigApplyConfiguration represents a declarative configuration of the IdentityProviderConfig type for use // with apply. type IdentityProviderConfigApplyConfiguration struct { Type *v1.IdentityProviderType `json:"type,omitempty"` @@ -21,7 +21,7 @@ type IdentityProviderConfigApplyConfiguration struct { RequestHeader *RequestHeaderIdentityProviderApplyConfiguration `json:"requestHeader,omitempty"` } -// IdentityProviderConfigApplyConfiguration constructs an declarative configuration of the IdentityProviderConfig type for use with +// IdentityProviderConfigApplyConfiguration constructs a declarative configuration of the IdentityProviderConfig type for use with // apply. 
func IdentityProviderConfig() *IdentityProviderConfigApplyConfiguration { return &IdentityProviderConfigApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go index f93741283..34f73ad72 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ImageApplyConfiguration represents an declarative configuration of the Image type for use +// ImageApplyConfiguration represents a declarative configuration of the Image type for use // with apply. type ImageApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type ImageApplyConfiguration struct { Status *ImageStatusApplyConfiguration `json:"status,omitempty"` } -// Image constructs an declarative configuration of the Image type for use with +// Image constructs a declarative configuration of the Image type for use with // apply. func Image(name string) *ImageApplyConfiguration { b := &ImageApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *ImageApplyConfiguration) WithStatus(value *ImageStatusApplyConfiguratio b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImageApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go index bded262d7..9566569f9 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ImageContentPolicyApplyConfiguration represents an declarative configuration of the ImageContentPolicy type for use +// ImageContentPolicyApplyConfiguration represents a declarative configuration of the ImageContentPolicy type for use // with apply. type ImageContentPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -19,7 +19,7 @@ type ImageContentPolicyApplyConfiguration struct { Spec *ImageContentPolicySpecApplyConfiguration `json:"spec,omitempty"` } -// ImageContentPolicy constructs an declarative configuration of the ImageContentPolicy type for use with +// ImageContentPolicy constructs a declarative configuration of the ImageContentPolicy type for use with // apply. func ImageContentPolicy(name string) *ImageContentPolicyApplyConfiguration { b := &ImageContentPolicyApplyConfiguration{} @@ -229,3 +229,9 @@ func (b *ImageContentPolicyApplyConfiguration) WithSpec(value *ImageContentPolic b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ImageContentPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicyspec.go index 5f063096f..ea674157c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicyspec.go @@ -2,13 +2,13 @@ package v1 -// ImageContentPolicySpecApplyConfiguration represents an declarative configuration of the ImageContentPolicySpec type for use +// ImageContentPolicySpecApplyConfiguration represents a declarative configuration of the ImageContentPolicySpec type for use // with apply. type ImageContentPolicySpecApplyConfiguration struct { RepositoryDigestMirrors []RepositoryDigestMirrorsApplyConfiguration `json:"repositoryDigestMirrors,omitempty"` } -// ImageContentPolicySpecApplyConfiguration constructs an declarative configuration of the ImageContentPolicySpec type for use with +// ImageContentPolicySpecApplyConfiguration constructs a declarative configuration of the ImageContentPolicySpec type for use with // apply. func ImageContentPolicySpec() *ImageContentPolicySpecApplyConfiguration { return &ImageContentPolicySpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go index 47aa3bb82..dc9889674 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// ImageDigestMirrorsApplyConfiguration represents an declarative configuration of the ImageDigestMirrors type for use +// ImageDigestMirrorsApplyConfiguration represents a declarative configuration of the ImageDigestMirrors type for use // with apply. type ImageDigestMirrorsApplyConfiguration struct { Source *string `json:"source,omitempty"` @@ -14,7 +14,7 @@ type ImageDigestMirrorsApplyConfiguration struct { MirrorSourcePolicy *v1.MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"` } -// ImageDigestMirrorsApplyConfiguration constructs an declarative configuration of the ImageDigestMirrors type for use with +// ImageDigestMirrorsApplyConfiguration constructs a declarative configuration of the ImageDigestMirrors type for use with // apply. 
func ImageDigestMirrors() *ImageDigestMirrorsApplyConfiguration { return &ImageDigestMirrorsApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go index b1e2aab4f..80140961b 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ImageDigestMirrorSetApplyConfiguration represents an declarative configuration of the ImageDigestMirrorSet type for use +// ImageDigestMirrorSetApplyConfiguration represents a declarative configuration of the ImageDigestMirrorSet type for use // with apply. type ImageDigestMirrorSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type ImageDigestMirrorSetApplyConfiguration struct { Status *apiconfigv1.ImageDigestMirrorSetStatus `json:"status,omitempty"` } -// ImageDigestMirrorSet constructs an declarative configuration of the ImageDigestMirrorSet type for use with +// ImageDigestMirrorSet constructs a declarative configuration of the ImageDigestMirrorSet type for use with // apply. func ImageDigestMirrorSet(name string) *ImageDigestMirrorSetApplyConfiguration { b := &ImageDigestMirrorSetApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithStatus(value apiconfigv1.Im b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImageDigestMirrorSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorsetspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorsetspec.go index f34a0c0af..fbb9d48ca 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorsetspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorsetspec.go @@ -2,13 +2,13 @@ package v1 -// ImageDigestMirrorSetSpecApplyConfiguration represents an declarative configuration of the ImageDigestMirrorSetSpec type for use +// ImageDigestMirrorSetSpecApplyConfiguration represents a declarative configuration of the ImageDigestMirrorSetSpec type for use // with apply. type ImageDigestMirrorSetSpecApplyConfiguration struct { ImageDigestMirrors []ImageDigestMirrorsApplyConfiguration `json:"imageDigestMirrors,omitempty"` } -// ImageDigestMirrorSetSpecApplyConfiguration constructs an declarative configuration of the ImageDigestMirrorSetSpec type for use with +// ImageDigestMirrorSetSpecApplyConfiguration constructs a declarative configuration of the ImageDigestMirrorSetSpec type for use with // apply. 
func ImageDigestMirrorSetSpec() *ImageDigestMirrorSetSpecApplyConfiguration { return &ImageDigestMirrorSetSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagelabel.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagelabel.go index 1199666c4..1d1910547 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagelabel.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagelabel.go @@ -2,14 +2,14 @@ package v1 -// ImageLabelApplyConfiguration represents an declarative configuration of the ImageLabel type for use +// ImageLabelApplyConfiguration represents a declarative configuration of the ImageLabel type for use // with apply. type ImageLabelApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// ImageLabelApplyConfiguration constructs an declarative configuration of the ImageLabel type for use with +// ImageLabelApplyConfiguration constructs a declarative configuration of the ImageLabel type for use with // apply. func ImageLabel() *ImageLabelApplyConfiguration { return &ImageLabelApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagespec.go index 10e80e77f..2c3bf2687 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagespec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagespec.go @@ -2,16 +2,21 @@ package v1 -// ImageSpecApplyConfiguration represents an declarative configuration of the ImageSpec type for use +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// ImageSpecApplyConfiguration represents a declarative configuration of the ImageSpec type for use // with apply. type ImageSpecApplyConfiguration struct { AllowedRegistriesForImport []RegistryLocationApplyConfiguration `json:"allowedRegistriesForImport,omitempty"` ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` AdditionalTrustedCA *ConfigMapNameReferenceApplyConfiguration `json:"additionalTrustedCA,omitempty"` RegistrySources *RegistrySourcesApplyConfiguration `json:"registrySources,omitempty"` + ImageStreamImportMode *configv1.ImportModeType `json:"imageStreamImportMode,omitempty"` } -// ImageSpecApplyConfiguration constructs an declarative configuration of the ImageSpec type for use with +// ImageSpecApplyConfiguration constructs a declarative configuration of the ImageSpec type for use with // apply. func ImageSpec() *ImageSpecApplyConfiguration { return &ImageSpecApplyConfiguration{} @@ -55,3 +60,11 @@ func (b *ImageSpecApplyConfiguration) WithRegistrySources(value *RegistrySources b.RegistrySources = value return b } + +// WithImageStreamImportMode sets the ImageStreamImportMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageStreamImportMode field is set to the value of the last call. 
+func (b *ImageSpecApplyConfiguration) WithImageStreamImportMode(value configv1.ImportModeType) *ImageSpecApplyConfiguration { + b.ImageStreamImportMode = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go index 38c90271a..601accbe6 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go @@ -2,14 +2,19 @@ package v1 -// ImageStatusApplyConfiguration represents an declarative configuration of the ImageStatus type for use +import ( + v1 "github.com/openshift/api/config/v1" +) + +// ImageStatusApplyConfiguration represents a declarative configuration of the ImageStatus type for use // with apply. type ImageStatusApplyConfiguration struct { - InternalRegistryHostname *string `json:"internalRegistryHostname,omitempty"` - ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` + InternalRegistryHostname *string `json:"internalRegistryHostname,omitempty"` + ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` + ImageStreamImportMode *v1.ImportModeType `json:"imageStreamImportMode,omitempty"` } -// ImageStatusApplyConfiguration constructs an declarative configuration of the ImageStatus type for use with +// ImageStatusApplyConfiguration constructs a declarative configuration of the ImageStatus type for use with // apply. func ImageStatus() *ImageStatusApplyConfiguration { return &ImageStatusApplyConfiguration{} @@ -32,3 +37,11 @@ func (b *ImageStatusApplyConfiguration) WithExternalRegistryHostnames(values ... } return b } + +// WithImageStreamImportMode sets the ImageStreamImportMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageStreamImportMode field is set to the value of the last call. +func (b *ImageStatusApplyConfiguration) WithImageStreamImportMode(value v1.ImportModeType) *ImageStatusApplyConfiguration { + b.ImageStreamImportMode = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go index 212d0d01a..bede604d8 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// ImageTagMirrorsApplyConfiguration represents an declarative configuration of the ImageTagMirrors type for use +// ImageTagMirrorsApplyConfiguration represents a declarative configuration of the ImageTagMirrors type for use // with apply. type ImageTagMirrorsApplyConfiguration struct { Source *string `json:"source,omitempty"` @@ -14,7 +14,7 @@ type ImageTagMirrorsApplyConfiguration struct { MirrorSourcePolicy *v1.MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"` } -// ImageTagMirrorsApplyConfiguration constructs an declarative configuration of the ImageTagMirrors type for use with +// ImageTagMirrorsApplyConfiguration constructs a declarative configuration of the ImageTagMirrors type for use with // apply. 
func ImageTagMirrors() *ImageTagMirrorsApplyConfiguration { return &ImageTagMirrorsApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go index 1b85cc600..37432fac0 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ImageTagMirrorSetApplyConfiguration represents an declarative configuration of the ImageTagMirrorSet type for use +// ImageTagMirrorSetApplyConfiguration represents a declarative configuration of the ImageTagMirrorSet type for use // with apply. type ImageTagMirrorSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type ImageTagMirrorSetApplyConfiguration struct { Status *apiconfigv1.ImageTagMirrorSetStatus `json:"status,omitempty"` } -// ImageTagMirrorSet constructs an declarative configuration of the ImageTagMirrorSet type for use with +// ImageTagMirrorSet constructs a declarative configuration of the ImageTagMirrorSet type for use with // apply. func ImageTagMirrorSet(name string) *ImageTagMirrorSetApplyConfiguration { b := &ImageTagMirrorSetApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithStatus(value apiconfigv1.Image b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImageTagMirrorSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorsetspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorsetspec.go index a6e1d9a39..ca59c3871 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorsetspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorsetspec.go @@ -2,13 +2,13 @@ package v1 -// ImageTagMirrorSetSpecApplyConfiguration represents an declarative configuration of the ImageTagMirrorSetSpec type for use +// ImageTagMirrorSetSpecApplyConfiguration represents a declarative configuration of the ImageTagMirrorSetSpec type for use // with apply. type ImageTagMirrorSetSpecApplyConfiguration struct { ImageTagMirrors []ImageTagMirrorsApplyConfiguration `json:"imageTagMirrors,omitempty"` } -// ImageTagMirrorSetSpecApplyConfiguration constructs an declarative configuration of the ImageTagMirrorSetSpec type for use with +// ImageTagMirrorSetSpecApplyConfiguration constructs a declarative configuration of the ImageTagMirrorSetSpec type for use with // apply. 
func ImageTagMirrorSetSpec() *ImageTagMirrorSetSpecApplyConfiguration { return &ImageTagMirrorSetSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go index 9a1fe21bd..5d9551be6 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// InfrastructureApplyConfiguration represents an declarative configuration of the Infrastructure type for use +// InfrastructureApplyConfiguration represents a declarative configuration of the Infrastructure type for use // with apply. type InfrastructureApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type InfrastructureApplyConfiguration struct { Status *InfrastructureStatusApplyConfiguration `json:"status,omitempty"` } -// Infrastructure constructs an declarative configuration of the Infrastructure type for use with +// Infrastructure constructs a declarative configuration of the Infrastructure type for use with // apply. func Infrastructure(name string) *InfrastructureApplyConfiguration { b := &InfrastructureApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *InfrastructureApplyConfiguration) WithStatus(value *InfrastructureStatu b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *InfrastructureApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurespec.go index eb2f1636d..83dccde29 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurespec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurespec.go @@ -2,14 +2,14 @@ package v1 -// InfrastructureSpecApplyConfiguration represents an declarative configuration of the InfrastructureSpec type for use +// InfrastructureSpecApplyConfiguration represents a declarative configuration of the InfrastructureSpec type for use // with apply. type InfrastructureSpecApplyConfiguration struct { CloudConfig *ConfigMapFileReferenceApplyConfiguration `json:"cloudConfig,omitempty"` PlatformSpec *PlatformSpecApplyConfiguration `json:"platformSpec,omitempty"` } -// InfrastructureSpecApplyConfiguration constructs an declarative configuration of the InfrastructureSpec type for use with +// InfrastructureSpecApplyConfiguration constructs a declarative configuration of the InfrastructureSpec type for use with // apply. 
func InfrastructureSpec() *InfrastructureSpecApplyConfiguration { return &InfrastructureSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go index 0f45b5562..89f5496df 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// InfrastructureStatusApplyConfiguration represents an declarative configuration of the InfrastructureStatus type for use +// InfrastructureStatusApplyConfiguration represents a declarative configuration of the InfrastructureStatus type for use // with apply. type InfrastructureStatusApplyConfiguration struct { InfrastructureName *string `json:"infrastructureName,omitempty"` @@ -20,7 +20,7 @@ type InfrastructureStatusApplyConfiguration struct { CPUPartitioning *v1.CPUPartitioningMode `json:"cpuPartitioning,omitempty"` } -// InfrastructureStatusApplyConfiguration constructs an declarative configuration of the InfrastructureStatus type for use with +// InfrastructureStatusApplyConfiguration constructs a declarative configuration of the InfrastructureStatus type for use with // apply. func InfrastructureStatus() *InfrastructureStatusApplyConfiguration { return &InfrastructureStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go index c3fb8b814..b8780886d 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go index ae1b18fd3..f3e25215b 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go @@ -6,14 +6,14 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// IngressPlatformSpecApplyConfiguration represents an declarative configuration of the IngressPlatformSpec type for use +// IngressPlatformSpecApplyConfiguration represents a declarative configuration of the IngressPlatformSpec type for use // with apply. type IngressPlatformSpecApplyConfiguration struct { Type *v1.PlatformType `json:"type,omitempty"` AWS *AWSIngressSpecApplyConfiguration `json:"aws,omitempty"` } -// IngressPlatformSpecApplyConfiguration constructs an declarative configuration of the IngressPlatformSpec type for use with +// IngressPlatformSpecApplyConfiguration constructs a declarative configuration of the IngressPlatformSpec type for use with // apply. func IngressPlatformSpec() *IngressPlatformSpecApplyConfiguration { return &IngressPlatformSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go index d934e664b..a9b09512c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go @@ -2,7 +2,7 @@ package v1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. type IngressSpecApplyConfiguration struct { Domain *string `json:"domain,omitempty"` @@ -12,7 +12,7 @@ type IngressSpecApplyConfiguration struct { LoadBalancer *LoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressstatus.go index 7fb9917af..792bcd755 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressstatus.go @@ -6,14 +6,14 @@ import ( configv1 "github.com/openshift/api/config/v1" ) -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. 
type IngressStatusApplyConfiguration struct { ComponentRoutes []ComponentRouteStatusApplyConfiguration `json:"componentRoutes,omitempty"` DefaultPlacement *configv1.DefaultPlacement `json:"defaultPlacement,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go index 4f4ddd375..88204c69a 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go @@ -2,14 +2,14 @@ package v1 -// KeystoneIdentityProviderApplyConfiguration represents an declarative configuration of the KeystoneIdentityProvider type for use +// KeystoneIdentityProviderApplyConfiguration represents a declarative configuration of the KeystoneIdentityProvider type for use // with apply. type KeystoneIdentityProviderApplyConfiguration struct { OAuthRemoteConnectionInfoApplyConfiguration `json:",inline"` DomainName *string `json:"domainName,omitempty"` } -// KeystoneIdentityProviderApplyConfiguration constructs an declarative configuration of the KeystoneIdentityProvider type for use with +// KeystoneIdentityProviderApplyConfiguration constructs a declarative configuration of the KeystoneIdentityProvider type for use with // apply. func KeystoneIdentityProvider() *KeystoneIdentityProviderApplyConfiguration { return &KeystoneIdentityProviderApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kubevirtplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kubevirtplatformstatus.go index 8e092abd2..3d136c53b 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kubevirtplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kubevirtplatformstatus.go @@ -2,14 +2,14 @@ package v1 -// KubevirtPlatformStatusApplyConfiguration represents an declarative configuration of the KubevirtPlatformStatus type for use +// KubevirtPlatformStatusApplyConfiguration represents a declarative configuration of the KubevirtPlatformStatus type for use // with apply. type KubevirtPlatformStatusApplyConfiguration struct { APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` IngressIP *string `json:"ingressIP,omitempty"` } -// KubevirtPlatformStatusApplyConfiguration constructs an declarative configuration of the KubevirtPlatformStatus type for use with +// KubevirtPlatformStatusApplyConfiguration constructs a declarative configuration of the KubevirtPlatformStatus type for use with // apply. 
func KubevirtPlatformStatus() *KubevirtPlatformStatusApplyConfiguration { return &KubevirtPlatformStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapattributemapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapattributemapping.go index 34a8916be..b618065ce 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapattributemapping.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapattributemapping.go @@ -2,7 +2,7 @@ package v1 -// LDAPAttributeMappingApplyConfiguration represents an declarative configuration of the LDAPAttributeMapping type for use +// LDAPAttributeMappingApplyConfiguration represents a declarative configuration of the LDAPAttributeMapping type for use // with apply. type LDAPAttributeMappingApplyConfiguration struct { ID []string `json:"id,omitempty"` @@ -11,7 +11,7 @@ type LDAPAttributeMappingApplyConfiguration struct { Email []string `json:"email,omitempty"` } -// LDAPAttributeMappingApplyConfiguration constructs an declarative configuration of the LDAPAttributeMapping type for use with +// LDAPAttributeMappingApplyConfiguration constructs a declarative configuration of the LDAPAttributeMapping type for use with // apply. func LDAPAttributeMapping() *LDAPAttributeMappingApplyConfiguration { return &LDAPAttributeMappingApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapidentityprovider.go index 9ab1b90ef..90bdfe34c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapidentityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapidentityprovider.go @@ -2,7 +2,7 @@ package v1 -// LDAPIdentityProviderApplyConfiguration represents an declarative configuration of the LDAPIdentityProvider type for use +// LDAPIdentityProviderApplyConfiguration represents a declarative configuration of the LDAPIdentityProvider type for use // with apply. type LDAPIdentityProviderApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -13,7 +13,7 @@ type LDAPIdentityProviderApplyConfiguration struct { Attributes *LDAPAttributeMappingApplyConfiguration `json:"attributes,omitempty"` } -// LDAPIdentityProviderApplyConfiguration constructs an declarative configuration of the LDAPIdentityProvider type for use with +// LDAPIdentityProviderApplyConfiguration constructs a declarative configuration of the LDAPIdentityProvider type for use with // apply. func LDAPIdentityProvider() *LDAPIdentityProviderApplyConfiguration { return &LDAPIdentityProviderApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/loadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/loadbalancer.go index 6f8618760..0dfc67c8f 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/loadbalancer.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/loadbalancer.go @@ -2,13 +2,13 @@ package v1 -// LoadBalancerApplyConfiguration represents an declarative configuration of the LoadBalancer type for use +// LoadBalancerApplyConfiguration represents a declarative configuration of the LoadBalancer type for use // with apply. 
type LoadBalancerApplyConfiguration struct { Platform *IngressPlatformSpecApplyConfiguration `json:"platform,omitempty"` } -// LoadBalancerApplyConfiguration constructs an declarative configuration of the LoadBalancer type for use with +// LoadBalancerApplyConfiguration constructs a declarative configuration of the LoadBalancer type for use with // apply. func LoadBalancer() *LoadBalancerApplyConfiguration { return &LoadBalancerApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/maxagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/maxagepolicy.go index 0712a0da7..faa8e1dd5 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/maxagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/maxagepolicy.go @@ -2,14 +2,14 @@ package v1 -// MaxAgePolicyApplyConfiguration represents an declarative configuration of the MaxAgePolicy type for use +// MaxAgePolicyApplyConfiguration represents a declarative configuration of the MaxAgePolicy type for use // with apply. type MaxAgePolicyApplyConfiguration struct { LargestMaxAge *int32 `json:"largestMaxAge,omitempty"` SmallestMaxAge *int32 `json:"smallestMaxAge,omitempty"` } -// MaxAgePolicyApplyConfiguration constructs an declarative configuration of the MaxAgePolicy type for use with +// MaxAgePolicyApplyConfiguration constructs a declarative configuration of the MaxAgePolicy type for use with // apply. func MaxAgePolicy() *MaxAgePolicyApplyConfiguration { return &MaxAgePolicyApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigration.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigration.go index 23b85cf82..9db99100e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigration.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigration.go @@ -2,14 +2,14 @@ package v1 -// MTUMigrationApplyConfiguration represents an declarative configuration of the MTUMigration type for use +// MTUMigrationApplyConfiguration represents a declarative configuration of the MTUMigration type for use // with apply. type MTUMigrationApplyConfiguration struct { Network *MTUMigrationValuesApplyConfiguration `json:"network,omitempty"` Machine *MTUMigrationValuesApplyConfiguration `json:"machine,omitempty"` } -// MTUMigrationApplyConfiguration constructs an declarative configuration of the MTUMigration type for use with +// MTUMigrationApplyConfiguration constructs a declarative configuration of the MTUMigration type for use with // apply. 
func MTUMigration() *MTUMigrationApplyConfiguration { return &MTUMigrationApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigrationvalues.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigrationvalues.go index a1e185a55..8d346f25f 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigrationvalues.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigrationvalues.go @@ -2,14 +2,14 @@ package v1 -// MTUMigrationValuesApplyConfiguration represents an declarative configuration of the MTUMigrationValues type for use +// MTUMigrationValuesApplyConfiguration represents a declarative configuration of the MTUMigrationValues type for use // with apply. type MTUMigrationValuesApplyConfiguration struct { To *uint32 `json:"to,omitempty"` From *uint32 `json:"from,omitempty"` } -// MTUMigrationValuesApplyConfiguration constructs an declarative configuration of the MTUMigrationValues type for use with +// MTUMigrationValuesApplyConfiguration constructs a declarative configuration of the MTUMigrationValues type for use with // apply. func MTUMigrationValues() *MTUMigrationValuesApplyConfiguration { return &MTUMigrationValuesApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go index 6604b627f..a42f3a9ea 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkApplyConfiguration represents an declarative configuration of the Network type for use +// NetworkApplyConfiguration represents a declarative configuration of the Network type for use // with apply. type NetworkApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type NetworkApplyConfiguration struct { Status *NetworkStatusApplyConfiguration `json:"status,omitempty"` } -// Network constructs an declarative configuration of the Network type for use with +// Network constructs a declarative configuration of the Network type for use with // apply. func Network(name string) *NetworkApplyConfiguration { b := &NetworkApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *NetworkApplyConfiguration) WithStatus(value *NetworkStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NetworkApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go new file mode 100644 index 000000000..6f75e0385 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// NetworkDiagnosticsApplyConfiguration represents a declarative configuration of the NetworkDiagnostics type for use +// with apply. 
+type NetworkDiagnosticsApplyConfiguration struct { + Mode *v1.NetworkDiagnosticsMode `json:"mode,omitempty"` + SourcePlacement *NetworkDiagnosticsSourcePlacementApplyConfiguration `json:"sourcePlacement,omitempty"` + TargetPlacement *NetworkDiagnosticsTargetPlacementApplyConfiguration `json:"targetPlacement,omitempty"` +} + +// NetworkDiagnosticsApplyConfiguration constructs a declarative configuration of the NetworkDiagnostics type for use with +// apply. +func NetworkDiagnostics() *NetworkDiagnosticsApplyConfiguration { + return &NetworkDiagnosticsApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *NetworkDiagnosticsApplyConfiguration) WithMode(value v1.NetworkDiagnosticsMode) *NetworkDiagnosticsApplyConfiguration { + b.Mode = &value + return b +} + +// WithSourcePlacement sets the SourcePlacement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SourcePlacement field is set to the value of the last call. +func (b *NetworkDiagnosticsApplyConfiguration) WithSourcePlacement(value *NetworkDiagnosticsSourcePlacementApplyConfiguration) *NetworkDiagnosticsApplyConfiguration { + b.SourcePlacement = value + return b +} + +// WithTargetPlacement sets the TargetPlacement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetPlacement field is set to the value of the last call. +func (b *NetworkDiagnosticsApplyConfiguration) WithTargetPlacement(value *NetworkDiagnosticsTargetPlacementApplyConfiguration) *NetworkDiagnosticsApplyConfiguration { + b.TargetPlacement = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go new file mode 100644 index 000000000..2b280a828 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go @@ -0,0 +1,44 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// NetworkDiagnosticsSourcePlacementApplyConfiguration represents a declarative configuration of the NetworkDiagnosticsSourcePlacement type for use +// with apply. +type NetworkDiagnosticsSourcePlacementApplyConfiguration struct { + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` +} + +// NetworkDiagnosticsSourcePlacementApplyConfiguration constructs a declarative configuration of the NetworkDiagnosticsSourcePlacement type for use with +// apply. +func NetworkDiagnosticsSourcePlacement() *NetworkDiagnosticsSourcePlacementApplyConfiguration { + return &NetworkDiagnosticsSourcePlacementApplyConfiguration{} +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting existing map entries in the NodeSelector field with the same key. +func (b *NetworkDiagnosticsSourcePlacementApplyConfiguration) WithNodeSelector(entries map[string]string) *NetworkDiagnosticsSourcePlacementApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *NetworkDiagnosticsSourcePlacementApplyConfiguration) WithTolerations(values ...v1.Toleration) *NetworkDiagnosticsSourcePlacementApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go new file mode 100644 index 000000000..6fee4c795 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go @@ -0,0 +1,44 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// NetworkDiagnosticsTargetPlacementApplyConfiguration represents a declarative configuration of the NetworkDiagnosticsTargetPlacement type for use +// with apply. +type NetworkDiagnosticsTargetPlacementApplyConfiguration struct { + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` +} + +// NetworkDiagnosticsTargetPlacementApplyConfiguration constructs a declarative configuration of the NetworkDiagnosticsTargetPlacement type for use with +// apply. +func NetworkDiagnosticsTargetPlacement() *NetworkDiagnosticsTargetPlacementApplyConfiguration { + return &NetworkDiagnosticsTargetPlacementApplyConfiguration{} +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting existing map entries in the NodeSelector field with the same key. +func (b *NetworkDiagnosticsTargetPlacementApplyConfiguration) WithNodeSelector(entries map[string]string) *NetworkDiagnosticsTargetPlacementApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *NetworkDiagnosticsTargetPlacementApplyConfiguration) WithTolerations(values ...v1.Toleration) *NetworkDiagnosticsTargetPlacementApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +}
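The new NetworkDiagnostics apply configurations compose like the rest of the generated builders. A minimal sketch of wiring mode and placement together; the "All" mode string, the labels, and the toleration are illustrative assumptions (the NetworkDiagnosticsMode constants live in github.com/openshift/api/config/v1):

```go
package main

import (
	corev1 "k8s.io/api/core/v1"

	apiconfigv1 "github.com/openshift/api/config/v1"
	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func networkDiagnosticsExample() *configv1.NetworkDiagnosticsApplyConfiguration {
	return configv1.NetworkDiagnostics().
		// Mode is a typed string from the API package; "All" is illustrative.
		WithMode(apiconfigv1.NetworkDiagnosticsMode("All")).
		WithSourcePlacement(configv1.NetworkDiagnosticsSourcePlacement().
			// Repeated WithNodeSelector calls merge entries; later calls
			// overwrite entries that share a key.
			WithNodeSelector(map[string]string{"node-role.kubernetes.io/worker": ""}).
			WithTolerations(corev1.Toleration{Operator: corev1.TolerationOpExists})).
		WithTargetPlacement(configv1.NetworkDiagnosticsTargetPlacement().
			WithNodeSelector(map[string]string{"kubernetes.io/os": "linux"}))
}
```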
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkmigration.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkmigration.go index c1ea6eade..9c8294746 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkmigration.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkmigration.go @@ -2,14 +2,14 @@ package v1 -// NetworkMigrationApplyConfiguration represents an declarative configuration of the NetworkMigration type for use +// NetworkMigrationApplyConfiguration represents a declarative configuration of the NetworkMigration type for use // with apply. type NetworkMigrationApplyConfiguration struct { NetworkType *string `json:"networkType,omitempty"` MTU *MTUMigrationApplyConfiguration `json:"mtu,omitempty"` } -// NetworkMigrationApplyConfiguration constructs an declarative configuration of the NetworkMigration type for use with +// NetworkMigrationApplyConfiguration constructs a declarative configuration of the NetworkMigration type for use with // apply. func NetworkMigration() *NetworkMigrationApplyConfiguration { return &NetworkMigrationApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go index c74dc4d0c..d4e970e34 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go @@ -2,7 +2,7 @@ package v1 -// NetworkSpecApplyConfiguration represents an declarative configuration of the NetworkSpec type for use +// NetworkSpecApplyConfiguration represents a declarative configuration of the NetworkSpec type for use // with apply. type NetworkSpecApplyConfiguration struct { ClusterNetwork []ClusterNetworkEntryApplyConfiguration `json:"clusterNetwork,omitempty"` @@ -10,9 +10,10 @@ type NetworkSpecApplyConfiguration struct { NetworkType *string `json:"networkType,omitempty"` ExternalIP *ExternalIPConfigApplyConfiguration `json:"externalIP,omitempty"` ServiceNodePortRange *string `json:"serviceNodePortRange,omitempty"` + NetworkDiagnostics *NetworkDiagnosticsApplyConfiguration `json:"networkDiagnostics,omitempty"` } -// NetworkSpecApplyConfiguration constructs an declarative configuration of the NetworkSpec type for use with +// NetworkSpecApplyConfiguration constructs a declarative configuration of the NetworkSpec type for use with // apply. func NetworkSpec() *NetworkSpecApplyConfiguration { return &NetworkSpecApplyConfiguration{} @@ -64,3 +65,11 @@ func (b *NetworkSpecApplyConfiguration) WithServiceNodePortRange(value string) * b.ServiceNodePortRange = &value return b } + +// WithNetworkDiagnostics sets the NetworkDiagnostics field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetworkDiagnostics field is set to the value of the last call.
+func (b *NetworkSpecApplyConfiguration) WithNetworkDiagnostics(value *NetworkDiagnosticsApplyConfiguration) *NetworkSpecApplyConfiguration { + b.NetworkDiagnostics = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkstatus.go index f30f3c4a1..de3697ed7 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkstatus.go @@ -3,10 +3,10 @@ package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkStatusApplyConfiguration represents an declarative configuration of the NetworkStatus type for use +// NetworkStatusApplyConfiguration represents a declarative configuration of the NetworkStatus type for use // with apply. type NetworkStatusApplyConfiguration struct { ClusterNetwork []ClusterNetworkEntryApplyConfiguration `json:"clusterNetwork,omitempty"` @@ -14,10 +14,10 @@ type NetworkStatusApplyConfiguration struct { NetworkType *string `json:"networkType,omitempty"` ClusterNetworkMTU *int `json:"clusterNetworkMTU,omitempty"` Migration *NetworkMigrationApplyConfiguration `json:"migration,omitempty"` - Conditions []metav1.Condition `json:"conditions,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// NetworkStatusApplyConfiguration constructs an declarative configuration of the NetworkStatus type for use with +// NetworkStatusApplyConfiguration constructs a declarative configuration of the NetworkStatus type for use with // apply. func NetworkStatus() *NetworkStatusApplyConfiguration { return &NetworkStatusApplyConfiguration{} @@ -73,9 +73,12 @@ func (b *NetworkStatusApplyConfiguration) WithMigration(value *NetworkMigrationA // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *NetworkStatusApplyConfiguration) WithConditions(values ...metav1.Condition) *NetworkStatusApplyConfiguration { +func (b *NetworkStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *NetworkStatusApplyConfiguration { for i := range values { - b.Conditions = append(b.Conditions, values[i]) + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) } return b }
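NetworkStatus conditions are now built from meta/v1 ConditionApplyConfiguration values rather than concrete metav1.Condition structs, and WithConditions panics on nil entries. A hedged sketch of calling code after this change, also showing the new WithNetworkDiagnostics hook on the spec; the condition type and reason strings are illustrative only:

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applymetav1 "k8s.io/client-go/applyconfigurations/meta/v1"

	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func networkExample() *configv1.NetworkApplyConfiguration {
	return configv1.Network("cluster").
		// New in this revision: network diagnostics hang off the spec.
		WithSpec(configv1.NetworkSpec().
			WithNetworkDiagnostics(configv1.NetworkDiagnostics())).
		// Conditions are pointer-valued apply configurations; passing a
		// nil *ConditionApplyConfiguration now panics.
		WithStatus(configv1.NetworkStatus().
			WithConditions(applymetav1.Condition().
				WithType("NetworkTypeMigrationInProgress"). // illustrative
				WithStatus(metav1.ConditionFalse).
				WithReason("NoMigration").
				WithMessage("no network type migration is in progress").
				WithLastTransitionTime(metav1.Now())))
}
```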
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go index a407a9e45..ac4deeb66 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go @@ -11,16 +11,16 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NodeApplyConfiguration represents an declarative configuration of the Node type for use +// NodeApplyConfiguration represents a declarative configuration of the Node type for use // with apply. type NodeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"` - Status *apiconfigv1.NodeStatus `json:"status,omitempty"` + Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"` + Status *NodeStatusApplyConfiguration `json:"status,omitempty"` } -// Node constructs an declarative configuration of the Node type for use with +// Node constructs a declarative configuration of the Node type for use with // apply. func Node(name string) *NodeApplyConfiguration { b := &NodeApplyConfiguration{} @@ -234,7 +234,13 @@ func (b *NodeApplyConfiguration) WithSpec(value *NodeSpecApplyConfiguration) *No // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *NodeApplyConfiguration) WithStatus(value apiconfigv1.NodeStatus) *NodeApplyConfiguration { - b.Status = &value +func (b *NodeApplyConfiguration) WithStatus(value *NodeStatusApplyConfiguration) *NodeApplyConfiguration { + b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go index 3b7bf903b..37965a138 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go @@ -6,14 +6,14 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// NodeSpecApplyConfiguration represents an declarative configuration of the NodeSpec type for use +// NodeSpecApplyConfiguration represents a declarative configuration of the NodeSpec type for use // with apply. type NodeSpecApplyConfiguration struct { CgroupMode *v1.CgroupMode `json:"cgroupMode,omitempty"` WorkerLatencyProfile *v1.WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` } -// NodeSpecApplyConfiguration constructs an declarative configuration of the NodeSpec type for use with +// NodeSpecApplyConfiguration constructs a declarative configuration of the NodeSpec type for use with // apply. func NodeSpec() *NodeSpecApplyConfiguration { return &NodeSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodestatus.go new file mode 100644 index 000000000..e36466526 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodestatus.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use +// with apply. +type NodeStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with +// apply.
+func NodeStatus() *NodeStatusApplyConfiguration { + return &NodeStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *NodeStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *NodeStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixfailuredomain.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixfailuredomain.go index 5af68e441..31d77a83e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixfailuredomain.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixfailuredomain.go @@ -2,7 +2,7 @@ package v1 -// NutanixFailureDomainApplyConfiguration represents an declarative configuration of the NutanixFailureDomain type for use +// NutanixFailureDomainApplyConfiguration represents a declarative configuration of the NutanixFailureDomain type for use // with apply. type NutanixFailureDomainApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -10,7 +10,7 @@ type NutanixFailureDomainApplyConfiguration struct { Subnets []NutanixResourceIdentifierApplyConfiguration `json:"subnets,omitempty"` } -// NutanixFailureDomainApplyConfiguration constructs an declarative configuration of the NutanixFailureDomain type for use with +// NutanixFailureDomainApplyConfiguration constructs a declarative configuration of the NutanixFailureDomain type for use with // apply. func NutanixFailureDomain() *NutanixFailureDomainApplyConfiguration { return &NutanixFailureDomainApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go index 5ab68bb77..3578f512b 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go @@ -6,13 +6,13 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// NutanixPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the NutanixPlatformLoadBalancer type for use +// NutanixPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the NutanixPlatformLoadBalancer type for use // with apply. type NutanixPlatformLoadBalancerApplyConfiguration struct { Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` } -// NutanixPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the NutanixPlatformLoadBalancer type for use with +// NutanixPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the NutanixPlatformLoadBalancer type for use with // apply. func NutanixPlatformLoadBalancer() *NutanixPlatformLoadBalancerApplyConfiguration { return &NutanixPlatformLoadBalancerApplyConfiguration{}
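Because Node.Status is now an apply configuration instead of a raw apiconfigv1.NodeStatus, an entire nodes.config.openshift.io object can be declared in one fluent chain, as in the nodestatus.go addition above. A minimal sketch; the cgroup-mode string and the condition fields are illustrative assumptions:

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applymetav1 "k8s.io/client-go/applyconfigurations/meta/v1"

	apiconfigv1 "github.com/openshift/api/config/v1"
	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func nodeConfigExample() *configv1.NodeApplyConfiguration {
	// "cluster" follows the singleton naming convention of config.openshift.io.
	return configv1.Node("cluster").
		WithSpec(configv1.NodeSpec().
			// CgroupMode is a typed string; "v2" is used here without
			// assuming a particular constant name.
			WithCgroupMode(apiconfigv1.CgroupMode("v2"))).
		WithStatus(configv1.NodeStatus().
			WithConditions(applymetav1.Condition().
				WithType("CgroupModeActive"). // illustrative condition type
				WithStatus(metav1.ConditionTrue).
				WithReason("AsExpected").
				WithMessage("cgroup mode matches the requested configuration").
				WithLastTransitionTime(metav1.Now())))
}
```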
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformspec.go index d36708229..8f7cb9842 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformspec.go @@ -2,7 +2,7 @@ package v1 -// NutanixPlatformSpecApplyConfiguration represents an declarative configuration of the NutanixPlatformSpec type for use +// NutanixPlatformSpecApplyConfiguration represents a declarative configuration of the NutanixPlatformSpec type for use // with apply. type NutanixPlatformSpecApplyConfiguration struct { PrismCentral *NutanixPrismEndpointApplyConfiguration `json:"prismCentral,omitempty"` @@ -10,7 +10,7 @@ type NutanixPlatformSpecApplyConfiguration struct { FailureDomains []NutanixFailureDomainApplyConfiguration `json:"failureDomains,omitempty"` } -// NutanixPlatformSpecApplyConfiguration constructs an declarative configuration of the NutanixPlatformSpec type for use with +// NutanixPlatformSpecApplyConfiguration constructs a declarative configuration of the NutanixPlatformSpec type for use with // apply. func NutanixPlatformSpec() *NutanixPlatformSpecApplyConfiguration { return &NutanixPlatformSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go index 8dd8a6895..d7988e511 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go @@ -2,7 +2,7 @@ package v1 -// NutanixPlatformStatusApplyConfiguration represents an declarative configuration of the NutanixPlatformStatus type for use +// NutanixPlatformStatusApplyConfiguration represents a declarative configuration of the NutanixPlatformStatus type for use // with apply. type NutanixPlatformStatusApplyConfiguration struct { APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` @@ -12,7 +12,7 @@ type NutanixPlatformStatusApplyConfiguration struct { LoadBalancer *NutanixPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` } -// NutanixPlatformStatusApplyConfiguration constructs an declarative configuration of the NutanixPlatformStatus type for use with +// NutanixPlatformStatusApplyConfiguration constructs a declarative configuration of the NutanixPlatformStatus type for use with // apply.
func NutanixPlatformStatus() *NutanixPlatformStatusApplyConfiguration { return &NutanixPlatformStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismelementendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismelementendpoint.go index 3251b5343..2e59ff235 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismelementendpoint.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismelementendpoint.go @@ -2,14 +2,14 @@ package v1 -// NutanixPrismElementEndpointApplyConfiguration represents an declarative configuration of the NutanixPrismElementEndpoint type for use +// NutanixPrismElementEndpointApplyConfiguration represents a declarative configuration of the NutanixPrismElementEndpoint type for use // with apply. type NutanixPrismElementEndpointApplyConfiguration struct { Name *string `json:"name,omitempty"` Endpoint *NutanixPrismEndpointApplyConfiguration `json:"endpoint,omitempty"` } -// NutanixPrismElementEndpointApplyConfiguration constructs an declarative configuration of the NutanixPrismElementEndpoint type for use with +// NutanixPrismElementEndpointApplyConfiguration constructs a declarative configuration of the NutanixPrismElementEndpoint type for use with // apply. func NutanixPrismElementEndpoint() *NutanixPrismElementEndpointApplyConfiguration { return &NutanixPrismElementEndpointApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismendpoint.go index a90157324..8012c2cb2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismendpoint.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismendpoint.go @@ -2,14 +2,14 @@ package v1 -// NutanixPrismEndpointApplyConfiguration represents an declarative configuration of the NutanixPrismEndpoint type for use +// NutanixPrismEndpointApplyConfiguration represents a declarative configuration of the NutanixPrismEndpoint type for use // with apply. type NutanixPrismEndpointApplyConfiguration struct { Address *string `json:"address,omitempty"` Port *int32 `json:"port,omitempty"` } -// NutanixPrismEndpointApplyConfiguration constructs an declarative configuration of the NutanixPrismEndpoint type for use with +// NutanixPrismEndpointApplyConfiguration constructs a declarative configuration of the NutanixPrismEndpoint type for use with // apply. 
func NutanixPrismEndpoint() *NutanixPrismEndpointApplyConfiguration { return &NutanixPrismEndpointApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go index cb039c42e..f06b7c5d9 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// NutanixResourceIdentifierApplyConfiguration represents an declarative configuration of the NutanixResourceIdentifier type for use +// NutanixResourceIdentifierApplyConfiguration represents a declarative configuration of the NutanixResourceIdentifier type for use // with apply. type NutanixResourceIdentifierApplyConfiguration struct { Type *v1.NutanixIdentifierType `json:"type,omitempty"` @@ -14,7 +14,7 @@ type NutanixResourceIdentifierApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// NutanixResourceIdentifierApplyConfiguration constructs an declarative configuration of the NutanixResourceIdentifier type for use with +// NutanixResourceIdentifierApplyConfiguration constructs a declarative configuration of the NutanixResourceIdentifier type for use with // apply. func NutanixResourceIdentifier() *NutanixResourceIdentifierApplyConfiguration { return &NutanixResourceIdentifierApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go index 1067348bd..61d6739c4 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// OAuthApplyConfiguration represents an declarative configuration of the OAuth type for use +// OAuthApplyConfiguration represents a declarative configuration of the OAuth type for use // with apply. type OAuthApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type OAuthApplyConfiguration struct { Status *apiconfigv1.OAuthStatus `json:"status,omitempty"` } -// OAuth constructs an declarative configuration of the OAuth type for use with +// OAuth constructs a declarative configuration of the OAuth type for use with // apply. func OAuth(name string) *OAuthApplyConfiguration { b := &OAuthApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *OAuthApplyConfiguration) WithStatus(value apiconfigv1.OAuthStatus) *OAu b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *OAuthApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthremoteconnectioninfo.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthremoteconnectioninfo.go index 5a1cca90f..3b348819d 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthremoteconnectioninfo.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthremoteconnectioninfo.go @@ -2,7 +2,7 @@ package v1 -// OAuthRemoteConnectionInfoApplyConfiguration represents an declarative configuration of the OAuthRemoteConnectionInfo type for use +// OAuthRemoteConnectionInfoApplyConfiguration represents a declarative configuration of the OAuthRemoteConnectionInfo type for use // with apply. type OAuthRemoteConnectionInfoApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -11,7 +11,7 @@ type OAuthRemoteConnectionInfoApplyConfiguration struct { TLSClientKey *SecretNameReferenceApplyConfiguration `json:"tlsClientKey,omitempty"` } -// OAuthRemoteConnectionInfoApplyConfiguration constructs an declarative configuration of the OAuthRemoteConnectionInfo type for use with +// OAuthRemoteConnectionInfoApplyConfiguration constructs a declarative configuration of the OAuthRemoteConnectionInfo type for use with // apply. func OAuthRemoteConnectionInfo() *OAuthRemoteConnectionInfoApplyConfiguration { return &OAuthRemoteConnectionInfoApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthspec.go index 3fd987882..5eacc05cb 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthspec.go @@ -2,7 +2,7 @@ package v1 -// OAuthSpecApplyConfiguration represents an declarative configuration of the OAuthSpec type for use +// OAuthSpecApplyConfiguration represents a declarative configuration of the OAuthSpec type for use // with apply. type OAuthSpecApplyConfiguration struct { IdentityProviders []IdentityProviderApplyConfiguration `json:"identityProviders,omitempty"` @@ -10,7 +10,7 @@ type OAuthSpecApplyConfiguration struct { Templates *OAuthTemplatesApplyConfiguration `json:"templates,omitempty"` } -// OAuthSpecApplyConfiguration constructs an declarative configuration of the OAuthSpec type for use with +// OAuthSpecApplyConfiguration constructs a declarative configuration of the OAuthSpec type for use with // apply. func OAuthSpec() *OAuthSpecApplyConfiguration { return &OAuthSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthtemplates.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthtemplates.go index 99b615e1b..98bc5a0db 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthtemplates.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthtemplates.go @@ -2,7 +2,7 @@ package v1 -// OAuthTemplatesApplyConfiguration represents an declarative configuration of the OAuthTemplates type for use +// OAuthTemplatesApplyConfiguration represents a declarative configuration of the OAuthTemplates type for use // with apply. 
type OAuthTemplatesApplyConfiguration struct { Login *SecretNameReferenceApplyConfiguration `json:"login,omitempty"` @@ -10,7 +10,7 @@ type OAuthTemplatesApplyConfiguration struct { Error *SecretNameReferenceApplyConfiguration `json:"error,omitempty"` } -// OAuthTemplatesApplyConfiguration constructs an declarative configuration of the OAuthTemplates type for use with +// OAuthTemplatesApplyConfiguration constructs a declarative configuration of the OAuthTemplates type for use with // apply. func OAuthTemplates() *OAuthTemplatesApplyConfiguration { return &OAuthTemplatesApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/objectreference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/objectreference.go index fd46a832d..dfbc465e7 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/objectreference.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/objectreference.go @@ -2,7 +2,7 @@ package v1 -// ObjectReferenceApplyConfiguration represents an declarative configuration of the ObjectReference type for use +// ObjectReferenceApplyConfiguration represents a declarative configuration of the ObjectReference type for use // with apply. type ObjectReferenceApplyConfiguration struct { Group *string `json:"group,omitempty"` @@ -11,7 +11,7 @@ type ObjectReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ObjectReferenceApplyConfiguration constructs an declarative configuration of the ObjectReference type for use with +// ObjectReferenceApplyConfiguration constructs a declarative configuration of the ObjectReference type for use with // apply. func ObjectReference() *ObjectReferenceApplyConfiguration { return &ObjectReferenceApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientconfig.go index 1a66c43aa..65fa3dd46 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientconfig.go @@ -2,7 +2,7 @@ package v1 -// OIDCClientConfigApplyConfiguration represents an declarative configuration of the OIDCClientConfig type for use +// OIDCClientConfigApplyConfiguration represents a declarative configuration of the OIDCClientConfig type for use // with apply. type OIDCClientConfigApplyConfiguration struct { ComponentName *string `json:"componentName,omitempty"` @@ -12,7 +12,7 @@ type OIDCClientConfigApplyConfiguration struct { ExtraScopes []string `json:"extraScopes,omitempty"` } -// OIDCClientConfigApplyConfiguration constructs an declarative configuration of the OIDCClientConfig type for use with +// OIDCClientConfigApplyConfiguration constructs a declarative configuration of the OIDCClientConfig type for use with // apply. 
func OIDCClientConfig() *OIDCClientConfigApplyConfiguration { return &OIDCClientConfigApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientreference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientreference.go index 3c20508e5..5109305b2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientreference.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientreference.go @@ -2,7 +2,7 @@ package v1 -// OIDCClientReferenceApplyConfiguration represents an declarative configuration of the OIDCClientReference type for use +// OIDCClientReferenceApplyConfiguration represents a declarative configuration of the OIDCClientReference type for use // with apply. type OIDCClientReferenceApplyConfiguration struct { OIDCProviderName *string `json:"oidcProviderName,omitempty"` @@ -10,7 +10,7 @@ type OIDCClientReferenceApplyConfiguration struct { ClientID *string `json:"clientID,omitempty"` } -// OIDCClientReferenceApplyConfiguration constructs an declarative configuration of the OIDCClientReference type for use with +// OIDCClientReferenceApplyConfiguration constructs a declarative configuration of the OIDCClientReference type for use with // apply. func OIDCClientReference() *OIDCClientReferenceApplyConfiguration { return &OIDCClientReferenceApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientstatus.go index e4cf3f31c..5d365a87e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientstatus.go @@ -4,20 +4,20 @@ package v1 import ( configv1 "github.com/openshift/api/config/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// OIDCClientStatusApplyConfiguration represents an declarative configuration of the OIDCClientStatus type for use +// OIDCClientStatusApplyConfiguration represents a declarative configuration of the OIDCClientStatus type for use // with apply. type OIDCClientStatusApplyConfiguration struct { ComponentName *string `json:"componentName,omitempty"` ComponentNamespace *string `json:"componentNamespace,omitempty"` CurrentOIDCClients []OIDCClientReferenceApplyConfiguration `json:"currentOIDCClients,omitempty"` ConsumingUsers []configv1.ConsumingUser `json:"consumingUsers,omitempty"` - Conditions []metav1.Condition `json:"conditions,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// OIDCClientStatusApplyConfiguration constructs an declarative configuration of the OIDCClientStatus type for use with +// OIDCClientStatusApplyConfiguration constructs a declarative configuration of the OIDCClientStatus type for use with // apply. func OIDCClientStatus() *OIDCClientStatusApplyConfiguration { return &OIDCClientStatusApplyConfiguration{} @@ -65,9 +65,12 @@ func (b *OIDCClientStatusApplyConfiguration) WithConsumingUsers(values ...config // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *OIDCClientStatusApplyConfiguration) WithConditions(values ...metav1.Condition) *OIDCClientStatusApplyConfiguration { +func (b *OIDCClientStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *OIDCClientStatusApplyConfiguration { for i := range values { - b.Conditions = append(b.Conditions, values[i]) + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) } return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go index d700ea5e1..7d9300367 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go @@ -2,7 +2,7 @@ package v1 -// OIDCProviderApplyConfiguration represents an declarative configuration of the OIDCProvider type for use +// OIDCProviderApplyConfiguration represents a declarative configuration of the OIDCProvider type for use // with apply. type OIDCProviderApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -12,7 +12,7 @@ type OIDCProviderApplyConfiguration struct { ClaimValidationRules []TokenClaimValidationRuleApplyConfiguration `json:"claimValidationRules,omitempty"` } -// OIDCProviderApplyConfiguration constructs an declarative configuration of the OIDCProvider type for use with +// OIDCProviderApplyConfiguration constructs a declarative configuration of the OIDCProvider type for use with // apply. func OIDCProvider() *OIDCProviderApplyConfiguration { return &OIDCProviderApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go index ddaa7d505..18c2a768f 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// OpenIDClaimsApplyConfiguration represents an declarative configuration of the OpenIDClaims type for use +// OpenIDClaimsApplyConfiguration represents a declarative configuration of the OpenIDClaims type for use // with apply. type OpenIDClaimsApplyConfiguration struct { PreferredUsername []string `json:"preferredUsername,omitempty"` @@ -15,7 +15,7 @@ type OpenIDClaimsApplyConfiguration struct { Groups []v1.OpenIDClaim `json:"groups,omitempty"` } -// OpenIDClaimsApplyConfiguration constructs an declarative configuration of the OpenIDClaims type for use with +// OpenIDClaimsApplyConfiguration constructs a declarative configuration of the OpenIDClaims type for use with // apply. 
func OpenIDClaims() *OpenIDClaimsApplyConfiguration { return &OpenIDClaimsApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openididentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openididentityprovider.go index 6b143db8b..9372178cf 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openididentityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openididentityprovider.go @@ -2,7 +2,7 @@ package v1 -// OpenIDIdentityProviderApplyConfiguration represents an declarative configuration of the OpenIDIdentityProvider type for use +// OpenIDIdentityProviderApplyConfiguration represents a declarative configuration of the OpenIDIdentityProvider type for use // with apply. type OpenIDIdentityProviderApplyConfiguration struct { ClientID *string `json:"clientID,omitempty"` @@ -14,7 +14,7 @@ type OpenIDIdentityProviderApplyConfiguration struct { Claims *OpenIDClaimsApplyConfiguration `json:"claims,omitempty"` } -// OpenIDIdentityProviderApplyConfiguration constructs an declarative configuration of the OpenIDIdentityProvider type for use with +// OpenIDIdentityProviderApplyConfiguration constructs a declarative configuration of the OpenIDIdentityProvider type for use with // apply. func OpenIDIdentityProvider() *OpenIDIdentityProviderApplyConfiguration { return &OpenIDIdentityProviderApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go index 2eed83e1c..3e4990789 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go @@ -6,13 +6,13 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// OpenStackPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the OpenStackPlatformLoadBalancer type for use +// OpenStackPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the OpenStackPlatformLoadBalancer type for use // with apply. type OpenStackPlatformLoadBalancerApplyConfiguration struct { Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` } -// OpenStackPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the OpenStackPlatformLoadBalancer type for use with +// OpenStackPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the OpenStackPlatformLoadBalancer type for use with // apply. 
func OpenStackPlatformLoadBalancer() *OpenStackPlatformLoadBalancerApplyConfiguration { return &OpenStackPlatformLoadBalancerApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go index 3a54152ae..393a1bb93 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// OpenStackPlatformSpecApplyConfiguration represents an declarative configuration of the OpenStackPlatformSpec type for use +// OpenStackPlatformSpecApplyConfiguration represents a declarative configuration of the OpenStackPlatformSpec type for use // with apply. type OpenStackPlatformSpecApplyConfiguration struct { APIServerInternalIPs []v1.IP `json:"apiServerInternalIPs,omitempty"` @@ -14,7 +14,7 @@ type OpenStackPlatformSpecApplyConfiguration struct { MachineNetworks []v1.CIDR `json:"machineNetworks,omitempty"` } -// OpenStackPlatformSpecApplyConfiguration constructs an declarative configuration of the OpenStackPlatformSpec type for use with +// OpenStackPlatformSpecApplyConfiguration constructs a declarative configuration of the OpenStackPlatformSpec type for use with // apply. func OpenStackPlatformSpec() *OpenStackPlatformSpecApplyConfiguration { return &OpenStackPlatformSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go index 8cfab14db..f06c78e24 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go @@ -6,7 +6,7 @@ import ( configv1 "github.com/openshift/api/config/v1" ) -// OpenStackPlatformStatusApplyConfiguration represents an declarative configuration of the OpenStackPlatformStatus type for use +// OpenStackPlatformStatusApplyConfiguration represents a declarative configuration of the OpenStackPlatformStatus type for use // with apply. type OpenStackPlatformStatusApplyConfiguration struct { APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` @@ -19,7 +19,7 @@ type OpenStackPlatformStatusApplyConfiguration struct { MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"` } -// OpenStackPlatformStatusApplyConfiguration constructs an declarative configuration of the OpenStackPlatformStatus type for use with +// OpenStackPlatformStatusApplyConfiguration constructs a declarative configuration of the OpenStackPlatformStatus type for use with // apply. 
func OpenStackPlatformStatus() *OpenStackPlatformStatusApplyConfiguration { return &OpenStackPlatformStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operandversion.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operandversion.go index 6d9fc37c8..6c4336d6e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operandversion.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operandversion.go @@ -2,14 +2,14 @@ package v1 -// OperandVersionApplyConfiguration represents an declarative configuration of the OperandVersion type for use +// OperandVersionApplyConfiguration represents a declarative configuration of the OperandVersion type for use // with apply. type OperandVersionApplyConfiguration struct { Name *string `json:"name,omitempty"` Version *string `json:"version,omitempty"` } -// OperandVersionApplyConfiguration constructs an declarative configuration of the OperandVersion type for use with +// OperandVersionApplyConfiguration constructs a declarative configuration of the OperandVersion type for use with // apply. func OperandVersion() *OperandVersionApplyConfiguration { return &OperandVersionApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go index 57f017a9d..bda9b75e8 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// OperatorHubApplyConfiguration represents an declarative configuration of the OperatorHub type for use +// OperatorHubApplyConfiguration represents a declarative configuration of the OperatorHub type for use // with apply. type OperatorHubApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type OperatorHubApplyConfiguration struct { Status *OperatorHubStatusApplyConfiguration `json:"status,omitempty"` } -// OperatorHub constructs an declarative configuration of the OperatorHub type for use with +// OperatorHub constructs a declarative configuration of the OperatorHub type for use with // apply. func OperatorHub(name string) *OperatorHubApplyConfiguration { b := &OperatorHubApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *OperatorHubApplyConfiguration) WithStatus(value *OperatorHubStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *OperatorHubApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubspec.go index 831b0769e..56179c4cf 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubspec.go @@ -2,14 +2,14 @@ package v1 -// OperatorHubSpecApplyConfiguration represents an declarative configuration of the OperatorHubSpec type for use +// OperatorHubSpecApplyConfiguration represents a declarative configuration of the OperatorHubSpec type for use // with apply. type OperatorHubSpecApplyConfiguration struct { DisableAllDefaultSources *bool `json:"disableAllDefaultSources,omitempty"` Sources []HubSourceApplyConfiguration `json:"sources,omitempty"` } -// OperatorHubSpecApplyConfiguration constructs an declarative configuration of the OperatorHubSpec type for use with +// OperatorHubSpecApplyConfiguration constructs a declarative configuration of the OperatorHubSpec type for use with // apply. func OperatorHubSpec() *OperatorHubSpecApplyConfiguration { return &OperatorHubSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubstatus.go index 86c134ff5..7e7cda1ac 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubstatus.go @@ -2,13 +2,13 @@ package v1 -// OperatorHubStatusApplyConfiguration represents an declarative configuration of the OperatorHubStatus type for use +// OperatorHubStatusApplyConfiguration represents a declarative configuration of the OperatorHubStatus type for use // with apply. type OperatorHubStatusApplyConfiguration struct { Sources []HubSourceStatusApplyConfiguration `json:"sources,omitempty"` } -// OperatorHubStatusApplyConfiguration constructs an declarative configuration of the OperatorHubStatus type for use with +// OperatorHubStatusApplyConfiguration constructs a declarative configuration of the OperatorHubStatus type for use with // apply. func OperatorHubStatus() *OperatorHubStatusApplyConfiguration { return &OperatorHubStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go index 73c2a03a5..b679a55fd 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go @@ -6,13 +6,13 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// OvirtPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the OvirtPlatformLoadBalancer type for use +// OvirtPlatformLoadBalancerApplyConfiguration represents a declarative configuration of the OvirtPlatformLoadBalancer type for use // with apply. 
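The GetName accessors added throughout this diff (OperatorHub above; Project, Proxy, and Scheduler below) all follow the same shape: they ensure the embedded ObjectMeta apply configuration exists, then return the Name pointer. A minimal usage sketch, not part of the diff itself:

    hub := configv1.OperatorHub("cluster")
    if name := hub.GetName(); name != nil {
        fmt.Println(*name) // prints "cluster", set by the constructor
    }

Because the getter calls ensureObjectMetaApplyConfigurationExists first, it is safe on a builder whose metadata has not been touched; it simply returns nil when no name is set.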
type OvirtPlatformLoadBalancerApplyConfiguration struct { Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` } -// OvirtPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the OvirtPlatformLoadBalancer type for use with +// OvirtPlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the OvirtPlatformLoadBalancer type for use with // apply. func OvirtPlatformLoadBalancer() *OvirtPlatformLoadBalancerApplyConfiguration { return &OvirtPlatformLoadBalancerApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go index 21bb6c842..18ad5d849 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go @@ -2,7 +2,7 @@ package v1 -// OvirtPlatformStatusApplyConfiguration represents an declarative configuration of the OvirtPlatformStatus type for use +// OvirtPlatformStatusApplyConfiguration represents a declarative configuration of the OvirtPlatformStatus type for use // with apply. type OvirtPlatformStatusApplyConfiguration struct { APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` @@ -13,7 +13,7 @@ type OvirtPlatformStatusApplyConfiguration struct { LoadBalancer *OvirtPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` } -// OvirtPlatformStatusApplyConfiguration constructs an declarative configuration of the OvirtPlatformStatus type for use with +// OvirtPlatformStatusApplyConfiguration constructs a declarative configuration of the OvirtPlatformStatus type for use with // apply. func OvirtPlatformStatus() *OvirtPlatformStatusApplyConfiguration { return &OvirtPlatformStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go index b5d001691..6ff5bcf0c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// PlatformSpecApplyConfiguration represents an declarative configuration of the PlatformSpec type for use +// PlatformSpecApplyConfiguration represents a declarative configuration of the PlatformSpec type for use // with apply. type PlatformSpecApplyConfiguration struct { Type *v1.PlatformType `json:"type,omitempty"` @@ -26,7 +26,7 @@ type PlatformSpecApplyConfiguration struct { External *ExternalPlatformSpecApplyConfiguration `json:"external,omitempty"` } -// PlatformSpecApplyConfiguration constructs an declarative configuration of the PlatformSpec type for use with +// PlatformSpecApplyConfiguration constructs a declarative configuration of the PlatformSpec type for use with // apply. 
func PlatformSpec() *PlatformSpecApplyConfiguration { return &PlatformSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go index b6afa04a6..6519ef8b0 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// PlatformStatusApplyConfiguration represents an declarative configuration of the PlatformStatus type for use +// PlatformStatusApplyConfiguration represents a declarative configuration of the PlatformStatus type for use // with apply. type PlatformStatusApplyConfiguration struct { Type *v1.PlatformType `json:"type,omitempty"` @@ -26,7 +26,7 @@ type PlatformStatusApplyConfiguration struct { External *ExternalPlatformStatusApplyConfiguration `json:"external,omitempty"` } -// PlatformStatusApplyConfiguration constructs an declarative configuration of the PlatformStatus type for use with +// PlatformStatusApplyConfiguration constructs a declarative configuration of the PlatformStatus type for use with // apply. func PlatformStatus() *PlatformStatusApplyConfiguration { return &PlatformStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformspec.go index c371a6a26..db3c3d1d9 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformspec.go @@ -2,13 +2,13 @@ package v1 -// PowerVSPlatformSpecApplyConfiguration represents an declarative configuration of the PowerVSPlatformSpec type for use +// PowerVSPlatformSpecApplyConfiguration represents a declarative configuration of the PowerVSPlatformSpec type for use // with apply. type PowerVSPlatformSpecApplyConfiguration struct { ServiceEndpoints []PowerVSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` } -// PowerVSPlatformSpecApplyConfiguration constructs an declarative configuration of the PowerVSPlatformSpec type for use with +// PowerVSPlatformSpecApplyConfiguration constructs a declarative configuration of the PowerVSPlatformSpec type for use with // apply. func PowerVSPlatformSpec() *PowerVSPlatformSpecApplyConfiguration { return &PowerVSPlatformSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go index c1660d005..f40099f16 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go @@ -2,7 +2,7 @@ package v1 -// PowerVSPlatformStatusApplyConfiguration represents an declarative configuration of the PowerVSPlatformStatus type for use +// PowerVSPlatformStatusApplyConfiguration represents a declarative configuration of the PowerVSPlatformStatus type for use // with apply. 
type PowerVSPlatformStatusApplyConfiguration struct { Region *string `json:"region,omitempty"` @@ -13,7 +13,7 @@ type PowerVSPlatformStatusApplyConfiguration struct { DNSInstanceCRN *string `json:"dnsInstanceCRN,omitempty"` } -// PowerVSPlatformStatusApplyConfiguration constructs an declarative configuration of the PowerVSPlatformStatus type for use with +// PowerVSPlatformStatusApplyConfiguration constructs a declarative configuration of the PowerVSPlatformStatus type for use with // apply. func PowerVSPlatformStatus() *PowerVSPlatformStatusApplyConfiguration { return &PowerVSPlatformStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsserviceendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsserviceendpoint.go index ef262d38c..8fd231a2a 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsserviceendpoint.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsserviceendpoint.go @@ -2,14 +2,14 @@ package v1 -// PowerVSServiceEndpointApplyConfiguration represents an declarative configuration of the PowerVSServiceEndpoint type for use +// PowerVSServiceEndpointApplyConfiguration represents a declarative configuration of the PowerVSServiceEndpoint type for use // with apply. type PowerVSServiceEndpointApplyConfiguration struct { Name *string `json:"name,omitempty"` URL *string `json:"url,omitempty"` } -// PowerVSServiceEndpointApplyConfiguration constructs an declarative configuration of the PowerVSServiceEndpoint type for use with +// PowerVSServiceEndpointApplyConfiguration constructs a declarative configuration of the PowerVSServiceEndpoint type for use with // apply. func PowerVSServiceEndpoint() *PowerVSServiceEndpointApplyConfiguration { return &PowerVSServiceEndpointApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go index fedc364e3..bf10a6891 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go @@ -2,14 +2,14 @@ package v1 -// PrefixedClaimMappingApplyConfiguration represents an declarative configuration of the PrefixedClaimMapping type for use +// PrefixedClaimMappingApplyConfiguration represents a declarative configuration of the PrefixedClaimMapping type for use // with apply. type PrefixedClaimMappingApplyConfiguration struct { TokenClaimMappingApplyConfiguration `json:",inline"` Prefix *string `json:"prefix,omitempty"` } -// PrefixedClaimMappingApplyConfiguration constructs an declarative configuration of the PrefixedClaimMapping type for use with +// PrefixedClaimMappingApplyConfiguration constructs a declarative configuration of the PrefixedClaimMapping type for use with // apply. 
func PrefixedClaimMapping() *PrefixedClaimMappingApplyConfiguration { return &PrefixedClaimMappingApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go index 15723fcc6..f0e90acc9 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go @@ -6,13 +6,13 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// ProfileCustomizationsApplyConfiguration represents an declarative configuration of the ProfileCustomizations type for use +// ProfileCustomizationsApplyConfiguration represents a declarative configuration of the ProfileCustomizations type for use // with apply. type ProfileCustomizationsApplyConfiguration struct { DynamicResourceAllocation *v1.DRAEnablement `json:"dynamicResourceAllocation,omitempty"` } -// ProfileCustomizationsApplyConfiguration constructs an declarative configuration of the ProfileCustomizations type for use with +// ProfileCustomizationsApplyConfiguration constructs a declarative configuration of the ProfileCustomizations type for use with // apply. func ProfileCustomizations() *ProfileCustomizationsApplyConfiguration { return &ProfileCustomizationsApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go index 9e074da6c..864277b59 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ProjectApplyConfiguration represents an declarative configuration of the Project type for use +// ProjectApplyConfiguration represents a declarative configuration of the Project type for use // with apply. type ProjectApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type ProjectApplyConfiguration struct { Status *apiconfigv1.ProjectStatus `json:"status,omitempty"` } -// Project constructs an declarative configuration of the Project type for use with +// Project constructs a declarative configuration of the Project type for use with // apply. func Project(name string) *ProjectApplyConfiguration { b := &ProjectApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *ProjectApplyConfiguration) WithStatus(value apiconfigv1.ProjectStatus) b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ProjectApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/projectspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/projectspec.go index 0e0a2334e..417be90be 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/projectspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/projectspec.go @@ -2,14 +2,14 @@ package v1 -// ProjectSpecApplyConfiguration represents an declarative configuration of the ProjectSpec type for use +// ProjectSpecApplyConfiguration represents a declarative configuration of the ProjectSpec type for use // with apply. type ProjectSpecApplyConfiguration struct { ProjectRequestMessage *string `json:"projectRequestMessage,omitempty"` ProjectRequestTemplate *TemplateReferenceApplyConfiguration `json:"projectRequestTemplate,omitempty"` } -// ProjectSpecApplyConfiguration constructs an declarative configuration of the ProjectSpec type for use with +// ProjectSpecApplyConfiguration constructs a declarative configuration of the ProjectSpec type for use with // apply. func ProjectSpec() *ProjectSpecApplyConfiguration { return &ProjectSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/promqlclustercondition.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/promqlclustercondition.go index 282559a40..e3f40e4f9 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/promqlclustercondition.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/promqlclustercondition.go @@ -2,13 +2,13 @@ package v1 -// PromQLClusterConditionApplyConfiguration represents an declarative configuration of the PromQLClusterCondition type for use +// PromQLClusterConditionApplyConfiguration represents a declarative configuration of the PromQLClusterCondition type for use // with apply. type PromQLClusterConditionApplyConfiguration struct { PromQL *string `json:"promql,omitempty"` } -// PromQLClusterConditionApplyConfiguration constructs an declarative configuration of the PromQLClusterCondition type for use with +// PromQLClusterConditionApplyConfiguration constructs a declarative configuration of the PromQLClusterCondition type for use with // apply. func PromQLClusterCondition() *PromQLClusterConditionApplyConfiguration { return &PromQLClusterConditionApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go index 79360af8e..fdfe260f5 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ProxyApplyConfiguration represents an declarative configuration of the Proxy type for use +// ProxyApplyConfiguration represents a declarative configuration of the Proxy type for use // with apply. 
type ProxyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type ProxyApplyConfiguration struct { Status *ProxyStatusApplyConfiguration `json:"status,omitempty"` } -// Proxy constructs an declarative configuration of the Proxy type for use with +// Proxy constructs a declarative configuration of the Proxy type for use with // apply. func Proxy(name string) *ProxyApplyConfiguration { b := &ProxyApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *ProxyApplyConfiguration) WithStatus(value *ProxyStatusApplyConfiguratio b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ProxyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxyspec.go index 0eecac56c..bd2cf6657 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxyspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxyspec.go @@ -2,7 +2,7 @@ package v1 -// ProxySpecApplyConfiguration represents an declarative configuration of the ProxySpec type for use +// ProxySpecApplyConfiguration represents a declarative configuration of the ProxySpec type for use // with apply. type ProxySpecApplyConfiguration struct { HTTPProxy *string `json:"httpProxy,omitempty"` @@ -12,7 +12,7 @@ type ProxySpecApplyConfiguration struct { TrustedCA *ConfigMapNameReferenceApplyConfiguration `json:"trustedCA,omitempty"` } -// ProxySpecApplyConfiguration constructs an declarative configuration of the ProxySpec type for use with +// ProxySpecApplyConfiguration constructs a declarative configuration of the ProxySpec type for use with // apply. func ProxySpec() *ProxySpecApplyConfiguration { return &ProxySpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxystatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxystatus.go index 069d479fb..784afdff6 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxystatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxystatus.go @@ -2,7 +2,7 @@ package v1 -// ProxyStatusApplyConfiguration represents an declarative configuration of the ProxyStatus type for use +// ProxyStatusApplyConfiguration represents a declarative configuration of the ProxyStatus type for use // with apply. type ProxyStatusApplyConfiguration struct { HTTPProxy *string `json:"httpProxy,omitempty"` @@ -10,7 +10,7 @@ type ProxyStatusApplyConfiguration struct { NoProxy *string `json:"noProxy,omitempty"` } -// ProxyStatusApplyConfiguration constructs an declarative configuration of the ProxyStatus type for use with +// ProxyStatusApplyConfiguration constructs a declarative configuration of the ProxyStatus type for use with // apply. 
func ProxyStatus() *ProxyStatusApplyConfiguration { return &ProxyStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrylocation.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrylocation.go index 2f48be932..d4aaa4e1e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrylocation.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrylocation.go @@ -2,14 +2,14 @@ package v1 -// RegistryLocationApplyConfiguration represents an declarative configuration of the RegistryLocation type for use +// RegistryLocationApplyConfiguration represents a declarative configuration of the RegistryLocation type for use // with apply. type RegistryLocationApplyConfiguration struct { DomainName *string `json:"domainName,omitempty"` Insecure *bool `json:"insecure,omitempty"` } -// RegistryLocationApplyConfiguration constructs an declarative configuration of the RegistryLocation type for use with +// RegistryLocationApplyConfiguration constructs a declarative configuration of the RegistryLocation type for use with // apply. func RegistryLocation() *RegistryLocationApplyConfiguration { return &RegistryLocationApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrysources.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrysources.go index 02ff90c57..a92592f30 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrysources.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrysources.go @@ -2,7 +2,7 @@ package v1 -// RegistrySourcesApplyConfiguration represents an declarative configuration of the RegistrySources type for use +// RegistrySourcesApplyConfiguration represents a declarative configuration of the RegistrySources type for use // with apply. type RegistrySourcesApplyConfiguration struct { InsecureRegistries []string `json:"insecureRegistries,omitempty"` @@ -11,7 +11,7 @@ type RegistrySourcesApplyConfiguration struct { ContainerRuntimeSearchRegistries []string `json:"containerRuntimeSearchRegistries,omitempty"` } -// RegistrySourcesApplyConfiguration constructs an declarative configuration of the RegistrySources type for use with +// RegistrySourcesApplyConfiguration constructs a declarative configuration of the RegistrySources type for use with // apply. func RegistrySources() *RegistrySourcesApplyConfiguration { return &RegistrySourcesApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go index 18b3f76f8..4ffecd926 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// ReleaseApplyConfiguration represents an declarative configuration of the Release type for use +// ReleaseApplyConfiguration represents a declarative configuration of the Release type for use // with apply. 
type ReleaseApplyConfiguration struct { Version *string `json:"version,omitempty"` @@ -15,7 +15,7 @@ type ReleaseApplyConfiguration struct { Channels []string `json:"channels,omitempty"` } -// ReleaseApplyConfiguration constructs an declarative configuration of the Release type for use with +// ReleaseApplyConfiguration constructs a declarative configuration of the Release type for use with // apply. func Release() *ReleaseApplyConfiguration { return &ReleaseApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go index 2806aea92..f903170f3 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// RepositoryDigestMirrorsApplyConfiguration represents an declarative configuration of the RepositoryDigestMirrors type for use +// RepositoryDigestMirrorsApplyConfiguration represents a declarative configuration of the RepositoryDigestMirrors type for use // with apply. type RepositoryDigestMirrorsApplyConfiguration struct { Source *string `json:"source,omitempty"` @@ -14,7 +14,7 @@ type RepositoryDigestMirrorsApplyConfiguration struct { Mirrors []v1.Mirror `json:"mirrors,omitempty"` } -// RepositoryDigestMirrorsApplyConfiguration constructs an declarative configuration of the RepositoryDigestMirrors type for use with +// RepositoryDigestMirrorsApplyConfiguration constructs a declarative configuration of the RepositoryDigestMirrors type for use with // apply. func RepositoryDigestMirrors() *RepositoryDigestMirrorsApplyConfiguration { return &RepositoryDigestMirrorsApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requestheaderidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requestheaderidentityprovider.go index fa787ce7c..2911473d0 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requestheaderidentityprovider.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requestheaderidentityprovider.go @@ -2,7 +2,7 @@ package v1 -// RequestHeaderIdentityProviderApplyConfiguration represents an declarative configuration of the RequestHeaderIdentityProvider type for use +// RequestHeaderIdentityProviderApplyConfiguration represents a declarative configuration of the RequestHeaderIdentityProvider type for use // with apply. type RequestHeaderIdentityProviderApplyConfiguration struct { LoginURL *string `json:"loginURL,omitempty"` @@ -15,7 +15,7 @@ type RequestHeaderIdentityProviderApplyConfiguration struct { EmailHeaders []string `json:"emailHeaders,omitempty"` } -// RequestHeaderIdentityProviderApplyConfiguration constructs an declarative configuration of the RequestHeaderIdentityProvider type for use with +// RequestHeaderIdentityProviderApplyConfiguration constructs a declarative configuration of the RequestHeaderIdentityProvider type for use with // apply. 
func RequestHeaderIdentityProvider() *RequestHeaderIdentityProviderApplyConfiguration { return &RequestHeaderIdentityProviderApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go index b4469882f..a9ffe1350 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go @@ -4,20 +4,20 @@ package v1 import ( apiconfigv1 "github.com/openshift/api/config/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RequiredHSTSPolicyApplyConfiguration represents an declarative configuration of the RequiredHSTSPolicy type for use +// RequiredHSTSPolicyApplyConfiguration represents a declarative configuration of the RequiredHSTSPolicy type for use // with apply. type RequiredHSTSPolicyApplyConfiguration struct { - NamespaceSelector *v1.LabelSelector `json:"namespaceSelector,omitempty"` + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` DomainPatterns []string `json:"domainPatterns,omitempty"` MaxAge *MaxAgePolicyApplyConfiguration `json:"maxAge,omitempty"` PreloadPolicy *apiconfigv1.PreloadPolicy `json:"preloadPolicy,omitempty"` IncludeSubDomainsPolicy *apiconfigv1.IncludeSubDomainsPolicy `json:"includeSubDomainsPolicy,omitempty"` } -// RequiredHSTSPolicyApplyConfiguration constructs an declarative configuration of the RequiredHSTSPolicy type for use with +// RequiredHSTSPolicyApplyConfiguration constructs a declarative configuration of the RequiredHSTSPolicy type for use with // apply. func RequiredHSTSPolicy() *RequiredHSTSPolicyApplyConfiguration { return &RequiredHSTSPolicyApplyConfiguration{} @@ -26,8 +26,8 @@ func RequiredHSTSPolicy() *RequiredHSTSPolicyApplyConfiguration { // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *RequiredHSTSPolicyApplyConfiguration) WithNamespaceSelector(value v1.LabelSelector) *RequiredHSTSPolicyApplyConfiguration { - b.NamespaceSelector = &value +func (b *RequiredHSTSPolicyApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *RequiredHSTSPolicyApplyConfiguration { + b.NamespaceSelector = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go index d9de60a42..33e73fab9 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// SchedulerApplyConfiguration represents an declarative configuration of the Scheduler type for use +// SchedulerApplyConfiguration represents a declarative configuration of the Scheduler type for use // with apply. 
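The RequiredHSTSPolicy hunk above is a behavioral change, not just a comment fix: WithNamespaceSelector now accepts a *LabelSelectorApplyConfiguration builder from k8s.io/client-go/applyconfigurations/meta/v1 instead of an apimachinery metav1.LabelSelector value. A hedged sketch of the updated caller side, assuming the standard LabelSelector builder and the generated variadic WithDomainPatterns setter:

    policy := configv1.RequiredHSTSPolicy().
        WithNamespaceSelector(metav1.LabelSelector().
            WithMatchLabels(map[string]string{"hsts": "required"})).
        WithDomainPatterns("*.example.com")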
type SchedulerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type SchedulerApplyConfiguration struct { Status *apiconfigv1.SchedulerStatus `json:"status,omitempty"` } -// Scheduler constructs an declarative configuration of the Scheduler type for use with +// Scheduler constructs a declarative configuration of the Scheduler type for use with // apply. func Scheduler(name string) *SchedulerApplyConfiguration { b := &SchedulerApplyConfiguration{} @@ -238,3 +238,9 @@ func (b *SchedulerApplyConfiguration) WithStatus(value apiconfigv1.SchedulerStat b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *SchedulerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/schedulerspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/schedulerspec.go index 1df067067..2160ab2ff 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/schedulerspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/schedulerspec.go @@ -6,7 +6,7 @@ import ( configv1 "github.com/openshift/api/config/v1" ) -// SchedulerSpecApplyConfiguration represents an declarative configuration of the SchedulerSpec type for use +// SchedulerSpecApplyConfiguration represents a declarative configuration of the SchedulerSpec type for use // with apply. type SchedulerSpecApplyConfiguration struct { Policy *ConfigMapNameReferenceApplyConfiguration `json:"policy,omitempty"` @@ -16,7 +16,7 @@ type SchedulerSpecApplyConfiguration struct { MastersSchedulable *bool `json:"mastersSchedulable,omitempty"` } -// SchedulerSpecApplyConfiguration constructs an declarative configuration of the SchedulerSpec type for use with +// SchedulerSpecApplyConfiguration constructs a declarative configuration of the SchedulerSpec type for use with // apply. func SchedulerSpec() *SchedulerSpecApplyConfiguration { return &SchedulerSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/secretnamereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/secretnamereference.go index 9cd673082..692056c6b 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/secretnamereference.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/secretnamereference.go @@ -2,13 +2,13 @@ package v1 -// SecretNameReferenceApplyConfiguration represents an declarative configuration of the SecretNameReference type for use +// SecretNameReferenceApplyConfiguration represents a declarative configuration of the SecretNameReference type for use // with apply. type SecretNameReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// SecretNameReferenceApplyConfiguration constructs an declarative configuration of the SecretNameReference type for use with +// SecretNameReferenceApplyConfiguration constructs a declarative configuration of the SecretNameReference type for use with // apply. 
func SecretNameReference() *SecretNameReferenceApplyConfiguration { return &SecretNameReferenceApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/signaturestore.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/signaturestore.go index b0d95c6e7..918f13df6 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/signaturestore.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/signaturestore.go @@ -2,14 +2,14 @@ package v1 -// SignatureStoreApplyConfiguration represents an declarative configuration of the SignatureStore type for use +// SignatureStoreApplyConfiguration represents a declarative configuration of the SignatureStore type for use // with apply. type SignatureStoreApplyConfiguration struct { URL *string `json:"url,omitempty"` CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"` } -// SignatureStoreApplyConfiguration constructs an declarative configuration of the SignatureStore type for use with +// SignatureStoreApplyConfiguration constructs a declarative configuration of the SignatureStore type for use with // apply. func SignatureStore() *SignatureStoreApplyConfiguration { return &SignatureStoreApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/templatereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/templatereference.go index e1da30be3..30112046a 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/templatereference.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/templatereference.go @@ -2,13 +2,13 @@ package v1 -// TemplateReferenceApplyConfiguration represents an declarative configuration of the TemplateReference type for use +// TemplateReferenceApplyConfiguration represents a declarative configuration of the TemplateReference type for use // with apply. type TemplateReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// TemplateReferenceApplyConfiguration constructs an declarative configuration of the TemplateReference type for use with +// TemplateReferenceApplyConfiguration constructs a declarative configuration of the TemplateReference type for use with // apply. func TemplateReference() *TemplateReferenceApplyConfiguration { return &TemplateReferenceApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go index 3e08cc9ad..c874fcf0c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go @@ -6,14 +6,14 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// TLSProfileSpecApplyConfiguration represents an declarative configuration of the TLSProfileSpec type for use +// TLSProfileSpecApplyConfiguration represents a declarative configuration of the TLSProfileSpec type for use // with apply. 
type TLSProfileSpecApplyConfiguration struct { Ciphers []string `json:"ciphers,omitempty"` MinTLSVersion *v1.TLSProtocolVersion `json:"minTLSVersion,omitempty"` } -// TLSProfileSpecApplyConfiguration constructs an declarative configuration of the TLSProfileSpec type for use with +// TLSProfileSpecApplyConfiguration constructs a declarative configuration of the TLSProfileSpec type for use with // apply. func TLSProfileSpec() *TLSProfileSpecApplyConfiguration { return &TLSProfileSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go index fb32e8516..004632489 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// TLSSecurityProfileApplyConfiguration represents an declarative configuration of the TLSSecurityProfile type for use +// TLSSecurityProfileApplyConfiguration represents a declarative configuration of the TLSSecurityProfile type for use // with apply. type TLSSecurityProfileApplyConfiguration struct { Type *v1.TLSProfileType `json:"type,omitempty"` @@ -16,7 +16,7 @@ type TLSSecurityProfileApplyConfiguration struct { Custom *CustomTLSProfileApplyConfiguration `json:"custom,omitempty"` } -// TLSSecurityProfileApplyConfiguration constructs an declarative configuration of the TLSSecurityProfile type for use with +// TLSSecurityProfileApplyConfiguration constructs a declarative configuration of the TLSSecurityProfile type for use with // apply. func TLSSecurityProfile() *TLSSecurityProfileApplyConfiguration { return &TLSSecurityProfileApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go index 91c29b61c..dbd509f06 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go @@ -2,13 +2,13 @@ package v1 -// TokenClaimMappingApplyConfiguration represents an declarative configuration of the TokenClaimMapping type for use +// TokenClaimMappingApplyConfiguration represents a declarative configuration of the TokenClaimMapping type for use // with apply. type TokenClaimMappingApplyConfiguration struct { Claim *string `json:"claim,omitempty"` } -// TokenClaimMappingApplyConfiguration constructs an declarative configuration of the TokenClaimMapping type for use with +// TokenClaimMappingApplyConfiguration constructs a declarative configuration of the TokenClaimMapping type for use with // apply. 
func TokenClaimMapping() *TokenClaimMappingApplyConfiguration { return &TokenClaimMappingApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go index 1a2fdb095..9b3b0bb56 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go @@ -2,14 +2,14 @@ package v1 -// TokenClaimMappingsApplyConfiguration represents an declarative configuration of the TokenClaimMappings type for use +// TokenClaimMappingsApplyConfiguration represents a declarative configuration of the TokenClaimMappings type for use // with apply. type TokenClaimMappingsApplyConfiguration struct { Username *UsernameClaimMappingApplyConfiguration `json:"username,omitempty"` Groups *PrefixedClaimMappingApplyConfiguration `json:"groups,omitempty"` } -// TokenClaimMappingsApplyConfiguration constructs an declarative configuration of the TokenClaimMappings type for use with +// TokenClaimMappingsApplyConfiguration constructs a declarative configuration of the TokenClaimMappings type for use with // apply. func TokenClaimMappings() *TokenClaimMappingsApplyConfiguration { return &TokenClaimMappingsApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go index 6793f9327..ad9968eef 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go @@ -6,14 +6,14 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// TokenClaimValidationRuleApplyConfiguration represents an declarative configuration of the TokenClaimValidationRule type for use +// TokenClaimValidationRuleApplyConfiguration represents a declarative configuration of the TokenClaimValidationRule type for use // with apply. type TokenClaimValidationRuleApplyConfiguration struct { Type *v1.TokenValidationRuleType `json:"type,omitempty"` RequiredClaim *TokenRequiredClaimApplyConfiguration `json:"requiredClaim,omitempty"` } -// TokenClaimValidationRuleApplyConfiguration constructs an declarative configuration of the TokenClaimValidationRule type for use with +// TokenClaimValidationRuleApplyConfiguration constructs a declarative configuration of the TokenClaimValidationRule type for use with // apply. 
func TokenClaimValidationRule() *TokenClaimValidationRuleApplyConfiguration { return &TokenClaimValidationRuleApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go index 865d4ddbe..7a103935e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go @@ -6,7 +6,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TokenConfigApplyConfiguration represents an declarative configuration of the TokenConfig type for use +// TokenConfigApplyConfiguration represents a declarative configuration of the TokenConfig type for use // with apply. type TokenConfigApplyConfiguration struct { AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty"` @@ -14,7 +14,7 @@ type TokenConfigApplyConfiguration struct { AccessTokenInactivityTimeout *v1.Duration `json:"accessTokenInactivityTimeout,omitempty"` } -// TokenConfigApplyConfiguration constructs an declarative configuration of the TokenConfig type for use with +// TokenConfigApplyConfiguration constructs a declarative configuration of the TokenConfig type for use with // apply. func TokenConfig() *TokenConfigApplyConfiguration { return &TokenConfigApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go index 808e61a1d..0811ca729 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/config/v1" ) -// TokenIssuerApplyConfiguration represents an declarative configuration of the TokenIssuer type for use +// TokenIssuerApplyConfiguration represents a declarative configuration of the TokenIssuer type for use // with apply. type TokenIssuerApplyConfiguration struct { URL *string `json:"issuerURL,omitempty"` @@ -14,7 +14,7 @@ type TokenIssuerApplyConfiguration struct { CertificateAuthority *ConfigMapNameReferenceApplyConfiguration `json:"issuerCertificateAuthority,omitempty"` } -// TokenIssuerApplyConfiguration constructs an declarative configuration of the TokenIssuer type for use with +// TokenIssuerApplyConfiguration constructs a declarative configuration of the TokenIssuer type for use with // apply. func TokenIssuer() *TokenIssuerApplyConfiguration { return &TokenIssuerApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go index f7ae34d01..6dec5b2a1 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go @@ -2,14 +2,14 @@ package v1 -// TokenRequiredClaimApplyConfiguration represents an declarative configuration of the TokenRequiredClaim type for use +// TokenRequiredClaimApplyConfiguration represents a declarative configuration of the TokenRequiredClaim type for use // with apply. 
 type TokenRequiredClaimApplyConfiguration struct {
 	Claim         *string `json:"claim,omitempty"`
 	RequiredValue *string `json:"requiredValue,omitempty"`
 }
 
-// TokenRequiredClaimApplyConfiguration constructs an declarative configuration of the TokenRequiredClaim type for use with
+// TokenRequiredClaimApplyConfiguration constructs a declarative configuration of the TokenRequiredClaim type for use with
 // apply.
 func TokenRequiredClaim() *TokenRequiredClaimApplyConfiguration {
 	return &TokenRequiredClaimApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go
index 1f63851c2..1d1547d87 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go
@@ -6,7 +6,7 @@ import (
 	v1 "github.com/openshift/api/config/v1"
 )
 
-// UpdateApplyConfiguration represents an declarative configuration of the Update type for use
+// UpdateApplyConfiguration represents a declarative configuration of the Update type for use
 // with apply.
 type UpdateApplyConfiguration struct {
 	Architecture *v1.ClusterVersionArchitecture `json:"architecture,omitempty"`
@@ -15,7 +15,7 @@ type UpdateApplyConfiguration struct {
 	Force *bool `json:"force,omitempty"`
 }
 
-// UpdateApplyConfiguration constructs an declarative configuration of the Update type for use with
+// UpdateApplyConfiguration constructs a declarative configuration of the Update type for use with
 // apply.
 func Update() *UpdateApplyConfiguration {
 	return &UpdateApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go
index 17dc2a0a1..6a4deaf3f 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go
@@ -7,7 +7,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-// UpdateHistoryApplyConfiguration represents an declarative configuration of the UpdateHistory type for use
+// UpdateHistoryApplyConfiguration represents a declarative configuration of the UpdateHistory type for use
 // with apply.
 type UpdateHistoryApplyConfiguration struct {
 	State *v1.UpdateState `json:"state,omitempty"`
@@ -19,7 +19,7 @@ type UpdateHistoryApplyConfiguration struct {
 	AcceptedRisks *string `json:"acceptedRisks,omitempty"`
 }
 
-// UpdateHistoryApplyConfiguration constructs an declarative configuration of the UpdateHistory type for use with
+// UpdateHistoryApplyConfiguration constructs a declarative configuration of the UpdateHistory type for use with
 // apply.
 func UpdateHistory() *UpdateHistoryApplyConfiguration {
 	return &UpdateHistoryApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go
index 641fb48b2..daec7fb46 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go
@@ -6,7 +6,7 @@ import (
 	configv1 "github.com/openshift/api/config/v1"
 )
 
-// UsernameClaimMappingApplyConfiguration represents an declarative configuration of the UsernameClaimMapping type for use
+// UsernameClaimMappingApplyConfiguration represents a declarative configuration of the UsernameClaimMapping type for use
 // with apply.
 type UsernameClaimMappingApplyConfiguration struct {
 	TokenClaimMappingApplyConfiguration `json:",inline"`
@@ -14,7 +14,7 @@ type UsernameClaimMappingApplyConfiguration struct {
 	Prefix *UsernamePrefixApplyConfiguration `json:"prefix,omitempty"`
 }
 
-// UsernameClaimMappingApplyConfiguration constructs an declarative configuration of the UsernameClaimMapping type for use with
+// UsernameClaimMappingApplyConfiguration constructs a declarative configuration of the UsernameClaimMapping type for use with
 // apply.
 func UsernameClaimMapping() *UsernameClaimMappingApplyConfiguration {
 	return &UsernameClaimMappingApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go
index b95bc9ba6..03720723b 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go
@@ -2,13 +2,13 @@
 
 package v1
 
-// UsernamePrefixApplyConfiguration represents an declarative configuration of the UsernamePrefix type for use
+// UsernamePrefixApplyConfiguration represents a declarative configuration of the UsernamePrefix type for use
 // with apply.
 type UsernamePrefixApplyConfiguration struct {
 	PrefixString *string `json:"prefixString,omitempty"`
 }
 
-// UsernamePrefixApplyConfiguration constructs an declarative configuration of the UsernamePrefix type for use with
+// UsernamePrefixApplyConfiguration constructs a declarative configuration of the UsernamePrefix type for use with
 // apply.
 func UsernamePrefix() *UsernamePrefixApplyConfiguration {
 	return &UsernamePrefixApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go
index 0bad0fadf..1c5ec2cf1 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go
@@ -2,7 +2,7 @@
 
 package v1
 
-// VSpherePlatformFailureDomainSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformFailureDomainSpec type for use
+// VSpherePlatformFailureDomainSpecApplyConfiguration represents a declarative configuration of the VSpherePlatformFailureDomainSpec type for use
 // with apply.
 type VSpherePlatformFailureDomainSpecApplyConfiguration struct {
 	Name *string `json:"name,omitempty"`
@@ -12,7 +12,7 @@ type VSpherePlatformFailureDomainSpecApplyConfiguration struct {
 	Topology *VSpherePlatformTopologyApplyConfiguration `json:"topology,omitempty"`
 }
 
-// VSpherePlatformFailureDomainSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformFailureDomainSpec type for use with
+// VSpherePlatformFailureDomainSpecApplyConfiguration constructs a declarative configuration of the VSpherePlatformFailureDomainSpec type for use with
 // apply.
 func VSpherePlatformFailureDomainSpec() *VSpherePlatformFailureDomainSpecApplyConfiguration {
 	return &VSpherePlatformFailureDomainSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go
index 873f0289e..d1dcebec4 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go
@@ -6,13 +6,13 @@ import (
 	v1 "github.com/openshift/api/config/v1"
 )
 
-// VSpherePlatformLoadBalancerApplyConfiguration represents an declarative configuration of the VSpherePlatformLoadBalancer type for use
+// VSpherePlatformLoadBalancerApplyConfiguration represents a declarative configuration of the VSpherePlatformLoadBalancer type for use
 // with apply.
 type VSpherePlatformLoadBalancerApplyConfiguration struct {
 	Type *v1.PlatformLoadBalancerType `json:"type,omitempty"`
 }
 
-// VSpherePlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the VSpherePlatformLoadBalancer type for use with
+// VSpherePlatformLoadBalancerApplyConfiguration constructs a declarative configuration of the VSpherePlatformLoadBalancer type for use with
 // apply.
 func VSpherePlatformLoadBalancer() *VSpherePlatformLoadBalancerApplyConfiguration {
 	return &VSpherePlatformLoadBalancerApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go
index 042737f1d..f83a0c50a 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go
@@ -2,14 +2,14 @@
 
 package v1
 
-// VSpherePlatformNodeNetworkingApplyConfiguration represents an declarative configuration of the VSpherePlatformNodeNetworking type for use
+// VSpherePlatformNodeNetworkingApplyConfiguration represents a declarative configuration of the VSpherePlatformNodeNetworking type for use
 // with apply.
 type VSpherePlatformNodeNetworkingApplyConfiguration struct {
 	External *VSpherePlatformNodeNetworkingSpecApplyConfiguration `json:"external,omitempty"`
 	Internal *VSpherePlatformNodeNetworkingSpecApplyConfiguration `json:"internal,omitempty"`
 }
 
-// VSpherePlatformNodeNetworkingApplyConfiguration constructs an declarative configuration of the VSpherePlatformNodeNetworking type for use with
+// VSpherePlatformNodeNetworkingApplyConfiguration constructs a declarative configuration of the VSpherePlatformNodeNetworking type for use with
 // apply.
 func VSpherePlatformNodeNetworking() *VSpherePlatformNodeNetworkingApplyConfiguration {
 	return &VSpherePlatformNodeNetworkingApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go
index e13c42d64..670448d3c 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go
@@ -2,7 +2,7 @@
 
 package v1
 
-// VSpherePlatformNodeNetworkingSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformNodeNetworkingSpec type for use
+// VSpherePlatformNodeNetworkingSpecApplyConfiguration represents a declarative configuration of the VSpherePlatformNodeNetworkingSpec type for use
 // with apply.
 type VSpherePlatformNodeNetworkingSpecApplyConfiguration struct {
 	NetworkSubnetCIDR []string `json:"networkSubnetCidr,omitempty"`
@@ -10,7 +10,7 @@ type VSpherePlatformNodeNetworkingSpecApplyConfiguration struct {
 	ExcludeNetworkSubnetCIDR []string `json:"excludeNetworkSubnetCidr,omitempty"`
 }
 
-// VSpherePlatformNodeNetworkingSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformNodeNetworkingSpec type for use with
+// VSpherePlatformNodeNetworkingSpecApplyConfiguration constructs a declarative configuration of the VSpherePlatformNodeNetworkingSpec type for use with
 // apply.
 func VSpherePlatformNodeNetworkingSpec() *VSpherePlatformNodeNetworkingSpecApplyConfiguration {
 	return &VSpherePlatformNodeNetworkingSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go
index f51fc93d5..d0d191331 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go
@@ -6,7 +6,7 @@ import (
 	configv1 "github.com/openshift/api/config/v1"
 )
 
-// VSpherePlatformSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformSpec type for use
+// VSpherePlatformSpecApplyConfiguration represents a declarative configuration of the VSpherePlatformSpec type for use
 // with apply.
 type VSpherePlatformSpecApplyConfiguration struct {
 	VCenters []VSpherePlatformVCenterSpecApplyConfiguration `json:"vcenters,omitempty"`
@@ -17,7 +17,7 @@ type VSpherePlatformSpecApplyConfiguration struct {
 	MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"`
 }
 
-// VSpherePlatformSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformSpec type for use with
+// VSpherePlatformSpecApplyConfiguration constructs a declarative configuration of the VSpherePlatformSpec type for use with
 // apply.
 func VSpherePlatformSpec() *VSpherePlatformSpecApplyConfiguration {
 	return &VSpherePlatformSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go
index 11a0420d5..36696df71 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go
@@ -6,7 +6,7 @@ import (
 	configv1 "github.com/openshift/api/config/v1"
 )
 
-// VSpherePlatformStatusApplyConfiguration represents an declarative configuration of the VSpherePlatformStatus type for use
+// VSpherePlatformStatusApplyConfiguration represents a declarative configuration of the VSpherePlatformStatus type for use
 // with apply.
 type VSpherePlatformStatusApplyConfiguration struct {
 	APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"`
@@ -18,7 +18,7 @@ type VSpherePlatformStatusApplyConfiguration struct {
 	MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"`
 }
 
-// VSpherePlatformStatusApplyConfiguration constructs an declarative configuration of the VSpherePlatformStatus type for use with
+// VSpherePlatformStatusApplyConfiguration constructs a declarative configuration of the VSpherePlatformStatus type for use with
 // apply.
 func VSpherePlatformStatus() *VSpherePlatformStatusApplyConfiguration {
 	return &VSpherePlatformStatusApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go
index a16213812..a3036a5cf 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go
@@ -2,7 +2,7 @@
 
 package v1
 
-// VSpherePlatformTopologyApplyConfiguration represents an declarative configuration of the VSpherePlatformTopology type for use
+// VSpherePlatformTopologyApplyConfiguration represents a declarative configuration of the VSpherePlatformTopology type for use
 // with apply.
 type VSpherePlatformTopologyApplyConfiguration struct {
 	Datacenter *string `json:"datacenter,omitempty"`
@@ -14,7 +14,7 @@ type VSpherePlatformTopologyApplyConfiguration struct {
 	Template *string `json:"template,omitempty"`
 }
 
-// VSpherePlatformTopologyApplyConfiguration constructs an declarative configuration of the VSpherePlatformTopology type for use with
+// VSpherePlatformTopologyApplyConfiguration constructs a declarative configuration of the VSpherePlatformTopology type for use with
 // apply.
 func VSpherePlatformTopology() *VSpherePlatformTopologyApplyConfiguration {
 	return &VSpherePlatformTopologyApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go
index 59b2261c2..ff6527618 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go
@@ -2,7 +2,7 @@
 
 package v1
 
-// VSpherePlatformVCenterSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformVCenterSpec type for use
+// VSpherePlatformVCenterSpecApplyConfiguration represents a declarative configuration of the VSpherePlatformVCenterSpec type for use
 // with apply.
 type VSpherePlatformVCenterSpecApplyConfiguration struct {
 	Server *string `json:"server,omitempty"`
@@ -10,7 +10,7 @@ type VSpherePlatformVCenterSpecApplyConfiguration struct {
 	Datacenters []string `json:"datacenters,omitempty"`
 }
 
-// VSpherePlatformVCenterSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformVCenterSpec type for use with
+// VSpherePlatformVCenterSpecApplyConfiguration constructs a declarative configuration of the VSpherePlatformVCenterSpec type for use with
 // apply.
 func VSpherePlatformVCenterSpec() *VSpherePlatformVCenterSpecApplyConfiguration {
 	return &VSpherePlatformVCenterSpecApplyConfiguration{}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/webhooktokenauthenticator.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/webhooktokenauthenticator.go
index fc1500859..4ed9e2d2d 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/webhooktokenauthenticator.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/webhooktokenauthenticator.go
@@ -2,13 +2,13 @@
 
 package v1
 
-// WebhookTokenAuthenticatorApplyConfiguration represents an declarative configuration of the WebhookTokenAuthenticator type for use
+// WebhookTokenAuthenticatorApplyConfiguration represents a declarative configuration of the WebhookTokenAuthenticator type for use
 // with apply.
 type WebhookTokenAuthenticatorApplyConfiguration struct {
 	KubeConfig *SecretNameReferenceApplyConfiguration `json:"kubeConfig,omitempty"`
 }
 
-// WebhookTokenAuthenticatorApplyConfiguration constructs an declarative configuration of the WebhookTokenAuthenticator type for use with
+// WebhookTokenAuthenticatorApplyConfiguration constructs a declarative configuration of the WebhookTokenAuthenticator type for use with
 // apply.
 func WebhookTokenAuthenticator() *WebhookTokenAuthenticatorApplyConfiguration {
 	return &WebhookTokenAuthenticatorApplyConfiguration{}
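The apply-configuration files above only receive doc-comment fixes ("an declarative" becomes "a declarative"), but they show what these generated builders are for: every field is a pointer or an omitempty slice, so only fields the caller explicitly sets end up in a server-side-apply patch and are owned by the applying field manager. A minimal sketch of how one of these constructors is used; the prefix value is a hypothetical example, not from this diff:

package main

import (
	"encoding/json"
	"fmt"

	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// Only PrefixString is set; unset fields stay out of the marshalled
	// patch entirely, so server-side apply will not claim ownership of them.
	prefix := "oidc:" // hypothetical value
	cfg := configv1.UsernamePrefix()
	cfg.PrefixString = &prefix

	data, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"prefixString":"oidc:"}
}

Because unset fields are simply absent from the patch, two controllers can co-own different fields of the same object without clobbering each other.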
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go
index 64d6d2ae7..003e4d2ef 100644
--- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go
@@ -373,19 +373,19 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: ingressIPs
       type:
         list:
          elementType:
            scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: machineNetworks
       type:
        list:
          elementType:
            scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
 - name: com.github.openshift.api.config.v1.BareMetalPlatformStatus
   map:
     fields:
@@ -397,7 +397,7 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: ingressIP
       type:
         scalar: string
@@ -406,7 +406,7 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: loadBalancer
       type:
         namedType: com.github.openshift.api.config.v1.BareMetalPlatformLoadBalancer
@@ -417,7 +417,7 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: nodeDNSIP
       type:
         scalar: string
@@ -630,7 +630,6 @@ var schemaYAML = typed.YAMLObject(`types:
     - name: extension
       type:
         namedType: __untyped_atomic_
-      default: {}
     - name: relatedObjects
       type:
         list:
@@ -649,7 +648,6 @@ var schemaYAML = typed.YAMLObject(`types:
     - name: lastTransitionTime
       type:
         namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
-      default: {}
     - name: message
       type:
         scalar: string
@@ -1621,6 +1619,10 @@ var schemaYAML = typed.YAMLObject(`types:
           elementType:
             scalar: string
           elementRelationship: atomic
+    - name: imageStreamImportMode
+      type:
+        scalar: string
+      default: ""
     - name: registrySources
       type:
         namedType: com.github.openshift.api.config.v1.RegistrySources
@@ -1634,6 +1636,9 @@ var schemaYAML = typed.YAMLObject(`types:
           elementType:
             scalar: string
           elementRelationship: atomic
+    - name: imageStreamImportMode
+      type:
+        scalar: string
     - name: internalRegistryHostname
       type:
         scalar: string
@@ -2022,6 +2027,49 @@ var schemaYAML = typed.YAMLObject(`types:
       type:
         namedType: com.github.openshift.api.config.v1.NetworkStatus
       default: {}
+- name: com.github.openshift.api.config.v1.NetworkDiagnostics
+  map:
+    fields:
+    - name: mode
+      type:
+        scalar: string
+      default: ""
+    - name: sourcePlacement
+      type:
+        namedType: com.github.openshift.api.config.v1.NetworkDiagnosticsSourcePlacement
+      default: {}
+    - name: targetPlacement
+      type:
+        namedType: com.github.openshift.api.config.v1.NetworkDiagnosticsTargetPlacement
+      default: {}
+- name: com.github.openshift.api.config.v1.NetworkDiagnosticsSourcePlacement
+  map:
+    fields:
+    - name: nodeSelector
+      type:
+        map:
+          elementType:
+            scalar: string
+    - name: tolerations
+      type:
+        list:
+          elementType:
+            namedType: io.k8s.api.core.v1.Toleration
+          elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.NetworkDiagnosticsTargetPlacement
+  map:
+    fields:
+    - name: nodeSelector
+      type:
+        map:
+          elementType:
+            scalar: string
+    - name: tolerations
+      type:
+        list:
+          elementType:
+            namedType: io.k8s.api.core.v1.Toleration
+          elementRelationship: atomic
 - name: com.github.openshift.api.config.v1.NetworkMigration
   map:
     fields:
@@ -2043,6 +2091,10 @@ var schemaYAML = typed.YAMLObject(`types:
     - name: externalIP
      type:
        namedType: com.github.openshift.api.config.v1.ExternalIPConfig
+    - name: networkDiagnostics
+      type:
+        namedType: com.github.openshift.api.config.v1.NetworkDiagnostics
+      default: {}
     - name: networkType
       type:
         scalar: string
@@ -2120,16 +2172,15 @@ var schemaYAML = typed.YAMLObject(`types:
         scalar: string
 - name: com.github.openshift.api.config.v1.NodeStatus
   map:
-    elementType:
-      scalar: untyped
-      list:
-        elementType:
-          namedType: __untyped_atomic_
-        elementRelationship: atomic
-      map:
-        elementType:
-          namedType: __untyped_deduced_
-        elementRelationship: separable
+    fields:
+    - name: conditions
+      type:
+        list:
+          elementType:
+            namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+          elementRelationship: associative
+          keys:
+          - type
 - name: com.github.openshift.api.config.v1.NutanixFailureDomain
   map:
     fields:
@@ -2526,19 +2577,19 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: ingressIPs
       type:
         list:
          elementType:
            scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: machineNetworks
       type:
        list:
          elementType:
            scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
 - name: com.github.openshift.api.config.v1.OpenStackPlatformStatus
   map:
     fields:
@@ -2550,7 +2601,7 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: cloudName
       type:
         scalar: string
@@ -2562,7 +2613,7 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: loadBalancer
       type:
         namedType: com.github.openshift.api.config.v1.OpenStackPlatformLoadBalancer
@@ -2573,7 +2624,7 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: nodeDNSIP
       type:
         scalar: string
@@ -3298,7 +3349,6 @@ var schemaYAML = typed.YAMLObject(`types:
     - name: startedTime
       type:
         namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
-      default: {}
     - name: state
       type:
         scalar: string
@@ -3401,7 +3451,7 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: failureDomains
       type:
         list:
@@ -3415,13 +3465,13 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
          elementType:
            scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: machineNetworks
       type:
        list:
          elementType:
            scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: nodeNetworking
       type:
         namedType: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworking
@@ -3443,7 +3493,7 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: ingressIP
       type:
         scalar: string
@@ -3452,7 +3502,7 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: loadBalancer
       type:
         namedType: com.github.openshift.api.config.v1.VSpherePlatformLoadBalancer
@@ -3463,7 +3513,7 @@ var schemaYAML = typed.YAMLObject(`types:
         list:
           elementType:
             scalar: string
-          elementRelationship: associative
+          elementRelationship: atomic
     - name: nodeDNSIP
       type:
         scalar: string
@@ -3865,6 +3915,7 @@ var schemaYAML = typed.YAMLObject(`types:
     - name: name
       type:
         scalar: string
+      default: ""
     - name: optional
       type:
         scalar: boolean
@@ -3915,6 +3966,9 @@ var schemaYAML = typed.YAMLObject(`types:
       type:
         scalar: string
       default: ""
+    - name: request
+      type:
+        scalar: string
 - name: io.k8s.api.core.v1.ResourceFieldSelector
   map:
     fields:
@@ -3924,7 +3978,6 @@ var schemaYAML = typed.YAMLObject(`types:
     - name: divisor
       type:
         namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
-      default: {}
     - name: resource
       type:
         scalar: string
@@ -3961,6 +4014,7 @@ var schemaYAML = typed.YAMLObject(`types:
     - name: name
       type:
         scalar: string
+      default: ""
     - name: optional
       type:
         scalar: boolean
@@ -3991,7 +4045,6 @@ var schemaYAML = typed.YAMLObject(`types:
     - name: lastTransitionTime
       type:
         namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
-      default: {}
     - name: message
       type:
         scalar: string
@@ -4092,7 +4145,6 @@ var schemaYAML = typed.YAMLObject(`types:
     - name: creationTimestamp
       type:
         namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
-      default: {}
     - name: deletionGracePeriodSeconds
       type:
         scalar: numeric
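The schema update above does two notable things: several string lists (apiServerInternalIPs, ingressIPs, machineNetworks) move from elementRelationship: associative to atomic, and new types such as NetworkDiagnostics are registered. An atomic list is treated by structured-merge-diff as a single value: an applier always sends and owns the complete list, rather than merging and owning individual elements. A minimal Go sketch of the practical consequence, using the VSpherePlatformStatus apply configuration from earlier in this diff; the CIDR values are hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	apiconfigv1 "github.com/openshift/api/config/v1"
	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// machineNetworks is now atomic in the apply schema, so server-side apply
	// replaces the whole list on each apply; an applier must therefore always
	// provide the complete desired list, not just the entries it cares about.
	status := configv1.VSpherePlatformStatus()
	status.MachineNetworks = []apiconfigv1.CIDR{"192.168.0.0/16", "10.128.0.0/14"}

	data, err := json.Marshal(status)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}

With an associative list two field managers could each own individual entries; with an atomic list, the last applier to touch the field owns all of it.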
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go
index d4fff3f95..5751cebe7 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go
@@ -4,9 +4,6 @@ package v1
 
 import (
 	"context"
-	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1 "github.com/openshift/api/config/v1"
 	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
@@ -14,7 +11,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
+	gentype "k8s.io/client-go/gentype"
 )
 
 // APIServersGetter has a method to return a APIServerInterface.
@@ -27,6 +24,7 @@ type APIServersGetter interface {
 type APIServerInterface interface {
 	Create(ctx context.Context, aPIServer *v1.APIServer, opts metav1.CreateOptions) (*v1.APIServer, error)
 	Update(ctx context.Context, aPIServer *v1.APIServer, opts metav1.UpdateOptions) (*v1.APIServer, error)
+	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
 	UpdateStatus(ctx context.Context, aPIServer *v1.APIServer, opts metav1.UpdateOptions) (*v1.APIServer, error)
 	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
 	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
@@ -35,193 +33,25 @@ type APIServerInterface interface {
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
 	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIServer, err error)
 	Apply(ctx context.Context, aPIServer *configv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.APIServer, err error)
+	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
 	ApplyStatus(ctx context.Context, aPIServer *configv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.APIServer, err error)
 	APIServerExpansion
 }
 
 // aPIServers implements APIServerInterface
 type aPIServers struct {
-	client rest.Interface
+	*gentype.ClientWithListAndApply[*v1.APIServer, *v1.APIServerList, *configv1.APIServerApplyConfiguration]
 }
 
 // newAPIServers returns a APIServers
 func newAPIServers(c *ConfigV1Client) *aPIServers {
 	return &aPIServers{
-		client: c.RESTClient(),
+		gentype.NewClientWithListAndApply[*v1.APIServer, *v1.APIServerList, *configv1.APIServerApplyConfiguration](
+			"apiservers",
+			c.RESTClient(),
+			scheme.ParameterCodec,
+			"",
+			func() *v1.APIServer { return &v1.APIServer{} },
+			func() *v1.APIServerList { return &v1.APIServerList{} }),
 	}
 }
-
-// Get takes name of the aPIServer, and returns the corresponding aPIServer object, and an error if there is any.
-func (c *aPIServers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.APIServer, err error) {
-	result = &v1.APIServer{}
-	err = c.client.Get().
-		Resource("apiservers").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of APIServers that match those selectors.
-func (c *aPIServers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.APIServerList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1.APIServerList{}
-	err = c.client.Get().
-		Resource("apiservers").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested aPIServers.
-func (c *aPIServers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Resource("apiservers").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a aPIServer and creates it.  Returns the server's representation of the aPIServer, and an error, if there is any.
-func (c *aPIServers) Create(ctx context.Context, aPIServer *v1.APIServer, opts metav1.CreateOptions) (result *v1.APIServer, err error) {
-	result = &v1.APIServer{}
-	err = c.client.Post().
-		Resource("apiservers").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(aPIServer).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a aPIServer and updates it. Returns the server's representation of the aPIServer, and an error, if there is any.
-func (c *aPIServers) Update(ctx context.Context, aPIServer *v1.APIServer, opts metav1.UpdateOptions) (result *v1.APIServer, err error) {
-	result = &v1.APIServer{}
-	err = c.client.Put().
-		Resource("apiservers").
-		Name(aPIServer.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(aPIServer).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *aPIServers) UpdateStatus(ctx context.Context, aPIServer *v1.APIServer, opts metav1.UpdateOptions) (result *v1.APIServer, err error) {
-	result = &v1.APIServer{}
-	err = c.client.Put().
-		Resource("apiservers").
-		Name(aPIServer.Name).
-		SubResource("status").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(aPIServer).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the aPIServer and deletes it. Returns an error if one occurs.
-func (c *aPIServers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
-	return c.client.Delete().
-		Resource("apiservers").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *aPIServers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Resource("apiservers").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched aPIServer.
-func (c *aPIServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIServer, err error) {
-	result = &v1.APIServer{}
-	err = c.client.Patch(pt).
-		Resource("apiservers").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Apply takes the given apply declarative configuration, applies it and returns the applied aPIServer.
-func (c *aPIServers) Apply(ctx context.Context, aPIServer *configv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.APIServer, err error) {
-	if aPIServer == nil {
-		return nil, fmt.Errorf("aPIServer provided to Apply must not be nil")
-	}
-	patchOpts := opts.ToPatchOptions()
-	data, err := json.Marshal(aPIServer)
-	if err != nil {
-		return nil, err
-	}
-	name := aPIServer.Name
-	if name == nil {
-		return nil, fmt.Errorf("aPIServer.Name must be provided to Apply")
-	}
-	result = &v1.APIServer{}
-	err = c.client.Patch(types.ApplyPatchType).
-		Resource("apiservers").
-		Name(*name).
-		VersionedParams(&patchOpts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// ApplyStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-func (c *aPIServers) ApplyStatus(ctx context.Context, aPIServer *configv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.APIServer, err error) {
-	if aPIServer == nil {
-		return nil, fmt.Errorf("aPIServer provided to Apply must not be nil")
-	}
-	patchOpts := opts.ToPatchOptions()
-	data, err := json.Marshal(aPIServer)
-	if err != nil {
-		return nil, err
-	}
-
-	name := aPIServer.Name
-	if name == nil {
-		return nil, fmt.Errorf("aPIServer.Name must be provided to Apply")
-	}
-
-	result = &v1.APIServer{}
-	err = c.client.Patch(types.ApplyPatchType).
-		Resource("apiservers").
-		Name(*name).
-		SubResource("status").
-		VersionedParams(&patchOpts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go
index 91c5d99d2..89b1779b3 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go
@@ -4,9 +4,6 @@ package v1
 
 import (
 	"context"
-	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1 "github.com/openshift/api/config/v1"
 	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
@@ -14,7 +11,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
+	gentype "k8s.io/client-go/gentype"
 )
 
 // AuthenticationsGetter has a method to return a AuthenticationInterface.
@@ -27,6 +24,7 @@ type AuthenticationsGetter interface {
 type AuthenticationInterface interface {
 	Create(ctx context.Context, authentication *v1.Authentication, opts metav1.CreateOptions) (*v1.Authentication, error)
 	Update(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (*v1.Authentication, error)
+	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
 	UpdateStatus(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (*v1.Authentication, error)
 	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
 	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
@@ -35,193 +33,25 @@ type AuthenticationInterface interface {
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
 	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Authentication, err error)
 	Apply(ctx context.Context, authentication *configv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Authentication, err error)
+	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
 	ApplyStatus(ctx context.Context, authentication *configv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Authentication, err error)
 	AuthenticationExpansion
 }
 
 // authentications implements AuthenticationInterface
 type authentications struct {
-	client rest.Interface
+	*gentype.ClientWithListAndApply[*v1.Authentication, *v1.AuthenticationList, *configv1.AuthenticationApplyConfiguration]
 }
 
 // newAuthentications returns a Authentications
 func newAuthentications(c *ConfigV1Client) *authentications {
 	return &authentications{
-		client: c.RESTClient(),
+		gentype.NewClientWithListAndApply[*v1.Authentication, *v1.AuthenticationList, *configv1.AuthenticationApplyConfiguration](
+			"authentications",
+			c.RESTClient(),
+			scheme.ParameterCodec,
+			"",
+			func() *v1.Authentication { return &v1.Authentication{} },
+			func() *v1.AuthenticationList { return &v1.AuthenticationList{} }),
 	}
 }
-
-// Get takes name of the authentication, and returns the corresponding authentication object, and an error if there is any.
-func (c *authentications) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Authentication, err error) {
-	result = &v1.Authentication{}
-	err = c.client.Get().
-		Resource("authentications").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of Authentications that match those selectors.
-func (c *authentications) List(ctx context.Context, opts metav1.ListOptions) (result *v1.AuthenticationList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1.AuthenticationList{}
-	err = c.client.Get().
-		Resource("authentications").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested authentications.
-func (c *authentications) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Resource("authentications").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a authentication and creates it.  Returns the server's representation of the authentication, and an error, if there is any.
-func (c *authentications) Create(ctx context.Context, authentication *v1.Authentication, opts metav1.CreateOptions) (result *v1.Authentication, err error) {
-	result = &v1.Authentication{}
-	err = c.client.Post().
-		Resource("authentications").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(authentication).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a authentication and updates it. Returns the server's representation of the authentication, and an error, if there is any.
-func (c *authentications) Update(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (result *v1.Authentication, err error) {
-	result = &v1.Authentication{}
-	err = c.client.Put().
-		Resource("authentications").
-		Name(authentication.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(authentication).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *authentications) UpdateStatus(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (result *v1.Authentication, err error) {
-	result = &v1.Authentication{}
-	err = c.client.Put().
-		Resource("authentications").
-		Name(authentication.Name).
-		SubResource("status").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(authentication).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the authentication and deletes it. Returns an error if one occurs.
-func (c *authentications) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
-	return c.client.Delete().
-		Resource("authentications").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *authentications) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Resource("authentications").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched authentication.
-func (c *authentications) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Authentication, err error) {
-	result = &v1.Authentication{}
-	err = c.client.Patch(pt).
-		Resource("authentications").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Apply takes the given apply declarative configuration, applies it and returns the applied authentication.
-func (c *authentications) Apply(ctx context.Context, authentication *configv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Authentication, err error) {
-	if authentication == nil {
-		return nil, fmt.Errorf("authentication provided to Apply must not be nil")
-	}
-	patchOpts := opts.ToPatchOptions()
-	data, err := json.Marshal(authentication)
-	if err != nil {
-		return nil, err
-	}
-	name := authentication.Name
-	if name == nil {
-		return nil, fmt.Errorf("authentication.Name must be provided to Apply")
-	}
-	result = &v1.Authentication{}
-	err = c.client.Patch(types.ApplyPatchType).
-		Resource("authentications").
-		Name(*name).
-		VersionedParams(&patchOpts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// ApplyStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-func (c *authentications) ApplyStatus(ctx context.Context, authentication *configv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Authentication, err error) {
-	if authentication == nil {
-		return nil, fmt.Errorf("authentication provided to Apply must not be nil")
-	}
-	patchOpts := opts.ToPatchOptions()
-	data, err := json.Marshal(authentication)
-	if err != nil {
-		return nil, err
-	}
-
-	name := authentication.Name
-	if name == nil {
-		return nil, fmt.Errorf("authentication.Name must be provided to Apply")
-	}
-
-	result = &v1.Authentication{}
-	err = c.client.Patch(types.ApplyPatchType).
-		Resource("authentications").
-		Name(*name).
-		SubResource("status").
-		VersionedParams(&patchOpts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go
index e2d09ef1c..2ecfb68d9 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go
@@ -4,9 +4,6 @@ package v1
 
 import (
 	"context"
-	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1 "github.com/openshift/api/config/v1"
 	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
@@ -14,7 +11,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
+	gentype "k8s.io/client-go/gentype"
 )
 
 // BuildsGetter has a method to return a BuildInterface.
@@ -39,143 +36,18 @@ type BuildInterface interface {
 
 // builds implements BuildInterface
 type builds struct {
-	client rest.Interface
+	*gentype.ClientWithListAndApply[*v1.Build, *v1.BuildList, *configv1.BuildApplyConfiguration]
 }
 
 // newBuilds returns a Builds
 func newBuilds(c *ConfigV1Client) *builds {
 	return &builds{
-		client: c.RESTClient(),
+		gentype.NewClientWithListAndApply[*v1.Build, *v1.BuildList, *configv1.BuildApplyConfiguration](
+			"builds",
+			c.RESTClient(),
+			scheme.ParameterCodec,
+			"",
+			func() *v1.Build { return &v1.Build{} },
+			func() *v1.BuildList { return &v1.BuildList{} }),
 	}
 }
-
-// Get takes name of the build, and returns the corresponding build object, and an error if there is any.
-func (c *builds) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Build, err error) {
-	result = &v1.Build{}
-	err = c.client.Get().
-		Resource("builds").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of Builds that match those selectors.
-func (c *builds) List(ctx context.Context, opts metav1.ListOptions) (result *v1.BuildList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1.BuildList{}
-	err = c.client.Get().
-		Resource("builds").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested builds.
-func (c *builds) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Resource("builds").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a build and creates it.  Returns the server's representation of the build, and an error, if there is any.
-func (c *builds) Create(ctx context.Context, build *v1.Build, opts metav1.CreateOptions) (result *v1.Build, err error) {
-	result = &v1.Build{}
-	err = c.client.Post().
-		Resource("builds").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(build).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any.
-func (c *builds) Update(ctx context.Context, build *v1.Build, opts metav1.UpdateOptions) (result *v1.Build, err error) {
-	result = &v1.Build{}
-	err = c.client.Put().
-		Resource("builds").
-		Name(build.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(build).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the build and deletes it. Returns an error if one occurs.
-func (c *builds) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
-	return c.client.Delete().
-		Resource("builds").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *builds) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Resource("builds").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched build.
-func (c *builds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Build, err error) {
-	result = &v1.Build{}
-	err = c.client.Patch(pt).
-		Resource("builds").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Apply takes the given apply declarative configuration, applies it and returns the applied build.
-func (c *builds) Apply(ctx context.Context, build *configv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Build, err error) {
-	if build == nil {
-		return nil, fmt.Errorf("build provided to Apply must not be nil")
-	}
-	patchOpts := opts.ToPatchOptions()
-	data, err := json.Marshal(build)
-	if err != nil {
-		return nil, err
-	}
-	name := build.Name
-	if name == nil {
-		return nil, fmt.Errorf("build.Name must be provided to Apply")
-	}
-	result = &v1.Build{}
-	err = c.client.Patch(types.ApplyPatchType).
-		Resource("builds").
-		Name(*name).
-		VersionedParams(&patchOpts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go
index 941a16094..b1015c40d 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go
@@ -4,9 +4,6 @@ package v1
 
 import (
 	"context"
-	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1 "github.com/openshift/api/config/v1"
 	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
@@ -14,7 +11,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
+	gentype "k8s.io/client-go/gentype"
 )
 
 // ClusterOperatorsGetter has a method to return a ClusterOperatorInterface.
@@ -27,6 +24,7 @@ type ClusterOperatorsGetter interface {
 type ClusterOperatorInterface interface {
 	Create(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.CreateOptions) (*v1.ClusterOperator, error)
 	Update(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.UpdateOptions) (*v1.ClusterOperator, error)
+	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
 	UpdateStatus(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.UpdateOptions) (*v1.ClusterOperator, error)
 	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
 	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
@@ -35,193 +33,25 @@ type ClusterOperatorInterface interface {
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
 	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterOperator, err error)
 	Apply(ctx context.Context, clusterOperator *configv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterOperator, err error)
+	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
 	ApplyStatus(ctx context.Context, clusterOperator *configv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterOperator, err error)
 	ClusterOperatorExpansion
 }
 
 // clusterOperators implements ClusterOperatorInterface
 type clusterOperators struct {
-	client rest.Interface
+	*gentype.ClientWithListAndApply[*v1.ClusterOperator, *v1.ClusterOperatorList, *configv1.ClusterOperatorApplyConfiguration]
 }
 
 // newClusterOperators returns a ClusterOperators
 func newClusterOperators(c *ConfigV1Client) *clusterOperators {
 	return &clusterOperators{
-		client: c.RESTClient(),
+		gentype.NewClientWithListAndApply[*v1.ClusterOperator, *v1.ClusterOperatorList, *configv1.ClusterOperatorApplyConfiguration](
+			"clusteroperators",
+			c.RESTClient(),
+			scheme.ParameterCodec,
+			"",
+			func() *v1.ClusterOperator { return &v1.ClusterOperator{} },
+			func() *v1.ClusterOperatorList { return &v1.ClusterOperatorList{} }),
 	}
 }
-
-// Get takes name of the clusterOperator, and returns the corresponding clusterOperator object, and an error if there is any.
-func (c *clusterOperators) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterOperator, err error) {
-	result = &v1.ClusterOperator{}
-	err = c.client.Get().
-		Resource("clusteroperators").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of ClusterOperators that match those selectors.
-func (c *clusterOperators) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterOperatorList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1.ClusterOperatorList{}
-	err = c.client.Get().
-		Resource("clusteroperators").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested clusterOperators.
-func (c *clusterOperators) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Resource("clusteroperators").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a clusterOperator and creates it.  Returns the server's representation of the clusterOperator, and an error, if there is any.
-func (c *clusterOperators) Create(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.CreateOptions) (result *v1.ClusterOperator, err error) {
-	result = &v1.ClusterOperator{}
-	err = c.client.Post().
-		Resource("clusteroperators").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(clusterOperator).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a clusterOperator and updates it. Returns the server's representation of the clusterOperator, and an error, if there is any.
-func (c *clusterOperators) Update(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.UpdateOptions) (result *v1.ClusterOperator, err error) {
-	result = &v1.ClusterOperator{}
-	err = c.client.Put().
-		Resource("clusteroperators").
-		Name(clusterOperator.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(clusterOperator).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *clusterOperators) UpdateStatus(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.UpdateOptions) (result *v1.ClusterOperator, err error) {
-	result = &v1.ClusterOperator{}
-	err = c.client.Put().
-		Resource("clusteroperators").
-		Name(clusterOperator.Name).
-		SubResource("status").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(clusterOperator).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the clusterOperator and deletes it. Returns an error if one occurs.
-func (c *clusterOperators) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
-	return c.client.Delete().
-		Resource("clusteroperators").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *clusterOperators) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Resource("clusteroperators").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched clusterOperator.
-func (c *clusterOperators) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterOperator, err error) {
-	result = &v1.ClusterOperator{}
-	err = c.client.Patch(pt).
-		Resource("clusteroperators").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Apply takes the given apply declarative configuration, applies it and returns the applied clusterOperator.
-func (c *clusterOperators) Apply(ctx context.Context, clusterOperator *configv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterOperator, err error) {
-	if clusterOperator == nil {
-		return nil, fmt.Errorf("clusterOperator provided to Apply must not be nil")
-	}
-	patchOpts := opts.ToPatchOptions()
-	data, err := json.Marshal(clusterOperator)
-	if err != nil {
-		return nil, err
-	}
-	name := clusterOperator.Name
-	if name == nil {
-		return nil, fmt.Errorf("clusterOperator.Name must be provided to Apply")
-	}
-	result = &v1.ClusterOperator{}
-	err = c.client.Patch(types.ApplyPatchType).
-		Resource("clusteroperators").
-		Name(*name).
-		VersionedParams(&patchOpts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// ApplyStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-func (c *clusterOperators) ApplyStatus(ctx context.Context, clusterOperator *configv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterOperator, err error) {
-	if clusterOperator == nil {
-		return nil, fmt.Errorf("clusterOperator provided to Apply must not be nil")
-	}
-	patchOpts := opts.ToPatchOptions()
-	data, err := json.Marshal(clusterOperator)
-	if err != nil {
-		return nil, err
-	}
-
-	name := clusterOperator.Name
-	if name == nil {
-		return nil, fmt.Errorf("clusterOperator.Name must be provided to Apply")
-	}
-
-	result = &v1.ClusterOperator{}
-	err = c.client.Patch(types.ApplyPatchType).
-		Resource("clusteroperators").
-		Name(*name).
-		SubResource("status").
-		VersionedParams(&patchOpts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go
index 8b7e5b9d2..2b6717096 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go
@@ -4,9 +4,6 @@ package v1
 
 import (
 	"context"
-	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1 "github.com/openshift/api/config/v1"
 	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
@@ -14,7 +11,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
+	gentype "k8s.io/client-go/gentype"
 )
 
 // ClusterVersionsGetter has a method to return a ClusterVersionInterface.
@@ -27,6 +24,7 @@ type ClusterVersionsGetter interface {
 type ClusterVersionInterface interface {
 	Create(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.CreateOptions) (*v1.ClusterVersion, error)
 	Update(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.UpdateOptions) (*v1.ClusterVersion, error)
+	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
 	UpdateStatus(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.UpdateOptions) (*v1.ClusterVersion, error)
 	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
 	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
@@ -35,193 +33,25 @@ type ClusterVersionInterface interface {
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
 	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterVersion, err error)
 	Apply(ctx context.Context, clusterVersion *configv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterVersion, err error)
+	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
 	ApplyStatus(ctx context.Context, clusterVersion *configv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterVersion, err error)
 	ClusterVersionExpansion
 }
 
 // clusterVersions implements ClusterVersionInterface
 type clusterVersions struct {
-	client rest.Interface
+	*gentype.ClientWithListAndApply[*v1.ClusterVersion, *v1.ClusterVersionList, *configv1.ClusterVersionApplyConfiguration]
 }
 
 // newClusterVersions returns a ClusterVersions
 func newClusterVersions(c *ConfigV1Client) *clusterVersions {
 	return &clusterVersions{
-		client: c.RESTClient(),
+		gentype.NewClientWithListAndApply[*v1.ClusterVersion, *v1.ClusterVersionList, *configv1.ClusterVersionApplyConfiguration](
+			"clusterversions",
+			c.RESTClient(),
+			scheme.ParameterCodec,
+			"",
+			func() *v1.ClusterVersion { return &v1.ClusterVersion{} },
+			func() *v1.ClusterVersionList { return &v1.ClusterVersionList{} }),
 	}
 }
-
-// Get takes name of the clusterVersion, and returns the corresponding clusterVersion object, and an error if there is any.
-func (c *clusterVersions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterVersion, err error) {
-	result = &v1.ClusterVersion{}
-	err = c.client.Get().
-		Resource("clusterversions").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of ClusterVersions that match those selectors.
-func (c *clusterVersions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterVersionList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1.ClusterVersionList{}
-	err = c.client.Get().
-		Resource("clusterversions").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested clusterVersions.
-func (c *clusterVersions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Resource("clusterversions").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a clusterVersion and creates it.  Returns the server's representation of the clusterVersion, and an error, if there is any.
-func (c *clusterVersions) Create(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.CreateOptions) (result *v1.ClusterVersion, err error) {
-	result = &v1.ClusterVersion{}
-	err = c.client.Post().
-		Resource("clusterversions").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(clusterVersion).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a clusterVersion and updates it. Returns the server's representation of the clusterVersion, and an error, if there is any.
-func (c *clusterVersions) Update(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.UpdateOptions) (result *v1.ClusterVersion, err error) {
-	result = &v1.ClusterVersion{}
-	err = c.client.Put().
-		Resource("clusterversions").
-		Name(clusterVersion.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(clusterVersion).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *clusterVersions) UpdateStatus(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.UpdateOptions) (result *v1.ClusterVersion, err error) {
-	result = &v1.ClusterVersion{}
-	err = c.client.Put().
-		Resource("clusterversions").
-		Name(clusterVersion.Name).
-		SubResource("status").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(clusterVersion).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the clusterVersion and deletes it. Returns an error if one occurs.
-func (c *clusterVersions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
-	return c.client.Delete().
-		Resource("clusterversions").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *clusterVersions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Resource("clusterversions").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched clusterVersion.
-func (c *clusterVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterVersion, err error) {
-	result = &v1.ClusterVersion{}
-	err = c.client.Patch(pt).
-		Resource("clusterversions").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Apply takes the given apply declarative configuration, applies it and returns the applied clusterVersion.
-func (c *clusterVersions) Apply(ctx context.Context, clusterVersion *configv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterVersion, err error) {
-	if clusterVersion == nil {
-		return nil, fmt.Errorf("clusterVersion provided to Apply must not be nil")
-	}
-	patchOpts := opts.ToPatchOptions()
-	data, err := json.Marshal(clusterVersion)
-	if err != nil {
-		return nil, err
-	}
-	name := clusterVersion.Name
-	if name == nil {
-		return nil, fmt.Errorf("clusterVersion.Name must be provided to Apply")
-	}
-	result = &v1.ClusterVersion{}
-	err = c.client.Patch(types.ApplyPatchType).
-		Resource("clusterversions").
-		Name(*name).
-		VersionedParams(&patchOpts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// ApplyStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-func (c *clusterVersions) ApplyStatus(ctx context.Context, clusterVersion *configv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterVersion, err error) {
-	if clusterVersion == nil {
-		return nil, fmt.Errorf("clusterVersion provided to Apply must not be nil")
-	}
-	patchOpts := opts.ToPatchOptions()
-	data, err := json.Marshal(clusterVersion)
-	if err != nil {
-		return nil, err
-	}
-
-	name := clusterVersion.Name
-	if name == nil {
-		return nil, fmt.Errorf("clusterVersion.Name must be provided to Apply")
-	}
-
-	result = &v1.ClusterVersion{}
-	err = c.client.Patch(types.ApplyPatchType).
-		Resource("clusterversions").
-		Name(*name).
-		SubResource("status").
-		VersionedParams(&patchOpts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go
index 99c51bf97..994d5994d 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go
@@ -4,9 +4,6 @@ package v1
 
 import (
 	"context"
-	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1 "github.com/openshift/api/config/v1"
 	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
@@ -14,7 +11,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
+	gentype "k8s.io/client-go/gentype"
 )
 
 // ConsolesGetter has a method to return a ConsoleInterface.
@@ -27,6 +24,7 @@ type ConsolesGetter interface {
 type ConsoleInterface interface {
 	Create(ctx context.Context, console *v1.Console, opts metav1.CreateOptions) (*v1.Console, error)
 	Update(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (*v1.Console, error)
+	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
 	UpdateStatus(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (*v1.Console, error)
 	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
 	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
@@ -35,193 +33,25 @@ type ConsoleInterface interface {
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
 	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Console, err error)
 	Apply(ctx context.Context, console *configv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Console, err error)
+	// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
ApplyStatus(ctx context.Context, console *configv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Console, err error) ConsoleExpansion } // consoles implements ConsoleInterface type consoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Console, *v1.ConsoleList, *configv1.ConsoleApplyConfiguration] } // newConsoles returns a Consoles func newConsoles(c *ConfigV1Client) *consoles { return &consoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Console, *v1.ConsoleList, *configv1.ConsoleApplyConfiguration]( + "consoles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Console { return &v1.Console{} }, + func() *v1.ConsoleList { return &v1.ConsoleList{} }), } } - -// Get takes name of the console, and returns the corresponding console object, and an error if there is any. -func (c *consoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Console, err error) { - result = &v1.Console{} - err = c.client.Get(). - Resource("consoles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Consoles that match those selectors. -func (c *consoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConsoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ConsoleList{} - err = c.client.Get(). - Resource("consoles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested consoles. -func (c *consoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("consoles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a console and creates it. Returns the server's representation of the console, and an error, if there is any. -func (c *consoles) Create(ctx context.Context, console *v1.Console, opts metav1.CreateOptions) (result *v1.Console, err error) { - result = &v1.Console{} - err = c.client.Post(). - Resource("consoles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(console). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a console and updates it. Returns the server's representation of the console, and an error, if there is any. -func (c *consoles) Update(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (result *v1.Console, err error) { - result = &v1.Console{} - err = c.client.Put(). - Resource("consoles"). - Name(console.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(console). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *consoles) UpdateStatus(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (result *v1.Console, err error) { - result = &v1.Console{} - err = c.client.Put(). - Resource("consoles"). - Name(console.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(console). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the console and deletes it. Returns an error if one occurs. -func (c *consoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("consoles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *consoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("consoles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched console. -func (c *consoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Console, err error) { - result = &v1.Console{} - err = c.client.Patch(pt). - Resource("consoles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied console. -func (c *consoles) Apply(ctx context.Context, console *configv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Console, err error) { - if console == nil { - return nil, fmt.Errorf("console provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(console) - if err != nil { - return nil, err - } - name := console.Name - if name == nil { - return nil, fmt.Errorf("console.Name must be provided to Apply") - } - result = &v1.Console{} - err = c.client.Patch(types.ApplyPatchType). - Resource("consoles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *consoles) ApplyStatus(ctx context.Context, console *configv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Console, err error) { - if console == nil { - return nil, fmt.Errorf("console provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(console) - if err != nil { - return nil, err - } - - name := console.Name - if name == nil { - return nil, fmt.Errorf("console.Name must be provided to Apply") - } - - result = &v1.Console{} - err = c.client.Patch(types.ApplyPatchType). - Resource("consoles"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go index 86fbbcf95..b91aa0d0d 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // DNSesGetter has a method to return a DNSInterface. @@ -27,6 +24,7 @@ type DNSesGetter interface { type DNSInterface interface { Create(ctx context.Context, dNS *v1.DNS, opts metav1.CreateOptions) (*v1.DNS, error) Update(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (*v1.DNS, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (*v1.DNS, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type DNSInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DNS, err error) Apply(ctx context.Context, dNS *configv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DNS, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, dNS *configv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DNS, err error) DNSExpansion } // dNSes implements DNSInterface type dNSes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.DNS, *v1.DNSList, *configv1.DNSApplyConfiguration] } // newDNSes returns a DNSes func newDNSes(c *ConfigV1Client) *dNSes { return &dNSes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.DNS, *v1.DNSList, *configv1.DNSApplyConfiguration]( + "dnses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.DNS { return &v1.DNS{} }, + func() *v1.DNSList { return &v1.DNSList{} }), } } - -// Get takes name of the dNS, and returns the corresponding dNS object, and an error if there is any. -func (c *dNSes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DNS, err error) { - result = &v1.DNS{} - err = c.client.Get(). - Resource("dnses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DNSes that match those selectors. -func (c *dNSes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DNSList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.DNSList{} - err = c.client.Get(). - Resource("dnses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested dNSes. -func (c *dNSes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("dnses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a dNS and creates it. Returns the server's representation of the dNS, and an error, if there is any. -func (c *dNSes) Create(ctx context.Context, dNS *v1.DNS, opts metav1.CreateOptions) (result *v1.DNS, err error) { - result = &v1.DNS{} - err = c.client.Post(). - Resource("dnses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(dNS). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a dNS and updates it. Returns the server's representation of the dNS, and an error, if there is any. -func (c *dNSes) Update(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (result *v1.DNS, err error) { - result = &v1.DNS{} - err = c.client.Put(). - Resource("dnses"). - Name(dNS.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(dNS). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *dNSes) UpdateStatus(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (result *v1.DNS, err error) { - result = &v1.DNS{} - err = c.client.Put(). - Resource("dnses"). - Name(dNS.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(dNS). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the dNS and deletes it. Returns an error if one occurs. -func (c *dNSes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("dnses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *dNSes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("dnses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched dNS. -func (c *dNSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DNS, err error) { - result = &v1.DNS{} - err = c.client.Patch(pt). - Resource("dnses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied dNS. 
-func (c *dNSes) Apply(ctx context.Context, dNS *configv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DNS, err error) { - if dNS == nil { - return nil, fmt.Errorf("dNS provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(dNS) - if err != nil { - return nil, err - } - name := dNS.Name - if name == nil { - return nil, fmt.Errorf("dNS.Name must be provided to Apply") - } - result = &v1.DNS{} - err = c.client.Patch(types.ApplyPatchType). - Resource("dnses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *dNSes) ApplyStatus(ctx context.Context, dNS *configv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DNS, err error) { - if dNS == nil { - return nil, fmt.Errorf("dNS provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(dNS) - if err != nil { - return nil, err - } - - name := dNS.Name - if name == nil { - return nil, fmt.Errorf("dNS.Name must be provided to Apply") - } - - result = &v1.DNS{} - err = c.client.Patch(types.ApplyPatchType). - Resource("dnses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go index 112322c84..fcbfe3577 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // FeatureGatesGetter has a method to return a FeatureGateInterface. @@ -27,6 +24,7 @@ type FeatureGatesGetter interface { type FeatureGateInterface interface { Create(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.CreateOptions) (*v1.FeatureGate, error) Update(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.UpdateOptions) (*v1.FeatureGate, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.UpdateOptions) (*v1.FeatureGate, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type FeatureGateInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FeatureGate, err error) Apply(ctx context.Context, featureGate *configv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FeatureGate, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, featureGate *configv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FeatureGate, err error) FeatureGateExpansion } // featureGates implements FeatureGateInterface type featureGates struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.FeatureGate, *v1.FeatureGateList, *configv1.FeatureGateApplyConfiguration] } // newFeatureGates returns a FeatureGates func newFeatureGates(c *ConfigV1Client) *featureGates { return &featureGates{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.FeatureGate, *v1.FeatureGateList, *configv1.FeatureGateApplyConfiguration]( + "featuregates", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.FeatureGate { return &v1.FeatureGate{} }, + func() *v1.FeatureGateList { return &v1.FeatureGateList{} }), } } - -// Get takes name of the featureGate, and returns the corresponding featureGate object, and an error if there is any. -func (c *featureGates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FeatureGate, err error) { - result = &v1.FeatureGate{} - err = c.client.Get(). - Resource("featuregates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FeatureGates that match those selectors. -func (c *featureGates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FeatureGateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.FeatureGateList{} - err = c.client.Get(). - Resource("featuregates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested featureGates. -func (c *featureGates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("featuregates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a featureGate and creates it. Returns the server's representation of the featureGate, and an error, if there is any. -func (c *featureGates) Create(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.CreateOptions) (result *v1.FeatureGate, err error) { - result = &v1.FeatureGate{} - err = c.client.Post(). - Resource("featuregates"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(featureGate). 
- Do(ctx). - Into(result) - return -} - -// Update takes the representation of a featureGate and updates it. Returns the server's representation of the featureGate, and an error, if there is any. -func (c *featureGates) Update(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.UpdateOptions) (result *v1.FeatureGate, err error) { - result = &v1.FeatureGate{} - err = c.client.Put(). - Resource("featuregates"). - Name(featureGate.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(featureGate). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *featureGates) UpdateStatus(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.UpdateOptions) (result *v1.FeatureGate, err error) { - result = &v1.FeatureGate{} - err = c.client.Put(). - Resource("featuregates"). - Name(featureGate.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(featureGate). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the featureGate and deletes it. Returns an error if one occurs. -func (c *featureGates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("featuregates"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *featureGates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("featuregates"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched featureGate. -func (c *featureGates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FeatureGate, err error) { - result = &v1.FeatureGate{} - err = c.client.Patch(pt). - Resource("featuregates"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied featureGate. -func (c *featureGates) Apply(ctx context.Context, featureGate *configv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FeatureGate, err error) { - if featureGate == nil { - return nil, fmt.Errorf("featureGate provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(featureGate) - if err != nil { - return nil, err - } - name := featureGate.Name - if name == nil { - return nil, fmt.Errorf("featureGate.Name must be provided to Apply") - } - result = &v1.FeatureGate{} - err = c.client.Patch(types.ApplyPatchType). - Resource("featuregates"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *featureGates) ApplyStatus(ctx context.Context, featureGate *configv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FeatureGate, err error) { - if featureGate == nil { - return nil, fmt.Errorf("featureGate provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(featureGate) - if err != nil { - return nil, err - } - - name := featureGate.Name - if name == nil { - return nil, fmt.Errorf("featureGate.Name must be provided to Apply") - } - - result = &v1.FeatureGate{} - err = c.client.Patch(types.ApplyPatchType). - Resource("featuregates"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go index 5357f96d8..536693d3e 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImagesGetter has a method to return a ImageInterface. @@ -27,6 +24,7 @@ type ImagesGetter interface { type ImageInterface interface { Create(ctx context.Context, image *v1.Image, opts metav1.CreateOptions) (*v1.Image, error) Update(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (*v1.Image, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (*v1.Image, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type ImageInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Image, err error) Apply(ctx context.Context, image *configv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, image *configv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) ImageExpansion } // images implements ImageInterface type images struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Image, *v1.ImageList, *configv1.ImageApplyConfiguration] } // newImages returns a Images func newImages(c *ConfigV1Client) *images { return &images{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Image, *v1.ImageList, *configv1.ImageApplyConfiguration]( + "images", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Image { return &v1.Image{} }, + func() *v1.ImageList { return &v1.ImageList{} }), } } - -// Get takes name of the image, and returns the corresponding image object, and an error if there is any. -func (c *images) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Image, err error) { - result = &v1.Image{} - err = c.client.Get(). - Resource("images"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Images that match those selectors. -func (c *images) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ImageList{} - err = c.client.Get(). - Resource("images"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested images. -func (c *images) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("images"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any. -func (c *images) Create(ctx context.Context, image *v1.Image, opts metav1.CreateOptions) (result *v1.Image, err error) { - result = &v1.Image{} - err = c.client.Post(). - Resource("images"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(image). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a image and updates it. Returns the server's representation of the image, and an error, if there is any. -func (c *images) Update(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (result *v1.Image, err error) { - result = &v1.Image{} - err = c.client.Put(). - Resource("images"). - Name(image.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(image). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *images) UpdateStatus(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (result *v1.Image, err error) { - result = &v1.Image{} - err = c.client.Put(). - Resource("images"). - Name(image.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(image). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the image and deletes it. Returns an error if one occurs. 
-func (c *images) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("images"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *images) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("images"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched image. -func (c *images) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Image, err error) { - result = &v1.Image{} - err = c.client.Patch(pt). - Resource("images"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied image. -func (c *images) Apply(ctx context.Context, image *configv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) { - if image == nil { - return nil, fmt.Errorf("image provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(image) - if err != nil { - return nil, err - } - name := image.Name - if name == nil { - return nil, fmt.Errorf("image.Name must be provided to Apply") - } - result = &v1.Image{} - err = c.client.Patch(types.ApplyPatchType). - Resource("images"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *images) ApplyStatus(ctx context.Context, image *configv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) { - if image == nil { - return nil, fmt.Errorf("image provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(image) - if err != nil { - return nil, err - } - - name := image.Name - if name == nil { - return nil, fmt.Errorf("image.Name must be provided to Apply") - } - - result = &v1.Image{} - err = c.client.Patch(types.ApplyPatchType). - Resource("images"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go index 3128290ca..bc8a8b342 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImageContentPoliciesGetter has a method to return a ImageContentPolicyInterface. @@ -39,143 +36,18 @@ type ImageContentPolicyInterface interface { // imageContentPolicies implements ImageContentPolicyInterface type imageContentPolicies struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ImageContentPolicy, *v1.ImageContentPolicyList, *configv1.ImageContentPolicyApplyConfiguration] } // newImageContentPolicies returns a ImageContentPolicies func newImageContentPolicies(c *ConfigV1Client) *imageContentPolicies { return &imageContentPolicies{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ImageContentPolicy, *v1.ImageContentPolicyList, *configv1.ImageContentPolicyApplyConfiguration]( + "imagecontentpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ImageContentPolicy { return &v1.ImageContentPolicy{} }, + func() *v1.ImageContentPolicyList { return &v1.ImageContentPolicyList{} }), } } - -// Get takes name of the imageContentPolicy, and returns the corresponding imageContentPolicy object, and an error if there is any. -func (c *imageContentPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageContentPolicy, err error) { - result = &v1.ImageContentPolicy{} - err = c.client.Get(). - Resource("imagecontentpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ImageContentPolicies that match those selectors. -func (c *imageContentPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageContentPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ImageContentPolicyList{} - err = c.client.Get(). - Resource("imagecontentpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested imageContentPolicies. -func (c *imageContentPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("imagecontentpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a imageContentPolicy and creates it. 
Returns the server's representation of the imageContentPolicy, and an error, if there is any. -func (c *imageContentPolicies) Create(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.CreateOptions) (result *v1.ImageContentPolicy, err error) { - result = &v1.ImageContentPolicy{} - err = c.client.Post(). - Resource("imagecontentpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageContentPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a imageContentPolicy and updates it. Returns the server's representation of the imageContentPolicy, and an error, if there is any. -func (c *imageContentPolicies) Update(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.UpdateOptions) (result *v1.ImageContentPolicy, err error) { - result = &v1.ImageContentPolicy{} - err = c.client.Put(). - Resource("imagecontentpolicies"). - Name(imageContentPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageContentPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the imageContentPolicy and deletes it. Returns an error if one occurs. -func (c *imageContentPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("imagecontentpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *imageContentPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("imagecontentpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched imageContentPolicy. -func (c *imageContentPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageContentPolicy, err error) { - result = &v1.ImageContentPolicy{} - err = c.client.Patch(pt). - Resource("imagecontentpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied imageContentPolicy. -func (c *imageContentPolicies) Apply(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageContentPolicy, err error) { - if imageContentPolicy == nil { - return nil, fmt.Errorf("imageContentPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(imageContentPolicy) - if err != nil { - return nil, err - } - name := imageContentPolicy.Name - if name == nil { - return nil, fmt.Errorf("imageContentPolicy.Name must be provided to Apply") - } - result = &v1.ImageContentPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("imagecontentpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go index 65e01a844..eca9b95b6 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImageDigestMirrorSetsGetter has a method to return a ImageDigestMirrorSetInterface. @@ -27,6 +24,7 @@ type ImageDigestMirrorSetsGetter interface { type ImageDigestMirrorSetInterface interface { Create(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.CreateOptions) (*v1.ImageDigestMirrorSet, error) Update(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (*v1.ImageDigestMirrorSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (*v1.ImageDigestMirrorSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type ImageDigestMirrorSetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageDigestMirrorSet, err error) Apply(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageDigestMirrorSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageDigestMirrorSet, err error) ImageDigestMirrorSetExpansion } // imageDigestMirrorSets implements ImageDigestMirrorSetInterface type imageDigestMirrorSets struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ImageDigestMirrorSet, *v1.ImageDigestMirrorSetList, *configv1.ImageDigestMirrorSetApplyConfiguration] } // newImageDigestMirrorSets returns a ImageDigestMirrorSets func newImageDigestMirrorSets(c *ConfigV1Client) *imageDigestMirrorSets { return &imageDigestMirrorSets{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ImageDigestMirrorSet, *v1.ImageDigestMirrorSetList, *configv1.ImageDigestMirrorSetApplyConfiguration]( + "imagedigestmirrorsets", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ImageDigestMirrorSet { return &v1.ImageDigestMirrorSet{} }, + func() *v1.ImageDigestMirrorSetList { return &v1.ImageDigestMirrorSetList{} }), } } - -// Get takes name of the imageDigestMirrorSet, and returns the corresponding imageDigestMirrorSet object, and an error if there is any. -func (c *imageDigestMirrorSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageDigestMirrorSet, err error) { - result = &v1.ImageDigestMirrorSet{} - err = c.client.Get(). - Resource("imagedigestmirrorsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ImageDigestMirrorSets that match those selectors. -func (c *imageDigestMirrorSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageDigestMirrorSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ImageDigestMirrorSetList{} - err = c.client.Get(). - Resource("imagedigestmirrorsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested imageDigestMirrorSets. -func (c *imageDigestMirrorSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("imagedigestmirrorsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a imageDigestMirrorSet and creates it. Returns the server's representation of the imageDigestMirrorSet, and an error, if there is any. -func (c *imageDigestMirrorSets) Create(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.CreateOptions) (result *v1.ImageDigestMirrorSet, err error) { - result = &v1.ImageDigestMirrorSet{} - err = c.client.Post(). - Resource("imagedigestmirrorsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageDigestMirrorSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a imageDigestMirrorSet and updates it. Returns the server's representation of the imageDigestMirrorSet, and an error, if there is any. 
-func (c *imageDigestMirrorSets) Update(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (result *v1.ImageDigestMirrorSet, err error) { - result = &v1.ImageDigestMirrorSet{} - err = c.client.Put(). - Resource("imagedigestmirrorsets"). - Name(imageDigestMirrorSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageDigestMirrorSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *imageDigestMirrorSets) UpdateStatus(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (result *v1.ImageDigestMirrorSet, err error) { - result = &v1.ImageDigestMirrorSet{} - err = c.client.Put(). - Resource("imagedigestmirrorsets"). - Name(imageDigestMirrorSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageDigestMirrorSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the imageDigestMirrorSet and deletes it. Returns an error if one occurs. -func (c *imageDigestMirrorSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("imagedigestmirrorsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *imageDigestMirrorSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("imagedigestmirrorsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched imageDigestMirrorSet. -func (c *imageDigestMirrorSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageDigestMirrorSet, err error) { - result = &v1.ImageDigestMirrorSet{} - err = c.client.Patch(pt). - Resource("imagedigestmirrorsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied imageDigestMirrorSet. -func (c *imageDigestMirrorSets) Apply(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageDigestMirrorSet, err error) { - if imageDigestMirrorSet == nil { - return nil, fmt.Errorf("imageDigestMirrorSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(imageDigestMirrorSet) - if err != nil { - return nil, err - } - name := imageDigestMirrorSet.Name - if name == nil { - return nil, fmt.Errorf("imageDigestMirrorSet.Name must be provided to Apply") - } - result = &v1.ImageDigestMirrorSet{} - err = c.client.Patch(types.ApplyPatchType). - Resource("imagedigestmirrorsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *imageDigestMirrorSets) ApplyStatus(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageDigestMirrorSet, err error) { - if imageDigestMirrorSet == nil { - return nil, fmt.Errorf("imageDigestMirrorSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(imageDigestMirrorSet) - if err != nil { - return nil, err - } - - name := imageDigestMirrorSet.Name - if name == nil { - return nil, fmt.Errorf("imageDigestMirrorSet.Name must be provided to Apply") - } - - result = &v1.ImageDigestMirrorSet{} - err = c.client.Patch(types.ApplyPatchType). - Resource("imagedigestmirrorsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go index dc8337970..add148a55 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImageTagMirrorSetsGetter has a method to return a ImageTagMirrorSetInterface. @@ -27,6 +24,7 @@ type ImageTagMirrorSetsGetter interface { type ImageTagMirrorSetInterface interface { Create(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.CreateOptions) (*v1.ImageTagMirrorSet, error) Update(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.UpdateOptions) (*v1.ImageTagMirrorSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.UpdateOptions) (*v1.ImageTagMirrorSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type ImageTagMirrorSetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageTagMirrorSet, err error) Apply(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageTagMirrorSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageTagMirrorSet, err error) ImageTagMirrorSetExpansion } // imageTagMirrorSets implements ImageTagMirrorSetInterface type imageTagMirrorSets struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ImageTagMirrorSet, *v1.ImageTagMirrorSetList, *configv1.ImageTagMirrorSetApplyConfiguration] } // newImageTagMirrorSets returns a ImageTagMirrorSets func newImageTagMirrorSets(c *ConfigV1Client) *imageTagMirrorSets { return &imageTagMirrorSets{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ImageTagMirrorSet, *v1.ImageTagMirrorSetList, *configv1.ImageTagMirrorSetApplyConfiguration]( + "imagetagmirrorsets", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ImageTagMirrorSet { return &v1.ImageTagMirrorSet{} }, + func() *v1.ImageTagMirrorSetList { return &v1.ImageTagMirrorSetList{} }), } } - -// Get takes name of the imageTagMirrorSet, and returns the corresponding imageTagMirrorSet object, and an error if there is any. -func (c *imageTagMirrorSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageTagMirrorSet, err error) { - result = &v1.ImageTagMirrorSet{} - err = c.client.Get(). - Resource("imagetagmirrorsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ImageTagMirrorSets that match those selectors. -func (c *imageTagMirrorSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageTagMirrorSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ImageTagMirrorSetList{} - err = c.client.Get(). - Resource("imagetagmirrorsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested imageTagMirrorSets. -func (c *imageTagMirrorSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("imagetagmirrorsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a imageTagMirrorSet and creates it. Returns the server's representation of the imageTagMirrorSet, and an error, if there is any. -func (c *imageTagMirrorSets) Create(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.CreateOptions) (result *v1.ImageTagMirrorSet, err error) { - result = &v1.ImageTagMirrorSet{} - err = c.client.Post(). - Resource("imagetagmirrorsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageTagMirrorSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a imageTagMirrorSet and updates it. Returns the server's representation of the imageTagMirrorSet, and an error, if there is any. -func (c *imageTagMirrorSets) Update(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.UpdateOptions) (result *v1.ImageTagMirrorSet, err error) { - result = &v1.ImageTagMirrorSet{} - err = c.client.Put(). - Resource("imagetagmirrorsets"). - Name(imageTagMirrorSet.Name). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(imageTagMirrorSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *imageTagMirrorSets) UpdateStatus(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.UpdateOptions) (result *v1.ImageTagMirrorSet, err error) { - result = &v1.ImageTagMirrorSet{} - err = c.client.Put(). - Resource("imagetagmirrorsets"). - Name(imageTagMirrorSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageTagMirrorSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the imageTagMirrorSet and deletes it. Returns an error if one occurs. -func (c *imageTagMirrorSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("imagetagmirrorsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *imageTagMirrorSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("imagetagmirrorsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched imageTagMirrorSet. -func (c *imageTagMirrorSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageTagMirrorSet, err error) { - result = &v1.ImageTagMirrorSet{} - err = c.client.Patch(pt). - Resource("imagetagmirrorsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied imageTagMirrorSet. -func (c *imageTagMirrorSets) Apply(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageTagMirrorSet, err error) { - if imageTagMirrorSet == nil { - return nil, fmt.Errorf("imageTagMirrorSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(imageTagMirrorSet) - if err != nil { - return nil, err - } - name := imageTagMirrorSet.Name - if name == nil { - return nil, fmt.Errorf("imageTagMirrorSet.Name must be provided to Apply") - } - result = &v1.ImageTagMirrorSet{} - err = c.client.Patch(types.ApplyPatchType). - Resource("imagetagmirrorsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *imageTagMirrorSets) ApplyStatus(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageTagMirrorSet, err error) { - if imageTagMirrorSet == nil { - return nil, fmt.Errorf("imageTagMirrorSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(imageTagMirrorSet) - if err != nil { - return nil, err - } - - name := imageTagMirrorSet.Name - if name == nil { - return nil, fmt.Errorf("imageTagMirrorSet.Name must be provided to Apply") - } - - result = &v1.ImageTagMirrorSet{} - err = c.client.Patch(types.ApplyPatchType). - Resource("imagetagmirrorsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go index c3728aa83..4d8550833 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // InfrastructuresGetter has a method to return a InfrastructureInterface. @@ -27,6 +24,7 @@ type InfrastructuresGetter interface { type InfrastructureInterface interface { Create(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.CreateOptions) (*v1.Infrastructure, error) Update(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.UpdateOptions) (*v1.Infrastructure, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.UpdateOptions) (*v1.Infrastructure, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type InfrastructureInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Infrastructure, err error) Apply(ctx context.Context, infrastructure *configv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Infrastructure, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, infrastructure *configv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Infrastructure, err error) InfrastructureExpansion } // infrastructures implements InfrastructureInterface type infrastructures struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Infrastructure, *v1.InfrastructureList, *configv1.InfrastructureApplyConfiguration] } // newInfrastructures returns a Infrastructures func newInfrastructures(c *ConfigV1Client) *infrastructures { return &infrastructures{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Infrastructure, *v1.InfrastructureList, *configv1.InfrastructureApplyConfiguration]( + "infrastructures", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Infrastructure { return &v1.Infrastructure{} }, + func() *v1.InfrastructureList { return &v1.InfrastructureList{} }), } } - -// Get takes name of the infrastructure, and returns the corresponding infrastructure object, and an error if there is any. -func (c *infrastructures) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Infrastructure, err error) { - result = &v1.Infrastructure{} - err = c.client.Get(). - Resource("infrastructures"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Infrastructures that match those selectors. -func (c *infrastructures) List(ctx context.Context, opts metav1.ListOptions) (result *v1.InfrastructureList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.InfrastructureList{} - err = c.client.Get(). - Resource("infrastructures"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested infrastructures. -func (c *infrastructures) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("infrastructures"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a infrastructure and creates it. Returns the server's representation of the infrastructure, and an error, if there is any. -func (c *infrastructures) Create(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.CreateOptions) (result *v1.Infrastructure, err error) { - result = &v1.Infrastructure{} - err = c.client.Post(). - Resource("infrastructures"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(infrastructure). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a infrastructure and updates it. Returns the server's representation of the infrastructure, and an error, if there is any. -func (c *infrastructures) Update(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.UpdateOptions) (result *v1.Infrastructure, err error) { - result = &v1.Infrastructure{} - err = c.client.Put(). - Resource("infrastructures"). - Name(infrastructure.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(infrastructure). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *infrastructures) UpdateStatus(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.UpdateOptions) (result *v1.Infrastructure, err error) { - result = &v1.Infrastructure{} - err = c.client.Put(). - Resource("infrastructures"). - Name(infrastructure.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(infrastructure). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the infrastructure and deletes it. Returns an error if one occurs. -func (c *infrastructures) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("infrastructures"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *infrastructures) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("infrastructures"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched infrastructure. -func (c *infrastructures) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Infrastructure, err error) { - result = &v1.Infrastructure{} - err = c.client.Patch(pt). - Resource("infrastructures"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied infrastructure. -func (c *infrastructures) Apply(ctx context.Context, infrastructure *configv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Infrastructure, err error) { - if infrastructure == nil { - return nil, fmt.Errorf("infrastructure provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(infrastructure) - if err != nil { - return nil, err - } - name := infrastructure.Name - if name == nil { - return nil, fmt.Errorf("infrastructure.Name must be provided to Apply") - } - result = &v1.Infrastructure{} - err = c.client.Patch(types.ApplyPatchType). - Resource("infrastructures"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *infrastructures) ApplyStatus(ctx context.Context, infrastructure *configv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Infrastructure, err error) { - if infrastructure == nil { - return nil, fmt.Errorf("infrastructure provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(infrastructure) - if err != nil { - return nil, err - } - - name := infrastructure.Name - if name == nil { - return nil, fmt.Errorf("infrastructure.Name must be provided to Apply") - } - - result = &v1.Infrastructure{} - err = c.client.Patch(types.ApplyPatchType). - Resource("infrastructures"). - Name(*name). - SubResource("status"). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go index 4d909f884..cc4fa53c4 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // IngressesGetter has a method to return a IngressInterface. @@ -27,6 +24,7 @@ type IngressesGetter interface { type IngressInterface interface { Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type IngressInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) Apply(ctx context.Context, ingress *configv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, ingress *configv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Ingress, *v1.IngressList, *configv1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *ConfigV1Client) *ingresses { return &ingresses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Ingress, *v1.IngressList, *configv1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Ingress { return &v1.Ingress{} }, + func() *v1.IngressList { return &v1.IngressList{} }), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Get(). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. 
-func (c *ingresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.IngressList{} - err = c.client.Get(). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Post(). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Put(). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Put(). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Patch(pt). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). 
- Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *ingresses) Apply(ctx context.Context, ingress *configv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *configv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go index d9d8a4e47..ca61ca93a 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // NetworksGetter has a method to return a NetworkInterface. @@ -27,6 +24,7 @@ type NetworksGetter interface { type NetworkInterface interface { Create(ctx context.Context, network *v1.Network, opts metav1.CreateOptions) (*v1.Network, error) Update(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (*v1.Network, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (*v1.Network, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type NetworkInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Network, err error) Apply(ctx context.Context, network *configv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Network, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, network *configv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Network, err error) NetworkExpansion } // networks implements NetworkInterface type networks struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Network, *v1.NetworkList, *configv1.NetworkApplyConfiguration] } // newNetworks returns a Networks func newNetworks(c *ConfigV1Client) *networks { return &networks{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Network, *v1.NetworkList, *configv1.NetworkApplyConfiguration]( + "networks", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Network { return &v1.Network{} }, + func() *v1.NetworkList { return &v1.NetworkList{} }), } } - -// Get takes name of the network, and returns the corresponding network object, and an error if there is any. -func (c *networks) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Network, err error) { - result = &v1.Network{} - err = c.client.Get(). - Resource("networks"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Networks that match those selectors. -func (c *networks) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NetworkList{} - err = c.client.Get(). - Resource("networks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networks. -func (c *networks) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("networks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a network and creates it. Returns the server's representation of the network, and an error, if there is any. -func (c *networks) Create(ctx context.Context, network *v1.Network, opts metav1.CreateOptions) (result *v1.Network, err error) { - result = &v1.Network{} - err = c.client.Post(). - Resource("networks"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(network). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a network and updates it. Returns the server's representation of the network, and an error, if there is any. 
-func (c *networks) Update(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (result *v1.Network, err error) { - result = &v1.Network{} - err = c.client.Put(). - Resource("networks"). - Name(network.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(network). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *networks) UpdateStatus(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (result *v1.Network, err error) { - result = &v1.Network{} - err = c.client.Put(). - Resource("networks"). - Name(network.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(network). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the network and deletes it. Returns an error if one occurs. -func (c *networks) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("networks"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networks) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("networks"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched network. -func (c *networks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Network, err error) { - result = &v1.Network{} - err = c.client.Patch(pt). - Resource("networks"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied network. -func (c *networks) Apply(ctx context.Context, network *configv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Network, err error) { - if network == nil { - return nil, fmt.Errorf("network provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(network) - if err != nil { - return nil, err - } - name := network.Name - if name == nil { - return nil, fmt.Errorf("network.Name must be provided to Apply") - } - result = &v1.Network{} - err = c.client.Patch(types.ApplyPatchType). - Resource("networks"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *networks) ApplyStatus(ctx context.Context, network *configv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Network, err error) { - if network == nil { - return nil, fmt.Errorf("network provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(network) - if err != nil { - return nil, err - } - - name := network.Name - if name == nil { - return nil, fmt.Errorf("network.Name must be provided to Apply") - } - - result = &v1.Network{} - err = c.client.Patch(types.ApplyPatchType). - Resource("networks"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go index 6c7969c5a..0ec0e5e5a 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // NodesGetter has a method to return a NodeInterface. @@ -27,6 +24,7 @@ type NodesGetter interface { type NodeInterface interface { Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type NodeInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) Apply(ctx context.Context, node *configv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, node *configv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) NodeExpansion } // nodes implements NodeInterface type nodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Node, *v1.NodeList, *configv1.NodeApplyConfiguration] } // newNodes returns a Nodes func newNodes(c *ConfigV1Client) *nodes { return &nodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Node, *v1.NodeList, *configv1.NodeApplyConfiguration]( + "nodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Node { return &v1.Node{} }, + func() *v1.NodeList { return &v1.NodeList{} }), } } - -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. 
-func (c *nodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *nodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NodeList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Post(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *nodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("nodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("nodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched node. 
-func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Patch(pt). - Resource("nodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied node. -func (c *nodes) Apply(ctx context.Context, node *configv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - result = &v1.Node{} - err = c.client.Patch(types.ApplyPatchType). - Resource("nodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *nodes) ApplyStatus(ctx context.Context, node *configv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - - result = &v1.Node{} - err = c.client.Patch(types.ApplyPatchType). - Resource("nodes"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go index b418cc046..d6f7814aa 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // OAuthsGetter has a method to return a OAuthInterface. @@ -27,6 +24,7 @@ type OAuthsGetter interface { type OAuthInterface interface { Create(ctx context.Context, oAuth *v1.OAuth, opts metav1.CreateOptions) (*v1.OAuth, error) Update(ctx context.Context, oAuth *v1.OAuth, opts metav1.UpdateOptions) (*v1.OAuth, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, oAuth *v1.OAuth, opts metav1.UpdateOptions) (*v1.OAuth, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type OAuthInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OAuth, err error) Apply(ctx context.Context, oAuth *configv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OAuth, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, oAuth *configv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OAuth, err error) OAuthExpansion } // oAuths implements OAuthInterface type oAuths struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.OAuth, *v1.OAuthList, *configv1.OAuthApplyConfiguration] } // newOAuths returns a OAuths func newOAuths(c *ConfigV1Client) *oAuths { return &oAuths{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.OAuth, *v1.OAuthList, *configv1.OAuthApplyConfiguration]( + "oauths", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.OAuth { return &v1.OAuth{} }, + func() *v1.OAuthList { return &v1.OAuthList{} }), } } - -// Get takes name of the oAuth, and returns the corresponding oAuth object, and an error if there is any. -func (c *oAuths) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.OAuth, err error) { - result = &v1.OAuth{} - err = c.client.Get(). - Resource("oauths"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of OAuths that match those selectors. -func (c *oAuths) List(ctx context.Context, opts metav1.ListOptions) (result *v1.OAuthList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.OAuthList{} - err = c.client.Get(). - Resource("oauths"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested oAuths. -func (c *oAuths) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("oauths"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a oAuth and creates it. Returns the server's representation of the oAuth, and an error, if there is any. -func (c *oAuths) Create(ctx context.Context, oAuth *v1.OAuth, opts metav1.CreateOptions) (result *v1.OAuth, err error) { - result = &v1.OAuth{} - err = c.client.Post(). - Resource("oauths"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(oAuth). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a oAuth and updates it. Returns the server's representation of the oAuth, and an error, if there is any. 
-func (c *oAuths) Update(ctx context.Context, oAuth *v1.OAuth, opts metav1.UpdateOptions) (result *v1.OAuth, err error) { - result = &v1.OAuth{} - err = c.client.Put(). - Resource("oauths"). - Name(oAuth.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(oAuth). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *oAuths) UpdateStatus(ctx context.Context, oAuth *v1.OAuth, opts metav1.UpdateOptions) (result *v1.OAuth, err error) { - result = &v1.OAuth{} - err = c.client.Put(). - Resource("oauths"). - Name(oAuth.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(oAuth). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the oAuth and deletes it. Returns an error if one occurs. -func (c *oAuths) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("oauths"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *oAuths) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("oauths"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched oAuth. -func (c *oAuths) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OAuth, err error) { - result = &v1.OAuth{} - err = c.client.Patch(pt). - Resource("oauths"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied oAuth. -func (c *oAuths) Apply(ctx context.Context, oAuth *configv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OAuth, err error) { - if oAuth == nil { - return nil, fmt.Errorf("oAuth provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(oAuth) - if err != nil { - return nil, err - } - name := oAuth.Name - if name == nil { - return nil, fmt.Errorf("oAuth.Name must be provided to Apply") - } - result = &v1.OAuth{} - err = c.client.Patch(types.ApplyPatchType). - Resource("oauths"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *oAuths) ApplyStatus(ctx context.Context, oAuth *configv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OAuth, err error) { - if oAuth == nil { - return nil, fmt.Errorf("oAuth provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(oAuth) - if err != nil { - return nil, err - } - - name := oAuth.Name - if name == nil { - return nil, fmt.Errorf("oAuth.Name must be provided to Apply") - } - - result = &v1.OAuth{} - err = c.client.Patch(types.ApplyPatchType). - Resource("oauths"). - Name(*name). 
- SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go index 67b7e0f89..c8a291486 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // OperatorHubsGetter has a method to return a OperatorHubInterface. @@ -27,6 +24,7 @@ type OperatorHubsGetter interface { type OperatorHubInterface interface { Create(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.CreateOptions) (*v1.OperatorHub, error) Update(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.UpdateOptions) (*v1.OperatorHub, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.UpdateOptions) (*v1.OperatorHub, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type OperatorHubInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OperatorHub, err error) Apply(ctx context.Context, operatorHub *configv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OperatorHub, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, operatorHub *configv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OperatorHub, err error) OperatorHubExpansion } // operatorHubs implements OperatorHubInterface type operatorHubs struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.OperatorHub, *v1.OperatorHubList, *configv1.OperatorHubApplyConfiguration] } // newOperatorHubs returns a OperatorHubs func newOperatorHubs(c *ConfigV1Client) *operatorHubs { return &operatorHubs{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.OperatorHub, *v1.OperatorHubList, *configv1.OperatorHubApplyConfiguration]( + "operatorhubs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.OperatorHub { return &v1.OperatorHub{} }, + func() *v1.OperatorHubList { return &v1.OperatorHubList{} }), } } - -// Get takes name of the operatorHub, and returns the corresponding operatorHub object, and an error if there is any. -func (c *operatorHubs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.OperatorHub, err error) { - result = &v1.OperatorHub{} - err = c.client.Get(). - Resource("operatorhubs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of OperatorHubs that match those selectors. -func (c *operatorHubs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.OperatorHubList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.OperatorHubList{} - err = c.client.Get(). - Resource("operatorhubs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested operatorHubs. -func (c *operatorHubs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("operatorhubs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a operatorHub and creates it. Returns the server's representation of the operatorHub, and an error, if there is any. -func (c *operatorHubs) Create(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.CreateOptions) (result *v1.OperatorHub, err error) { - result = &v1.OperatorHub{} - err = c.client.Post(). - Resource("operatorhubs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(operatorHub). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a operatorHub and updates it. Returns the server's representation of the operatorHub, and an error, if there is any. -func (c *operatorHubs) Update(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.UpdateOptions) (result *v1.OperatorHub, err error) { - result = &v1.OperatorHub{} - err = c.client.Put(). - Resource("operatorhubs"). - Name(operatorHub.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(operatorHub). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *operatorHubs) UpdateStatus(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.UpdateOptions) (result *v1.OperatorHub, err error) { - result = &v1.OperatorHub{} - err = c.client.Put(). - Resource("operatorhubs"). - Name(operatorHub.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(operatorHub). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the operatorHub and deletes it. Returns an error if one occurs. -func (c *operatorHubs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("operatorhubs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *operatorHubs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("operatorhubs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched operatorHub. 
-func (c *operatorHubs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OperatorHub, err error) { - result = &v1.OperatorHub{} - err = c.client.Patch(pt). - Resource("operatorhubs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied operatorHub. -func (c *operatorHubs) Apply(ctx context.Context, operatorHub *configv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OperatorHub, err error) { - if operatorHub == nil { - return nil, fmt.Errorf("operatorHub provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(operatorHub) - if err != nil { - return nil, err - } - name := operatorHub.Name - if name == nil { - return nil, fmt.Errorf("operatorHub.Name must be provided to Apply") - } - result = &v1.OperatorHub{} - err = c.client.Patch(types.ApplyPatchType). - Resource("operatorhubs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *operatorHubs) ApplyStatus(ctx context.Context, operatorHub *configv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OperatorHub, err error) { - if operatorHub == nil { - return nil, fmt.Errorf("operatorHub provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(operatorHub) - if err != nil { - return nil, err - } - - name := operatorHub.Name - if name == nil { - return nil, fmt.Errorf("operatorHub.Name must be provided to Apply") - } - - result = &v1.OperatorHub{} - err = c.client.Patch(types.ApplyPatchType). - Resource("operatorhubs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go index cada42734..75cd01ec1 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ProjectsGetter has a method to return a ProjectInterface. @@ -27,6 +24,7 @@ type ProjectsGetter interface { type ProjectInterface interface { Create(ctx context.Context, project *v1.Project, opts metav1.CreateOptions) (*v1.Project, error) Update(ctx context.Context, project *v1.Project, opts metav1.UpdateOptions) (*v1.Project, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, project *v1.Project, opts metav1.UpdateOptions) (*v1.Project, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type ProjectInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Project, err error) Apply(ctx context.Context, project *configv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Project, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, project *configv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Project, err error) ProjectExpansion } // projects implements ProjectInterface type projects struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Project, *v1.ProjectList, *configv1.ProjectApplyConfiguration] } // newProjects returns a Projects func newProjects(c *ConfigV1Client) *projects { return &projects{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Project, *v1.ProjectList, *configv1.ProjectApplyConfiguration]( + "projects", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Project { return &v1.Project{} }, + func() *v1.ProjectList { return &v1.ProjectList{} }), } } - -// Get takes name of the project, and returns the corresponding project object, and an error if there is any. -func (c *projects) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Project, err error) { - result = &v1.Project{} - err = c.client.Get(). - Resource("projects"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Projects that match those selectors. -func (c *projects) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ProjectList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ProjectList{} - err = c.client.Get(). - Resource("projects"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested projects. -func (c *projects) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("projects"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a project and creates it. Returns the server's representation of the project, and an error, if there is any. -func (c *projects) Create(ctx context.Context, project *v1.Project, opts metav1.CreateOptions) (result *v1.Project, err error) { - result = &v1.Project{} - err = c.client.Post(). - Resource("projects"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(project). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a project and updates it. Returns the server's representation of the project, and an error, if there is any. 
-func (c *projects) Update(ctx context.Context, project *v1.Project, opts metav1.UpdateOptions) (result *v1.Project, err error) { - result = &v1.Project{} - err = c.client.Put(). - Resource("projects"). - Name(project.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(project). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *projects) UpdateStatus(ctx context.Context, project *v1.Project, opts metav1.UpdateOptions) (result *v1.Project, err error) { - result = &v1.Project{} - err = c.client.Put(). - Resource("projects"). - Name(project.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(project). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the project and deletes it. Returns an error if one occurs. -func (c *projects) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("projects"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *projects) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("projects"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched project. -func (c *projects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Project, err error) { - result = &v1.Project{} - err = c.client.Patch(pt). - Resource("projects"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied project. -func (c *projects) Apply(ctx context.Context, project *configv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Project, err error) { - if project == nil { - return nil, fmt.Errorf("project provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(project) - if err != nil { - return nil, err - } - name := project.Name - if name == nil { - return nil, fmt.Errorf("project.Name must be provided to Apply") - } - result = &v1.Project{} - err = c.client.Patch(types.ApplyPatchType). - Resource("projects"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *projects) ApplyStatus(ctx context.Context, project *configv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Project, err error) { - if project == nil { - return nil, fmt.Errorf("project provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(project) - if err != nil { - return nil, err - } - - name := project.Name - if name == nil { - return nil, fmt.Errorf("project.Name must be provided to Apply") - } - - result = &v1.Project{} - err = c.client.Patch(types.ApplyPatchType). - Resource("projects"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go index a187dc8fc..dfa2b4d19 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ProxiesGetter has a method to return a ProxyInterface. @@ -27,6 +24,7 @@ type ProxiesGetter interface { type ProxyInterface interface { Create(ctx context.Context, proxy *v1.Proxy, opts metav1.CreateOptions) (*v1.Proxy, error) Update(ctx context.Context, proxy *v1.Proxy, opts metav1.UpdateOptions) (*v1.Proxy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, proxy *v1.Proxy, opts metav1.UpdateOptions) (*v1.Proxy, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type ProxyInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Proxy, err error) Apply(ctx context.Context, proxy *configv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Proxy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, proxy *configv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Proxy, err error) ProxyExpansion } // proxies implements ProxyInterface type proxies struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Proxy, *v1.ProxyList, *configv1.ProxyApplyConfiguration] } // newProxies returns a Proxies func newProxies(c *ConfigV1Client) *proxies { return &proxies{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Proxy, *v1.ProxyList, *configv1.ProxyApplyConfiguration]( + "proxies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Proxy { return &v1.Proxy{} }, + func() *v1.ProxyList { return &v1.ProxyList{} }), } } - -// Get takes name of the proxy, and returns the corresponding proxy object, and an error if there is any. -func (c *proxies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Proxy, err error) { - result = &v1.Proxy{} - err = c.client.Get(). - Resource("proxies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Proxies that match those selectors. -func (c *proxies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ProxyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ProxyList{} - err = c.client.Get(). - Resource("proxies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested proxies. -func (c *proxies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("proxies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a proxy and creates it. Returns the server's representation of the proxy, and an error, if there is any. -func (c *proxies) Create(ctx context.Context, proxy *v1.Proxy, opts metav1.CreateOptions) (result *v1.Proxy, err error) { - result = &v1.Proxy{} - err = c.client.Post(). - Resource("proxies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(proxy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a proxy and updates it. Returns the server's representation of the proxy, and an error, if there is any. -func (c *proxies) Update(ctx context.Context, proxy *v1.Proxy, opts metav1.UpdateOptions) (result *v1.Proxy, err error) { - result = &v1.Proxy{} - err = c.client.Put(). - Resource("proxies"). - Name(proxy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(proxy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *proxies) UpdateStatus(ctx context.Context, proxy *v1.Proxy, opts metav1.UpdateOptions) (result *v1.Proxy, err error) { - result = &v1.Proxy{} - err = c.client.Put(). - Resource("proxies"). - Name(proxy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(proxy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the proxy and deletes it. 
Returns an error if one occurs. -func (c *proxies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("proxies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *proxies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("proxies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched proxy. -func (c *proxies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Proxy, err error) { - result = &v1.Proxy{} - err = c.client.Patch(pt). - Resource("proxies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied proxy. -func (c *proxies) Apply(ctx context.Context, proxy *configv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Proxy, err error) { - if proxy == nil { - return nil, fmt.Errorf("proxy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(proxy) - if err != nil { - return nil, err - } - name := proxy.Name - if name == nil { - return nil, fmt.Errorf("proxy.Name must be provided to Apply") - } - result = &v1.Proxy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("proxies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *proxies) ApplyStatus(ctx context.Context, proxy *configv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Proxy, err error) { - if proxy == nil { - return nil, fmt.Errorf("proxy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(proxy) - if err != nil { - return nil, err - } - - name := proxy.Name - if name == nil { - return nil, fmt.Errorf("proxy.Name must be provided to Apply") - } - - result = &v1.Proxy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("proxies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go index da67a4f06..7d459060e 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // SchedulersGetter has a method to return a SchedulerInterface. @@ -27,6 +24,7 @@ type SchedulersGetter interface { type SchedulerInterface interface { Create(ctx context.Context, scheduler *v1.Scheduler, opts metav1.CreateOptions) (*v1.Scheduler, error) Update(ctx context.Context, scheduler *v1.Scheduler, opts metav1.UpdateOptions) (*v1.Scheduler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, scheduler *v1.Scheduler, opts metav1.UpdateOptions) (*v1.Scheduler, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,193 +33,25 @@ type SchedulerInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Scheduler, err error) Apply(ctx context.Context, scheduler *configv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Scheduler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, scheduler *configv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Scheduler, err error) SchedulerExpansion } // schedulers implements SchedulerInterface type schedulers struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Scheduler, *v1.SchedulerList, *configv1.SchedulerApplyConfiguration] } // newSchedulers returns a Schedulers func newSchedulers(c *ConfigV1Client) *schedulers { return &schedulers{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Scheduler, *v1.SchedulerList, *configv1.SchedulerApplyConfiguration]( + "schedulers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Scheduler { return &v1.Scheduler{} }, + func() *v1.SchedulerList { return &v1.SchedulerList{} }), } } - -// Get takes name of the scheduler, and returns the corresponding scheduler object, and an error if there is any. -func (c *schedulers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Scheduler, err error) { - result = &v1.Scheduler{} - err = c.client.Get(). - Resource("schedulers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Schedulers that match those selectors. 
-func (c *schedulers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SchedulerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.SchedulerList{} - err = c.client.Get(). - Resource("schedulers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested schedulers. -func (c *schedulers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("schedulers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a scheduler and creates it. Returns the server's representation of the scheduler, and an error, if there is any. -func (c *schedulers) Create(ctx context.Context, scheduler *v1.Scheduler, opts metav1.CreateOptions) (result *v1.Scheduler, err error) { - result = &v1.Scheduler{} - err = c.client.Post(). - Resource("schedulers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(scheduler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a scheduler and updates it. Returns the server's representation of the scheduler, and an error, if there is any. -func (c *schedulers) Update(ctx context.Context, scheduler *v1.Scheduler, opts metav1.UpdateOptions) (result *v1.Scheduler, err error) { - result = &v1.Scheduler{} - err = c.client.Put(). - Resource("schedulers"). - Name(scheduler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(scheduler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *schedulers) UpdateStatus(ctx context.Context, scheduler *v1.Scheduler, opts metav1.UpdateOptions) (result *v1.Scheduler, err error) { - result = &v1.Scheduler{} - err = c.client.Put(). - Resource("schedulers"). - Name(scheduler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(scheduler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the scheduler and deletes it. Returns an error if one occurs. -func (c *schedulers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("schedulers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *schedulers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("schedulers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched scheduler. -func (c *schedulers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Scheduler, err error) { - result = &v1.Scheduler{} - err = c.client.Patch(pt). - Resource("schedulers"). - Name(name). 
- SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied scheduler. -func (c *schedulers) Apply(ctx context.Context, scheduler *configv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Scheduler, err error) { - if scheduler == nil { - return nil, fmt.Errorf("scheduler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scheduler) - if err != nil { - return nil, err - } - name := scheduler.Name - if name == nil { - return nil, fmt.Errorf("scheduler.Name must be provided to Apply") - } - result = &v1.Scheduler{} - err = c.client.Patch(types.ApplyPatchType). - Resource("schedulers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *schedulers) ApplyStatus(ctx context.Context, scheduler *configv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Scheduler, err error) { - if scheduler == nil { - return nil, fmt.Errorf("scheduler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scheduler) - if err != nil { - return nil, err - } - - name := scheduler.Name - if name == nil { - return nil, fmt.Errorf("scheduler.Name must be provided to Apply") - } - - result = &v1.Scheduler{} - err = c.client.Patch(types.ApplyPatchType). - Resource("schedulers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/image.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/image.go index a18bea315..baddceac6 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/image.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/image.go @@ -12,7 +12,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ImageApplyConfiguration represents an declarative configuration of the Image type for use +// ImageApplyConfiguration represents a declarative configuration of the Image type for use // with apply. type ImageApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -29,7 +29,7 @@ type ImageApplyConfiguration struct { DockerImageManifests []ImageManifestApplyConfiguration `json:"dockerImageManifests,omitempty"` } -// Image constructs an declarative configuration of the Image type for use with +// Image constructs a declarative configuration of the Image type for use with // apply. func Image(name string) *ImageApplyConfiguration { b := &ImageApplyConfiguration{} @@ -328,3 +328,9 @@ func (b *ImageApplyConfiguration) WithDockerImageManifests(values ...*ImageManif } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ImageApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelayer.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelayer.go index e03e79a31..09dc0f931 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelayer.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelayer.go @@ -2,7 +2,7 @@ package v1 -// ImageLayerApplyConfiguration represents an declarative configuration of the ImageLayer type for use +// ImageLayerApplyConfiguration represents a declarative configuration of the ImageLayer type for use // with apply. type ImageLayerApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -10,7 +10,7 @@ type ImageLayerApplyConfiguration struct { MediaType *string `json:"mediaType,omitempty"` } -// ImageLayerApplyConfiguration constructs an declarative configuration of the ImageLayer type for use with +// ImageLayerApplyConfiguration constructs a declarative configuration of the ImageLayer type for use with // apply. func ImageLayer() *ImageLayerApplyConfiguration { return &ImageLayerApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelookuppolicy.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelookuppolicy.go index db4336f30..ecc95d10b 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelookuppolicy.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelookuppolicy.go @@ -2,13 +2,13 @@ package v1 -// ImageLookupPolicyApplyConfiguration represents an declarative configuration of the ImageLookupPolicy type for use +// ImageLookupPolicyApplyConfiguration represents a declarative configuration of the ImageLookupPolicy type for use // with apply. type ImageLookupPolicyApplyConfiguration struct { Local *bool `json:"local,omitempty"` } -// ImageLookupPolicyApplyConfiguration constructs an declarative configuration of the ImageLookupPolicy type for use with +// ImageLookupPolicyApplyConfiguration constructs a declarative configuration of the ImageLookupPolicy type for use with // apply. func ImageLookupPolicy() *ImageLookupPolicyApplyConfiguration { return &ImageLookupPolicyApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagemanifest.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagemanifest.go index 34f0ac8ee..5368f96a6 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagemanifest.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagemanifest.go @@ -2,7 +2,7 @@ package v1 -// ImageManifestApplyConfiguration represents an declarative configuration of the ImageManifest type for use +// ImageManifestApplyConfiguration represents a declarative configuration of the ImageManifest type for use // with apply. 
type ImageManifestApplyConfiguration struct { Digest *string `json:"digest,omitempty"` @@ -13,7 +13,7 @@ type ImageManifestApplyConfiguration struct { Variant *string `json:"variant,omitempty"` } -// ImageManifestApplyConfiguration constructs an declarative configuration of the ImageManifest type for use with +// ImageManifestApplyConfiguration constructs a declarative configuration of the ImageManifest type for use with // apply. func ImageManifest() *ImageManifestApplyConfiguration { return &ImageManifestApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagesignature.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagesignature.go index f3995b240..d12f83266 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagesignature.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagesignature.go @@ -8,7 +8,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ImageSignatureApplyConfiguration represents an declarative configuration of the ImageSignature type for use +// ImageSignatureApplyConfiguration represents a declarative configuration of the ImageSignature type for use // with apply. type ImageSignatureApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -23,7 +23,7 @@ type ImageSignatureApplyConfiguration struct { IssuedTo *SignatureSubjectApplyConfiguration `json:"issuedTo,omitempty"` } -// ImageSignature constructs an declarative configuration of the ImageSignature type for use with +// ImageSignature constructs a declarative configuration of the ImageSignature type for use with // apply. func ImageSignature(name string) *ImageSignatureApplyConfiguration { b := &ImageSignatureApplyConfiguration{} @@ -267,3 +267,9 @@ func (b *ImageSignatureApplyConfiguration) WithIssuedTo(value *SignatureSubjectA b.IssuedTo = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImageSignatureApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestream.go index 0d72f0cf8..d00345585 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestream.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestream.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ImageStreamApplyConfiguration represents an declarative configuration of the ImageStream type for use +// ImageStreamApplyConfiguration represents a declarative configuration of the ImageStream type for use // with apply. type ImageStreamApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type ImageStreamApplyConfiguration struct { Status *ImageStreamStatusApplyConfiguration `json:"status,omitempty"` } -// ImageStream constructs an declarative configuration of the ImageStream type for use with +// ImageStream constructs a declarative configuration of the ImageStream type for use with // apply. 
func ImageStream(name, namespace string) *ImageStreamApplyConfiguration { b := &ImageStreamApplyConfiguration{} @@ -240,3 +240,9 @@ func (b *ImageStreamApplyConfiguration) WithStatus(value *ImageStreamStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImageStreamApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreammapping.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreammapping.go index 627b657f1..56a1e24fc 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreammapping.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreammapping.go @@ -11,7 +11,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ImageStreamMappingApplyConfiguration represents an declarative configuration of the ImageStreamMapping type for use +// ImageStreamMappingApplyConfiguration represents a declarative configuration of the ImageStreamMapping type for use // with apply. type ImageStreamMappingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -20,7 +20,7 @@ type ImageStreamMappingApplyConfiguration struct { Tag *string `json:"tag,omitempty"` } -// ImageStreamMapping constructs an declarative configuration of the ImageStreamMapping type for use with +// ImageStreamMapping constructs a declarative configuration of the ImageStreamMapping type for use with // apply. func ImageStreamMapping(name, namespace string) *ImageStreamMappingApplyConfiguration { b := &ImageStreamMappingApplyConfiguration{} @@ -240,3 +240,9 @@ func (b *ImageStreamMappingApplyConfiguration) WithTag(value string) *ImageStrea b.Tag = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImageStreamMappingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamspec.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamspec.go index 5239862b7..09d777f17 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamspec.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamspec.go @@ -2,7 +2,7 @@ package v1 -// ImageStreamSpecApplyConfiguration represents an declarative configuration of the ImageStreamSpec type for use +// ImageStreamSpecApplyConfiguration represents a declarative configuration of the ImageStreamSpec type for use // with apply. type ImageStreamSpecApplyConfiguration struct { LookupPolicy *ImageLookupPolicyApplyConfiguration `json:"lookupPolicy,omitempty"` @@ -10,7 +10,7 @@ type ImageStreamSpecApplyConfiguration struct { Tags []TagReferenceApplyConfiguration `json:"tags,omitempty"` } -// ImageStreamSpecApplyConfiguration constructs an declarative configuration of the ImageStreamSpec type for use with +// ImageStreamSpecApplyConfiguration constructs a declarative configuration of the ImageStreamSpec type for use with // apply. 
func ImageStreamSpec() *ImageStreamSpecApplyConfiguration { return &ImageStreamSpecApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamstatus.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamstatus.go index 4035e7787..e2ab24aa8 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamstatus.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamstatus.go @@ -2,7 +2,7 @@ package v1 -// ImageStreamStatusApplyConfiguration represents an declarative configuration of the ImageStreamStatus type for use +// ImageStreamStatusApplyConfiguration represents a declarative configuration of the ImageStreamStatus type for use // with apply. type ImageStreamStatusApplyConfiguration struct { DockerImageRepository *string `json:"dockerImageRepository,omitempty"` @@ -10,7 +10,7 @@ type ImageStreamStatusApplyConfiguration struct { Tags []NamedTagEventListApplyConfiguration `json:"tags,omitempty"` } -// ImageStreamStatusApplyConfiguration constructs an declarative configuration of the ImageStreamStatus type for use with +// ImageStreamStatusApplyConfiguration constructs a declarative configuration of the ImageStreamStatus type for use with // apply. func ImageStreamStatus() *ImageStreamStatusApplyConfiguration { return &ImageStreamStatusApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/namedtageventlist.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/namedtageventlist.go index 9c00746ed..92b096aad 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/namedtageventlist.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/namedtageventlist.go @@ -2,7 +2,7 @@ package v1 -// NamedTagEventListApplyConfiguration represents an declarative configuration of the NamedTagEventList type for use +// NamedTagEventListApplyConfiguration represents a declarative configuration of the NamedTagEventList type for use // with apply. type NamedTagEventListApplyConfiguration struct { Tag *string `json:"tag,omitempty"` @@ -10,7 +10,7 @@ type NamedTagEventListApplyConfiguration struct { Conditions []TagEventConditionApplyConfiguration `json:"conditions,omitempty"` } -// NamedTagEventListApplyConfiguration constructs an declarative configuration of the NamedTagEventList type for use with +// NamedTagEventListApplyConfiguration constructs a declarative configuration of the NamedTagEventList type for use with // apply. func NamedTagEventList() *NamedTagEventListApplyConfiguration { return &NamedTagEventListApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturecondition.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturecondition.go index 5b98b799b..23f05b9c4 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturecondition.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturecondition.go @@ -8,7 +8,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SignatureConditionApplyConfiguration represents an declarative configuration of the SignatureCondition type for use +// SignatureConditionApplyConfiguration represents a declarative configuration of the SignatureCondition type for use // with apply. 
type SignatureConditionApplyConfiguration struct { Type *v1.SignatureConditionType `json:"type,omitempty"` @@ -19,7 +19,7 @@ type SignatureConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// SignatureConditionApplyConfiguration constructs an declarative configuration of the SignatureCondition type for use with +// SignatureConditionApplyConfiguration constructs a declarative configuration of the SignatureCondition type for use with // apply. func SignatureCondition() *SignatureConditionApplyConfiguration { return &SignatureConditionApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturegenericentity.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturegenericentity.go index ab0b0faaa..1e40e2ab1 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturegenericentity.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturegenericentity.go @@ -2,14 +2,14 @@ package v1 -// SignatureGenericEntityApplyConfiguration represents an declarative configuration of the SignatureGenericEntity type for use +// SignatureGenericEntityApplyConfiguration represents a declarative configuration of the SignatureGenericEntity type for use // with apply. type SignatureGenericEntityApplyConfiguration struct { Organization *string `json:"organization,omitempty"` CommonName *string `json:"commonName,omitempty"` } -// SignatureGenericEntityApplyConfiguration constructs an declarative configuration of the SignatureGenericEntity type for use with +// SignatureGenericEntityApplyConfiguration constructs a declarative configuration of the SignatureGenericEntity type for use with // apply. func SignatureGenericEntity() *SignatureGenericEntityApplyConfiguration { return &SignatureGenericEntityApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signatureissuer.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signatureissuer.go index 7b7f7fdcc..3ec7c7bbd 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signatureissuer.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signatureissuer.go @@ -2,13 +2,13 @@ package v1 -// SignatureIssuerApplyConfiguration represents an declarative configuration of the SignatureIssuer type for use +// SignatureIssuerApplyConfiguration represents a declarative configuration of the SignatureIssuer type for use // with apply. type SignatureIssuerApplyConfiguration struct { SignatureGenericEntityApplyConfiguration `json:",inline"` } -// SignatureIssuerApplyConfiguration constructs an declarative configuration of the SignatureIssuer type for use with +// SignatureIssuerApplyConfiguration constructs a declarative configuration of the SignatureIssuer type for use with // apply. 
func SignatureIssuer() *SignatureIssuerApplyConfiguration { return &SignatureIssuerApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturesubject.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturesubject.go index 9ce151975..3506d98a8 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturesubject.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturesubject.go @@ -2,14 +2,14 @@ package v1 -// SignatureSubjectApplyConfiguration represents an declarative configuration of the SignatureSubject type for use +// SignatureSubjectApplyConfiguration represents a declarative configuration of the SignatureSubject type for use // with apply. type SignatureSubjectApplyConfiguration struct { SignatureGenericEntityApplyConfiguration `json:",inline"` PublicKeyID *string `json:"publicKeyID,omitempty"` } -// SignatureSubjectApplyConfiguration constructs an declarative configuration of the SignatureSubject type for use with +// SignatureSubjectApplyConfiguration constructs a declarative configuration of the SignatureSubject type for use with // apply. func SignatureSubject() *SignatureSubjectApplyConfiguration { return &SignatureSubjectApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagevent.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagevent.go index cc7722e2d..0187ab87a 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagevent.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagevent.go @@ -6,7 +6,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TagEventApplyConfiguration represents an declarative configuration of the TagEvent type for use +// TagEventApplyConfiguration represents a declarative configuration of the TagEvent type for use // with apply. type TagEventApplyConfiguration struct { Created *v1.Time `json:"created,omitempty"` @@ -15,7 +15,7 @@ type TagEventApplyConfiguration struct { Generation *int64 `json:"generation,omitempty"` } -// TagEventApplyConfiguration constructs an declarative configuration of the TagEvent type for use with +// TagEventApplyConfiguration constructs a declarative configuration of the TagEvent type for use with // apply. func TagEvent() *TagEventApplyConfiguration { return &TagEventApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tageventcondition.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tageventcondition.go index 107560f1f..b25af8645 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tageventcondition.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tageventcondition.go @@ -8,7 +8,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TagEventConditionApplyConfiguration represents an declarative configuration of the TagEventCondition type for use +// TagEventConditionApplyConfiguration represents a declarative configuration of the TagEventCondition type for use // with apply. 
type TagEventConditionApplyConfiguration struct { Type *v1.TagEventConditionType `json:"type,omitempty"` @@ -19,7 +19,7 @@ type TagEventConditionApplyConfiguration struct { Generation *int64 `json:"generation,omitempty"` } -// TagEventConditionApplyConfiguration constructs an declarative configuration of the TagEventCondition type for use with +// TagEventConditionApplyConfiguration constructs a declarative configuration of the TagEventCondition type for use with // apply. func TagEventCondition() *TagEventConditionApplyConfiguration { return &TagEventConditionApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagimportpolicy.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagimportpolicy.go index bb1f7e4d6..1ce15353d 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagimportpolicy.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagimportpolicy.go @@ -6,7 +6,7 @@ import ( v1 "github.com/openshift/api/image/v1" ) -// TagImportPolicyApplyConfiguration represents an declarative configuration of the TagImportPolicy type for use +// TagImportPolicyApplyConfiguration represents a declarative configuration of the TagImportPolicy type for use // with apply. type TagImportPolicyApplyConfiguration struct { Insecure *bool `json:"insecure,omitempty"` @@ -14,7 +14,7 @@ type TagImportPolicyApplyConfiguration struct { ImportMode *v1.ImportModeType `json:"importMode,omitempty"` } -// TagImportPolicyApplyConfiguration constructs an declarative configuration of the TagImportPolicy type for use with +// TagImportPolicyApplyConfiguration constructs a declarative configuration of the TagImportPolicy type for use with // apply. func TagImportPolicy() *TagImportPolicyApplyConfiguration { return &TagImportPolicyApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreference.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreference.go index 77c6e1163..3ec958d37 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreference.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreference.go @@ -6,7 +6,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// TagReferenceApplyConfiguration represents an declarative configuration of the TagReference type for use +// TagReferenceApplyConfiguration represents a declarative configuration of the TagReference type for use // with apply. type TagReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -18,7 +18,7 @@ type TagReferenceApplyConfiguration struct { ReferencePolicy *TagReferencePolicyApplyConfiguration `json:"referencePolicy,omitempty"` } -// TagReferenceApplyConfiguration constructs an declarative configuration of the TagReference type for use with +// TagReferenceApplyConfiguration constructs a declarative configuration of the TagReference type for use with // apply. 
func TagReference() *TagReferenceApplyConfiguration { return &TagReferenceApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreferencepolicy.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreferencepolicy.go index 4476d0099..06d739753 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreferencepolicy.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreferencepolicy.go @@ -6,13 +6,13 @@ import ( v1 "github.com/openshift/api/image/v1" ) -// TagReferencePolicyApplyConfiguration represents an declarative configuration of the TagReferencePolicy type for use +// TagReferencePolicyApplyConfiguration represents a declarative configuration of the TagReferencePolicy type for use // with apply. type TagReferencePolicyApplyConfiguration struct { Type *v1.TagReferencePolicyType `json:"type,omitempty"` } -// TagReferencePolicyApplyConfiguration constructs an declarative configuration of the TagReferencePolicy type for use with +// TagReferencePolicyApplyConfiguration constructs a declarative configuration of the TagReferencePolicy type for use with // apply. func TagReferencePolicy() *TagReferencePolicyApplyConfiguration { return &TagReferencePolicyApplyConfiguration{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/internal/internal.go index c3ab732d7..a66912f38 100644 --- a/vendor/github.com/openshift/client-go/image/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/internal/internal.go @@ -53,7 +53,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: dockerImageMetadata type: namedType: __untyped_atomic_ - default: {} - name: dockerImageMetadataVersion type: scalar: string @@ -277,11 +276,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -324,7 +321,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: created type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: dockerImageReference type: scalar: string @@ -347,7 +343,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -482,7 +477,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: creationTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deletionGracePeriodSeconds type: scalar: numeric diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image.go index b65e75e36..5cf011812 100644 --- a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/image/v1" imagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types 
"k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImagesGetter has a method to return a ImageInterface. @@ -39,143 +36,18 @@ type ImageInterface interface { // images implements ImageInterface type images struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Image, *v1.ImageList, *imagev1.ImageApplyConfiguration] } // newImages returns a Images func newImages(c *ImageV1Client) *images { return &images{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Image, *v1.ImageList, *imagev1.ImageApplyConfiguration]( + "images", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Image { return &v1.Image{} }, + func() *v1.ImageList { return &v1.ImageList{} }), } } - -// Get takes name of the image, and returns the corresponding image object, and an error if there is any. -func (c *images) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Image, err error) { - result = &v1.Image{} - err = c.client.Get(). - Resource("images"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Images that match those selectors. -func (c *images) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ImageList{} - err = c.client.Get(). - Resource("images"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested images. -func (c *images) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("images"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any. -func (c *images) Create(ctx context.Context, image *v1.Image, opts metav1.CreateOptions) (result *v1.Image, err error) { - result = &v1.Image{} - err = c.client.Post(). - Resource("images"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(image). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a image and updates it. Returns the server's representation of the image, and an error, if there is any. -func (c *images) Update(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (result *v1.Image, err error) { - result = &v1.Image{} - err = c.client.Put(). - Resource("images"). - Name(image.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(image). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the image and deletes it. Returns an error if one occurs. -func (c *images) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("images"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *images) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("images"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched image. -func (c *images) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Image, err error) { - result = &v1.Image{} - err = c.client.Patch(pt). - Resource("images"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied image. -func (c *images) Apply(ctx context.Context, image *imagev1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) { - if image == nil { - return nil, fmt.Errorf("image provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(image) - if err != nil { - return nil, err - } - name := image.Name - if name == nil { - return nil, fmt.Errorf("image.Name must be provided to Apply") - } - result = &v1.Image{} - err = c.client.Patch(types.ApplyPatchType). - Resource("images"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagesignature.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagesignature.go index 195b8f371..8e643c924 100644 --- a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagesignature.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagesignature.go @@ -8,7 +8,7 @@ import ( v1 "github.com/openshift/api/image/v1" scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImageSignaturesGetter has a method to return a ImageSignatureInterface. @@ -26,34 +26,17 @@ type ImageSignatureInterface interface { // imageSignatures implements ImageSignatureInterface type imageSignatures struct { - client rest.Interface + *gentype.Client[*v1.ImageSignature] } // newImageSignatures returns a ImageSignatures func newImageSignatures(c *ImageV1Client) *imageSignatures { return &imageSignatures{ - client: c.RESTClient(), + gentype.NewClient[*v1.ImageSignature]( + "imagesignatures", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ImageSignature { return &v1.ImageSignature{} }), } } - -// Create takes the representation of a imageSignature and creates it. Returns the server's representation of the imageSignature, and an error, if there is any. -func (c *imageSignatures) Create(ctx context.Context, imageSignature *v1.ImageSignature, opts metav1.CreateOptions) (result *v1.ImageSignature, err error) { - result = &v1.ImageSignature{} - err = c.client.Post(). - Resource("imagesignatures"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageSignature). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the imageSignature and deletes it. 
Returns an error if one occurs. -func (c *imageSignatures) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("imagesignatures"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go index 7c00e2fc6..49c841e2b 100644 --- a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go @@ -4,9 +4,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "github.com/openshift/api/image/v1" imagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" @@ -14,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImageStreamsGetter has a method to return a ImageStreamInterface. @@ -27,6 +24,7 @@ type ImageStreamsGetter interface { type ImageStreamInterface interface { Create(ctx context.Context, imageStream *v1.ImageStream, opts metav1.CreateOptions) (*v1.ImageStream, error) Update(ctx context.Context, imageStream *v1.ImageStream, opts metav1.UpdateOptions) (*v1.ImageStream, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, imageStream *v1.ImageStream, opts metav1.UpdateOptions) (*v1.ImageStream, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -35,6 +33,7 @@ type ImageStreamInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageStream, err error) Apply(ctx context.Context, imageStream *imagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageStream, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, imageStream *imagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageStream, err error) Secrets(ctx context.Context, imageStreamName string, options metav1.GetOptions) (*v1.SecretList, error) Layers(ctx context.Context, imageStreamName string, options metav1.GetOptions) (*v1.ImageStreamLayers, error) @@ -44,209 +43,27 @@ type ImageStreamInterface interface { // imageStreams implements ImageStreamInterface type imageStreams struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ImageStream, *v1.ImageStreamList, *imagev1.ImageStreamApplyConfiguration] } // newImageStreams returns a ImageStreams func newImageStreams(c *ImageV1Client, namespace string) *imageStreams { return &imageStreams{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ImageStream, *v1.ImageStreamList, *imagev1.ImageStreamApplyConfiguration]( + "imagestreams", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ImageStream { return &v1.ImageStream{} }, + func() *v1.ImageStreamList { return &v1.ImageStreamList{} }), } } -// Get takes name of the imageStream, and returns the corresponding imageStream object, and an error if there is any. -func (c *imageStreams) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageStream, err error) { - result = &v1.ImageStream{} - err = c.client.Get(). - Namespace(c.ns). - Resource("imagestreams"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ImageStreams that match those selectors. -func (c *imageStreams) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageStreamList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ImageStreamList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("imagestreams"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested imageStreams. -func (c *imageStreams) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("imagestreams"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a imageStream and creates it. Returns the server's representation of the imageStream, and an error, if there is any. -func (c *imageStreams) Create(ctx context.Context, imageStream *v1.ImageStream, opts metav1.CreateOptions) (result *v1.ImageStream, err error) { - result = &v1.ImageStream{} - err = c.client.Post(). - Namespace(c.ns). - Resource("imagestreams"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageStream). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a imageStream and updates it. Returns the server's representation of the imageStream, and an error, if there is any. -func (c *imageStreams) Update(ctx context.Context, imageStream *v1.ImageStream, opts metav1.UpdateOptions) (result *v1.ImageStream, err error) { - result = &v1.ImageStream{} - err = c.client.Put(). - Namespace(c.ns). 
- Resource("imagestreams"). - Name(imageStream.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageStream). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *imageStreams) UpdateStatus(ctx context.Context, imageStream *v1.ImageStream, opts metav1.UpdateOptions) (result *v1.ImageStream, err error) { - result = &v1.ImageStream{} - err = c.client.Put(). - Namespace(c.ns). - Resource("imagestreams"). - Name(imageStream.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageStream). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the imageStream and deletes it. Returns an error if one occurs. -func (c *imageStreams) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("imagestreams"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *imageStreams) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("imagestreams"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched imageStream. -func (c *imageStreams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageStream, err error) { - result = &v1.ImageStream{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("imagestreams"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied imageStream. -func (c *imageStreams) Apply(ctx context.Context, imageStream *imagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageStream, err error) { - if imageStream == nil { - return nil, fmt.Errorf("imageStream provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(imageStream) - if err != nil { - return nil, err - } - name := imageStream.Name - if name == nil { - return nil, fmt.Errorf("imageStream.Name must be provided to Apply") - } - result = &v1.ImageStream{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("imagestreams"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *imageStreams) ApplyStatus(ctx context.Context, imageStream *imagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageStream, err error) { - if imageStream == nil { - return nil, fmt.Errorf("imageStream provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(imageStream) - if err != nil { - return nil, err - } - - name := imageStream.Name - if name == nil { - return nil, fmt.Errorf("imageStream.Name must be provided to Apply") - } - - result = &v1.ImageStream{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("imagestreams"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // Secrets takes name of the imageStream, and returns the corresponding v1.SecretList object, and an error if there is any. func (c *imageStreams) Secrets(ctx context.Context, imageStreamName string, options metav1.GetOptions) (result *v1.SecretList, err error) { result = &v1.SecretList{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("imagestreams"). Name(imageStreamName). SubResource("secrets"). @@ -259,8 +76,8 @@ func (c *imageStreams) Secrets(ctx context.Context, imageStreamName string, opti // Layers takes name of the imageStream, and returns the corresponding v1.ImageStreamLayers object, and an error if there is any. func (c *imageStreams) Layers(ctx context.Context, imageStreamName string, options metav1.GetOptions) (result *v1.ImageStreamLayers, err error) { result = &v1.ImageStreamLayers{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("imagestreams"). Name(imageStreamName). SubResource("layers"). diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimage.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimage.go index 79f46753a..947240b12 100644 --- a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimage.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimage.go @@ -8,7 +8,7 @@ import ( imagev1 "github.com/openshift/api/image/v1" scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImageStreamImagesGetter has a method to return a ImageStreamImageInterface. @@ -25,27 +25,17 @@ type ImageStreamImageInterface interface { // imageStreamImages implements ImageStreamImageInterface type imageStreamImages struct { - client rest.Interface - ns string + *gentype.Client[*imagev1.ImageStreamImage] } // newImageStreamImages returns a ImageStreamImages func newImageStreamImages(c *ImageV1Client, namespace string) *imageStreamImages { return &imageStreamImages{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*imagev1.ImageStreamImage]( + "imagestreamimages", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *imagev1.ImageStreamImage { return &imagev1.ImageStreamImage{} }), } } - -// Get takes name of the imageStreamImage, and returns the corresponding imageStreamImage object, and an error if there is any. 
-func (c *imageStreamImages) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageStreamImage, err error) { - result = &imagev1.ImageStreamImage{} - err = c.client.Get(). - Namespace(c.ns). - Resource("imagestreamimages"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimport.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimport.go index 7c43c951d..730bff723 100644 --- a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimport.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimport.go @@ -8,7 +8,7 @@ import ( v1 "github.com/openshift/api/image/v1" scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImageStreamImportsGetter has a method to return a ImageStreamImportInterface. @@ -25,27 +25,17 @@ type ImageStreamImportInterface interface { // imageStreamImports implements ImageStreamImportInterface type imageStreamImports struct { - client rest.Interface - ns string + *gentype.Client[*v1.ImageStreamImport] } // newImageStreamImports returns a ImageStreamImports func newImageStreamImports(c *ImageV1Client, namespace string) *imageStreamImports { return &imageStreamImports{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1.ImageStreamImport]( + "imagestreamimports", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ImageStreamImport { return &v1.ImageStreamImport{} }), } } - -// Create takes the representation of a imageStreamImport and creates it. Returns the server's representation of the imageStreamImport, and an error, if there is any. -func (c *imageStreamImports) Create(ctx context.Context, imageStreamImport *v1.ImageStreamImport, opts metav1.CreateOptions) (result *v1.ImageStreamImport, err error) { - result = &v1.ImageStreamImport{} - err = c.client.Post(). - Namespace(c.ns). - Resource("imagestreamimports"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageStreamImport). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreammapping.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreammapping.go index b19c110b7..840553797 100644 --- a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreammapping.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreammapping.go @@ -4,15 +4,12 @@ package v1 import ( "context" - json "encoding/json" - "fmt" imagev1 "github.com/openshift/api/image/v1" v1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImageStreamMappingsGetter has a method to return a ImageStreamMappingInterface. 
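Callers are unaffected by this migration: the typed interfaces stay the same, only their backing implementation moves into `gentype`. A minimal caller-side sketch (assuming in-cluster credentials; the `openshift` namespace is an arbitrary example) compiles and behaves identically before and after the refactor:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"

	imagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes the program runs in a pod
	if err != nil {
		panic(err)
	}
	client, err := imagev1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// List is now served by the embedded gentype.ClientWithListAndApply,
	// but the call site is unchanged.
	streams, err := client.ImageStreams("openshift").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, is := range streams.Items {
		fmt.Println(is.Name)
	}
}
```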
@@ -31,49 +28,26 @@ type ImageStreamMappingInterface interface { // imageStreamMappings implements ImageStreamMappingInterface type imageStreamMappings struct { - client rest.Interface - ns string + *gentype.ClientWithApply[*imagev1.ImageStreamMapping, *v1.ImageStreamMappingApplyConfiguration] } // newImageStreamMappings returns a ImageStreamMappings func newImageStreamMappings(c *ImageV1Client, namespace string) *imageStreamMappings { return &imageStreamMappings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithApply[*imagev1.ImageStreamMapping, *v1.ImageStreamMappingApplyConfiguration]( + "imagestreammappings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *imagev1.ImageStreamMapping { return &imagev1.ImageStreamMapping{} }), } } -// Apply takes the given apply declarative configuration, applies it and returns the applied imageStreamMapping. -func (c *imageStreamMappings) Apply(ctx context.Context, imageStreamMapping *v1.ImageStreamMappingApplyConfiguration, opts metav1.ApplyOptions) (result *imagev1.ImageStreamMapping, err error) { - if imageStreamMapping == nil { - return nil, fmt.Errorf("imageStreamMapping provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(imageStreamMapping) - if err != nil { - return nil, err - } - name := imageStreamMapping.Name - if name == nil { - return nil, fmt.Errorf("imageStreamMapping.Name must be provided to Apply") - } - result = &imagev1.ImageStreamMapping{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("imagestreammappings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // Create takes the representation of a imageStreamMapping and creates it. Returns the server's representation of the status, and an error, if there is any. func (c *imageStreamMappings) Create(ctx context.Context, imageStreamMapping *imagev1.ImageStreamMapping, opts metav1.CreateOptions) (result *metav1.Status, err error) { result = &metav1.Status{} - err = c.client.Post(). - Namespace(c.ns). + err = c.GetClient().Post(). + Namespace(c.GetNamespace()). Resource("imagestreammappings"). VersionedParams(&opts, scheme.ParameterCodec). Body(imageStreamMapping). diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamtag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamtag.go index 4ea36ccdc..8029bc15d 100644 --- a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamtag.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamtag.go @@ -4,12 +4,11 @@ package v1 import ( "context" - "time" v1 "github.com/openshift/api/image/v1" scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImageStreamTagsGetter has a method to return a ImageStreamTagInterface. 
@@ -30,82 +29,18 @@ type ImageStreamTagInterface interface { // imageStreamTags implements ImageStreamTagInterface type imageStreamTags struct { - client rest.Interface - ns string + *gentype.ClientWithList[*v1.ImageStreamTag, *v1.ImageStreamTagList] } // newImageStreamTags returns a ImageStreamTags func newImageStreamTags(c *ImageV1Client, namespace string) *imageStreamTags { return &imageStreamTags{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*v1.ImageStreamTag, *v1.ImageStreamTagList]( + "imagestreamtags", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ImageStreamTag { return &v1.ImageStreamTag{} }, + func() *v1.ImageStreamTagList { return &v1.ImageStreamTagList{} }), } } - -// Get takes name of the imageStreamTag, and returns the corresponding imageStreamTag object, and an error if there is any. -func (c *imageStreamTags) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageStreamTag, err error) { - result = &v1.ImageStreamTag{} - err = c.client.Get(). - Namespace(c.ns). - Resource("imagestreamtags"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ImageStreamTags that match those selectors. -func (c *imageStreamTags) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageStreamTagList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ImageStreamTagList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("imagestreamtags"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Create takes the representation of a imageStreamTag and creates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. -func (c *imageStreamTags) Create(ctx context.Context, imageStreamTag *v1.ImageStreamTag, opts metav1.CreateOptions) (result *v1.ImageStreamTag, err error) { - result = &v1.ImageStreamTag{} - err = c.client.Post(). - Namespace(c.ns). - Resource("imagestreamtags"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageStreamTag). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a imageStreamTag and updates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. -func (c *imageStreamTags) Update(ctx context.Context, imageStreamTag *v1.ImageStreamTag, opts metav1.UpdateOptions) (result *v1.ImageStreamTag, err error) { - result = &v1.ImageStreamTag{} - err = c.client.Put(). - Namespace(c.ns). - Resource("imagestreamtags"). - Name(imageStreamTag.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageStreamTag). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the imageStreamTag and deletes it. Returns an error if one occurs. -func (c *imageStreamTags) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("imagestreamtags"). - Name(name). - Body(&opts). - Do(ctx). 
- Error() -} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagetag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagetag.go index a0d80e3ac..f56c8b804 100644 --- a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagetag.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagetag.go @@ -4,12 +4,11 @@ package v1 import ( "context" - "time" v1 "github.com/openshift/api/image/v1" scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ImageTagsGetter has a method to return a ImageTagInterface. @@ -30,82 +29,18 @@ type ImageTagInterface interface { // imageTags implements ImageTagInterface type imageTags struct { - client rest.Interface - ns string + *gentype.ClientWithList[*v1.ImageTag, *v1.ImageTagList] } // newImageTags returns a ImageTags func newImageTags(c *ImageV1Client, namespace string) *imageTags { return &imageTags{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*v1.ImageTag, *v1.ImageTagList]( + "imagetags", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ImageTag { return &v1.ImageTag{} }, + func() *v1.ImageTagList { return &v1.ImageTagList{} }), } } - -// Get takes name of the imageTag, and returns the corresponding imageTag object, and an error if there is any. -func (c *imageTags) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageTag, err error) { - result = &v1.ImageTag{} - err = c.client.Get(). - Namespace(c.ns). - Resource("imagetags"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ImageTags that match those selectors. -func (c *imageTags) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageTagList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ImageTagList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("imagetags"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Create takes the representation of a imageTag and creates it. Returns the server's representation of the imageTag, and an error, if there is any. -func (c *imageTags) Create(ctx context.Context, imageTag *v1.ImageTag, opts metav1.CreateOptions) (result *v1.ImageTag, err error) { - result = &v1.ImageTag{} - err = c.client.Post(). - Namespace(c.ns). - Resource("imagetags"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageTag). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a imageTag and updates it. Returns the server's representation of the imageTag, and an error, if there is any. -func (c *imageTags) Update(ctx context.Context, imageTag *v1.ImageTag, opts metav1.UpdateOptions) (result *v1.ImageTag, err error) { - result = &v1.ImageTag{} - err = c.client.Put(). - Namespace(c.ns). - Resource("imagetags"). - Name(imageTag.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(imageTag). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the imageTag and deletes it. Returns an error if one occurs. 
-func (c *imageTags) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("imagetags").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go
index ef8b2c8f2..b5f5e3b7e 100644
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go
@@ -7,6 +7,7 @@ import (
 	"github.com/sirupsen/logrus"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 )
@@ -127,19 +128,45 @@ type GrpcPodConfig struct {
 	// SecurityContextConfig can be one of `legacy` or `restricted`. The CatalogSource's pod is either injected with the
 	// right pod.spec.securityContext and pod.spec.container[*].securityContext values to allow the pod to run in Pod
 	// Security Admission (PSA) `restricted` mode, or doesn't set these values at all, in which case the pod can only be
-	// run in PSA `baseline` or `privileged` namespaces. Currently if the SecurityContextConfig is unspecified, the default
-	// value of `legacy` is used. Specifying a value other than `legacy` or `restricted` result in a validation error.
-	// When using older catalog images, which could not be run in `restricted` mode, the SecurityContextConfig should be
-	// set to `legacy`.
-	//
-	// In a future version will the default will be set to `restricted`, catalog maintainers should rebuild their catalogs
-	// with a version of opm that supports running catalogSource pods in `restricted` mode to prepare for these changes.
+	// run in PSA `baseline` or `privileged` namespaces. If the SecurityContextConfig is unspecified, the mode will be
+	// determined by the namespace's PSA configuration. If the namespace is enforcing `restricted` mode, then the pod
+	// will be configured as if `restricted` was specified. Otherwise, it will be configured as if `legacy` was
+	// specified. Specifying a value other than `legacy` or `restricted` results in a validation error. When using older
+	// catalog images, which cannot run in `restricted` mode, the SecurityContextConfig should be set to `legacy`.
 	//
 	// More information about PSA can be found here: https://kubernetes.io/docs/concepts/security/pod-security-admission/'
 	// +optional
 	// +kubebuilder:validation:Enum=legacy;restricted
-	// +kubebuilder:default:=legacy
 	SecurityContextConfig SecurityConfig `json:"securityContextConfig,omitempty"`
+
+	// MemoryTarget configures the $GOMEMLIMIT value for the gRPC catalog Pod. This is a soft memory limit for the server,
+	// which the runtime will attempt to meet but makes no guarantees that it will do so. If this value is set, the Pod
+	// will have the following modifications made to the container running the server:
+	// - the $GOMEMLIMIT environment variable will be set to this value in bytes
+	// - the memory request will be set to this value
+	//
+	// This field should be set if it's desired to reduce the footprint of a catalog server as much as possible, or if
+	// a catalog being served is very large and needs more than the default allocation. If your index image has a file-
+	// system cache, determine a good approximation for this value by doubling the size of the package cache at
+	// /tmp/cache/cache/packages.json in the index image.
+	//
+	// This field is best-effort; if unset, no default will be used and no Pod memory limit or $GOMEMLIMIT value will be set.
+	// +optional
+	MemoryTarget *resource.Quantity `json:"memoryTarget,omitempty"`
+
+	// ExtractContent configures the gRPC catalog Pod to extract catalog metadata from the provided index image and
+	// use a well-known version of the `opm` server to expose it. The catalog index image that this CatalogSource is
+	// configured to use *must* be using the file-based catalogs in order to utilize this feature.
+	// +optional
+	ExtractContent *ExtractContentConfig `json:"extractContent,omitempty"`
+}
+
+// ExtractContentConfig configures content extraction from a file-based catalog index image.
+type ExtractContentConfig struct {
+	// CacheDir is the directory storing the pre-calculated API cache.
+	CacheDir string `json:"cacheDir"`
+	// CatalogDir is the directory storing the file-based catalog contents.
+	CatalogDir string `json:"catalogDir"`
 }

 // UpdateStrategy holds all the different types of catalog source update strategies
@@ -209,9 +236,12 @@ type CatalogSourceStatus struct {
 	// The last time the CatalogSource image registry has been polled to ensure the image is up-to-date
 	LatestImageRegistryPoll *metav1.Time `json:"latestImageRegistryPoll,omitempty"`

-	ConfigMapResource *ConfigMapResourceReference `json:"configMapReference,omitempty"`
-	RegistryServiceStatus *RegistryServiceStatus `json:"registryService,omitempty"`
-	GRPCConnectionState *GRPCConnectionState `json:"connectionState,omitempty"`
+	// ConfigMapReference (deprecated) is the reference to the ConfigMap containing the catalog source's configuration, when the catalog source is a ConfigMap
+	ConfigMapResource *ConfigMapResourceReference `json:"configMapReference,omitempty"`
+	// RegistryService represents the current state of the GRPC service used to serve the catalog
+	RegistryServiceStatus *RegistryServiceStatus `json:"registryService,omitempty"`
+	// ConnectionState represents the current state of the CatalogSource's connection to the registry
+	GRPCConnectionState *GRPCConnectionState `json:"connectionState,omitempty"`

 	// Represents the state of a CatalogSource. Note that Message and Reason represent the original
 	// status information, which may be migrated to be conditions based in the future. Any new features
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go
index ffc357b12..a4c8d1746 100644
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go
@@ -120,12 +120,19 @@ func (c *ClusterServiceVersion) IsObsolete() bool {

 // IsCopied returns true if the CSV has been copied and false otherwise.
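The new `GrpcPodConfig` knobs combine roughly as in the Go sketch below. It uses only types visible in this diff, but the names are assumptions: the namespace, catalog image reference, directory paths, and the `30Mi` quantity are invented placeholders, not recommendations.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	// Per the field docs: roughly double the size of the index image's
	// package cache. 30Mi is a placeholder value.
	memoryTarget := resource.MustParse("30Mi")

	cs := v1alpha1.CatalogSource{
		ObjectMeta: metav1.ObjectMeta{Name: "example-catalog", Namespace: "olm"}, // hypothetical
		Spec: v1alpha1.CatalogSourceSpec{
			SourceType: v1alpha1.SourceTypeGrpc,
			Image:      "registry.example.com/catalog:latest", // must be a file-based catalog for extractContent
			GrpcPodConfig: &v1alpha1.GrpcPodConfig{
				SecurityContextConfig: "restricted",
				MemoryTarget:          &memoryTarget,
				ExtractContent: &v1alpha1.ExtractContentConfig{
					CacheDir:   "/tmp/cache", // both paths are illustrative
					CatalogDir: "/configs",
				},
			},
		},
	}
	fmt.Printf("%s serves %s\n", cs.Name, cs.Spec.Image)
}
```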
 func (c *ClusterServiceVersion) IsCopied() bool {
-	operatorNamespace, ok := c.GetAnnotations()[OperatorGroupNamespaceAnnotationKey]
-	if c.Status.Reason == CSVReasonCopied || ok && c.GetNamespace() != operatorNamespace {
-		return true
+	return c.Status.Reason == CSVReasonCopied || IsCopied(c)
+}
+
+func IsCopied(o metav1.Object) bool {
+	annotations := o.GetAnnotations()
+	if annotations != nil {
+		operatorNamespace, ok := annotations[OperatorGroupNamespaceAnnotationKey]
+		if ok && o.GetNamespace() != operatorNamespace {
+			return true
+		}
 	}
-	if labels := c.GetLabels(); labels != nil {
+	if labels := o.GetLabels(); labels != nil {
 		if _, ok := labels[CopiedLabelKey]; ok {
 			return true
 		}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go
index 2452f9a1c..292fedf9b 100644
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go
@@ -90,6 +90,13 @@ type SubscriptionConfig struct {
 	// Use empty object ({}) to erase original sub-attribute values.
 	// +optional
 	Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
+
+	// Annotations is an unstructured key value map stored with each Deployment, Pod, APIService in the Operator.
+	// Typically, annotations may be set by external tools to store and retrieve arbitrary metadata.
+	// Use this field to pre-define annotations that OLM should add to each of the Subscription's
+	// deployments, pods, and apiservices.
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
 }

 // SubscriptionConditionType indicates an explicit state condition about a Subscription in "abnormal-true"
@@ -117,6 +124,19 @@ const (
 	// SubscriptionBundleUnpackFailed indicates that the unpack job failed
 	SubscriptionBundleUnpackFailed SubscriptionConditionType = "BundleUnpackFailed"
+
+	// SubscriptionDeprecated is a roll-up condition which indicates that the Operator currently installed with this Subscription
+	// has been deprecated. It will be present when any of the three deprecation types (Package, Channel, Bundle) are present.
+	SubscriptionDeprecated SubscriptionConditionType = "Deprecated"
+
+	// SubscriptionPackageDeprecated indicates that the Package currently installed with this Subscription has been deprecated.
+	SubscriptionPackageDeprecated SubscriptionConditionType = "PackageDeprecated"
+
+	// SubscriptionChannelDeprecated indicates that the Channel used with this Subscription has been deprecated.
+	SubscriptionChannelDeprecated SubscriptionConditionType = "ChannelDeprecated"
+
+	// SubscriptionBundleDeprecated indicates that the Bundle currently installed with this Subscription has been deprecated.
+ SubscriptionBundleDeprecated SubscriptionConditionType = "BundleDeprecated" ) const ( diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go index cf01001f5..684a7432a 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* @@ -690,6 +689,21 @@ func (in *DependentStatus) DeepCopy() *DependentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtractContentConfig) DeepCopyInto(out *ExtractContentConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtractContentConfig. +func (in *ExtractContentConfig) DeepCopy() *ExtractContentConfig { + if in == nil { + return nil + } + out := new(ExtractContentConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GRPCConnectionState) DeepCopyInto(out *GRPCConnectionState) { *out = *in @@ -733,6 +747,16 @@ func (in *GrpcPodConfig) DeepCopyInto(out *GrpcPodConfig) { *out = new(string) **out = **in } + if in.MemoryTarget != nil { + in, out := &in.MemoryTarget, &out.MemoryTarget + x := (*in).DeepCopy() + *out = &x + } + if in.ExtractContent != nil { + in, out := &in.ExtractContent, &out.ExtractContent + *out = new(ExtractContentConfig) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrpcPodConfig. @@ -1404,6 +1428,13 @@ func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { *out = new(v1.Affinity) (*in).DeepCopyInto(*out) } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionConfig. diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE index e06d20818..74e6ec696 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE @@ -176,7 +176,7 @@ Apache License END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. - + To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include @@ -199,4 +199,3 @@ Apache License WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
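With the generated deepcopy coverage above, the new `SubscriptionConfig.Annotations` map is copied key-by-key rather than aliased. A small usage sketch; the annotation key and values are made-up examples:

```go
package main

import (
	"fmt"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	// Annotations that OLM should stamp onto the Subscription's deployments,
	// pods and APIServices, per the new field's documentation.
	cfg := v1alpha1.SubscriptionConfig{
		Annotations: map[string]string{
			"example.com/owner": "platform-team", // hypothetical key/value
		},
	}

	// DeepCopy duplicates the map, so mutating the copy leaves the
	// original untouched.
	copied := cfg.DeepCopy()
	copied.Annotations["example.com/owner"] = "someone-else"

	fmt.Println(cfg.Annotations["example.com/owner"]) // still "platform-team"
}
```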
- diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go index a9914fb1a..6f4298483 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go @@ -14,6 +14,12 @@ package monitoring -const ( +// GroupName is set to var instead of const, since this provides the ability for clients importing the module - +// github.com/prometheus-operator/prometheus-operator/pkg/apis to manage the operator's objects in a different +// API group +// +// Use `ldflags` in the client side, e.g.: +// go run -ldflags="-s -X github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring.GroupName=monitoring.example.com" ./example/client/. +var ( GroupName = "monitoring.coreos.com" ) diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/resource.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/resource.go new file mode 100644 index 000000000..25736ce92 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/resource.go @@ -0,0 +1,60 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package monitoring + +import ( + "fmt" +) + +const ( + PrometheusesKind = "Prometheus" + PrometheusName = "prometheuses" + + AlertmanagersKind = "Alertmanager" + AlertmanagerName = "alertmanagers" + + ServiceMonitorsKind = "ServiceMonitor" + ServiceMonitorName = "servicemonitors" + + PodMonitorsKind = "PodMonitor" + PodMonitorName = "podmonitors" + + PrometheusRuleKind = "PrometheusRule" + PrometheusRuleName = "prometheusrules" + + ProbesKind = "Probe" + ProbeName = "probes" + + ScrapeConfigsKind = "ScrapeConfig" + ScrapeConfigName = "scrapeconfigs" +) + +var resourceToKindMap = map[string]string{ + PrometheusName: PrometheusesKind, + AlertmanagerName: AlertmanagersKind, + ServiceMonitorName: ServiceMonitorsKind, + PodMonitorName: PodMonitorsKind, + PrometheusRuleName: PrometheusRuleKind, + ProbeName: ProbesKind, + ScrapeConfigName: ScrapeConfigsKind, +} + +func ResourceToKind(s string) string { + kind, found := resourceToKindMap[s] + if !found { + panic(fmt.Sprintf("failed to map resource %q to a kind", s)) + } + return kind +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go index 8f44c9d31..ebd369d33 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go @@ -37,8 +37,15 @@ const ( // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".status.paused",description="Whether the resource reconciliation is paused or not",priority=1 // +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale -// Alertmanager describes an Alertmanager cluster. +// The `Alertmanager` custom resource definition (CRD) defines a desired [Alertmanager](https://prometheus.io/docs/alerting) setup to run in a Kubernetes cluster. It allows to specify many options such as the number of replicas, persistent storage and many more. +// +// For each `Alertmanager` resource, the Operator deploys a `StatefulSet` in the same namespace. When there are two or more configured replicas, the Operator runs the Alertmanager instances in high-availability mode. +// +// The resource defines via label and namespace selectors which `AlertmanagerConfig` objects should be associated to the deployed Alertmanager instances. type Alertmanager struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -60,7 +67,15 @@ func (l *Alertmanager) DeepCopyObject() runtime.Object { // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type AlertmanagerSpec struct { - // PodMetadata configures Labels and Annotations which are propagated to the alertmanager pods. + // PodMetadata configures labels and annotations which are propagated to the Alertmanager pods. 
+ // + // The following items are reserved and cannot be overridden: + // * "alertmanager" label, set to the name of the Alertmanager instance. + // * "app.kubernetes.io/instance" label, set to the name of the Alertmanager instance. + // * "app.kubernetes.io/managed-by" label, set to "prometheus-operator". + // * "app.kubernetes.io/name" label, set to "alertmanager". + // * "app.kubernetes.io/version" label, set to the Alertmanager version. + // * "kubectl.kubernetes.io/default-container" annotation, set to "alertmanager". PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` // Image if specified has precedence over baseImage, tag and sha // combinations. Specifying the version is still necessary to ensure the @@ -75,17 +90,15 @@ type AlertmanagerSpec struct { Version string `json:"version,omitempty"` // Tag of Alertmanager container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. - // Deprecated: use 'image' instead. The image tag can be specified - // as part of the image URL. + // Deprecated: use 'image' instead. The image tag can be specified as part of the image URL. Tag string `json:"tag,omitempty"` // SHA of Alertmanager container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. - // Deprecated: use 'image' instead. The image digest can be specified - // as part of the image URL. + // Deprecated: use 'image' instead. The image digest can be specified as part of the image URL. SHA string `json:"sha,omitempty"` // Base image that is used to deploy pods, without tag. - // Deprecated: use 'image' instead + // Deprecated: use 'image' instead. BaseImage string `json:"baseImage,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling prometheus and alertmanager images from registries @@ -115,10 +128,10 @@ type AlertmanagerSpec struct { // receiver (effectively dropping alert notifications). ConfigSecret string `json:"configSecret,omitempty"` // Log level for Alertmanager to be configured with. - //+kubebuilder:validation:Enum="";debug;info;warn;error + // +kubebuilder:validation:Enum="";debug;info;warn;error LogLevel string `json:"logLevel,omitempty"` // Log format for Alertmanager to be configured with. - //+kubebuilder:validation:Enum="";logfmt;json + // +kubebuilder:validation:Enum="";logfmt;json LogFormat string `json:"logFormat,omitempty"` // Size is the expected size of the alertmanager cluster. The controller will // eventually make the size of the running cluster equal to the expected @@ -164,6 +177,14 @@ type AlertmanagerSpec struct { // SecurityContext holds pod-level security attributes and common container settings. // This defaults to the default PodSecurityContext. SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` + // Defines the DNS policy for the pods. + // + // +optional + DNSPolicy *DNSPolicy `json:"dnsPolicy,omitempty"` + // Defines the DNS configuration for the pods. + // + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty"` // ServiceAccountName is the name of the ServiceAccount to use to run the // Prometheus Pods. ServiceAccountName string `json:"serviceAccountName,omitempty"` @@ -200,6 +221,9 @@ type AlertmanagerSpec struct { ClusterAdvertiseAddress string `json:"clusterAdvertiseAddress,omitempty"` // Interval between gossip attempts. 
ClusterGossipInterval GoDuration `json:"clusterGossipInterval,omitempty"` + // Defines the identifier that uniquely identifies the Alertmanager cluster. + // You should only set it when the Alertmanager cluster includes Alertmanager instances which are external to this Alertmanager resource. In practice, the addresses of the external instances are provided via the `.spec.additionalPeers` field. + ClusterLabel *string `json:"clusterLabel,omitempty"` // Interval between pushpull attempts. ClusterPushpullInterval GoDuration `json:"clusterPushpullInterval,omitempty"` // Timeout for cluster peering. @@ -213,12 +237,14 @@ type AlertmanagerSpec struct { ForceEnableClusterMode bool `json:"forceEnableClusterMode,omitempty"` // AlertmanagerConfigs to be selected for to merge and configure Alertmanager with. AlertmanagerConfigSelector *metav1.LabelSelector `json:"alertmanagerConfigSelector,omitempty"` - // The AlertmanagerConfigMatcherStrategy defines how AlertmanagerConfig objects match the alerts. - // In the future more options may be added. - AlertmanagerConfigMatcherStrategy AlertmanagerConfigMatcherStrategy `json:"alertmanagerConfigMatcherStrategy,omitempty"` // Namespaces to be selected for AlertmanagerConfig discovery. If nil, only // check own namespace. AlertmanagerConfigNamespaceSelector *metav1.LabelSelector `json:"alertmanagerConfigNamespaceSelector,omitempty"` + + // AlertmanagerConfigMatcherStrategy defines how AlertmanagerConfig objects + // process incoming alerts. + AlertmanagerConfigMatcherStrategy AlertmanagerConfigMatcherStrategy `json:"alertmanagerConfigMatcherStrategy,omitempty"` + // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) @@ -231,22 +257,54 @@ type AlertmanagerSpec struct { HostAliases []HostAlias `json:"hostAliases,omitempty"` // Defines the web command line flags when starting Alertmanager. Web *AlertmanagerWebSpec `json:"web,omitempty"` - // EXPERIMENTAL: alertmanagerConfiguration specifies the configuration of Alertmanager. + // alertmanagerConfiguration specifies the configuration of Alertmanager. + // // If defined, it takes precedence over the `configSecret` field. - // This field may change in future releases. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. + // + //+optional AlertmanagerConfiguration *AlertmanagerConfiguration `json:"alertmanagerConfiguration,omitempty"` + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod. + // If the service account has `automountServiceAccountToken: true`, set the field to `false` to opt out of automounting API credentials. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + // Enable access to Alertmanager feature flags. By default, no features are enabled. + // Enabling features which are disabled by default is entirely outside the + // scope of what the maintainers will support and by doing so, you accept + // that this behaviour may break at any time without notice. + // + // It requires Alertmanager >= 0.27.0. + // +optional + EnableFeatures []string `json:"enableFeatures,omitempty"` } -// AlertmanagerConfigMatcherStrategy defines the strategy used by AlertmanagerConfig objects to match alerts. 
 type AlertmanagerConfigMatcherStrategy struct {
-	// If set to `OnNamespace`, the operator injects a label matcher matching the namespace of the AlertmanagerConfig object for all its routes and inhibition rules.
-	// `None` will not add any additional matchers other than the ones specified in the AlertmanagerConfig.
-	// Default is `OnNamespace`.
+	// AlertmanagerConfigMatcherStrategyType defines the strategy used by
+	// AlertmanagerConfig objects to match alerts in the routes and inhibition
+	// rules.
+	//
+	// The default value is `OnNamespace`.
+	//
 	// +kubebuilder:validation:Enum="OnNamespace";"None"
 	// +kubebuilder:default:="OnNamespace"
-	Type string `json:"type,omitempty"`
+	Type AlertmanagerConfigMatcherStrategyType `json:"type,omitempty"`
 }

+type AlertmanagerConfigMatcherStrategyType string
+
+const (
+	// With `OnNamespace`, the route and inhibition rules of an
+	// AlertmanagerConfig object only process alerts that have a `namespace`
+	// label equal to the namespace of the object.
+	OnNamespaceConfigMatcherStrategyType AlertmanagerConfigMatcherStrategyType = "OnNamespace"
+
+	// With `None`, the route and inhibition rules of an AlertmanagerConfig
+	// object process all incoming alerts.
+	NoneConfigMatcherStrategyType AlertmanagerConfigMatcherStrategyType = "None"
+)
+
 // AlertmanagerConfiguration defines the Alertmanager configuration.
 // +k8s:openapi-gen=true
 type AlertmanagerConfiguration struct {
@@ -266,6 +324,10 @@ type AlertmanagerConfiguration struct {
 // AlertmanagerGlobalConfig configures parameters that are valid in all other configuration contexts.
 // See https://prometheus.io/docs/alerting/latest/configuration/#configuration-file
 type AlertmanagerGlobalConfig struct {
+	// Configures global SMTP parameters.
+	// +optional
+	SMTPConfig *GlobalSMTPConfig `json:"smtp,omitempty"`
+
 	// ResolveTimeout is the default value used by alertmanager if the alert does
 	// not include EndsAt, after this time passes it can declare the alert as resolved if it has not been updated.
 	// This has no impact on alerts from Prometheus, as they always include EndsAt.
@@ -282,6 +344,9 @@ type AlertmanagerGlobalConfig struct {

 	// The default OpsGenie API Key.
 	OpsGenieAPIKey *v1.SecretKeySelector `json:"opsGenieApiKey,omitempty"`
+
+	// The default Pagerduty URL.
+	PagerdutyURL *string `json:"pagerdutyUrl,omitempty"`
 }

 // AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only.
@@ -303,6 +368,8 @@ type AlertmanagerStatus struct {
 	AvailableReplicas int32 `json:"availableReplicas"`
 	// Total number of unavailable pods targeted by this Alertmanager object.
 	UnavailableReplicas int32 `json:"unavailableReplicas"`
+	// The selector used to match the pods targeted by this Alertmanager object.
+	Selector string `json:"selector,omitempty"`
 	// The current state of the Alertmanager object.
 	// +listType=map
 	// +listMapKey=type
@@ -336,6 +403,53 @@ type AlertmanagerWebSpec struct {
 	Timeout *uint32 `json:"timeout,omitempty"`
 }

+// GlobalSMTPConfig configures global SMTP parameters.
+// See https://prometheus.io/docs/alerting/latest/configuration/#configuration-file
+type GlobalSMTPConfig struct {
+	// The default SMTP From header field.
+	// +optional
+	From *string `json:"from,omitempty"`
+
+	// The default SMTP smarthost used for sending emails.
+	// +optional
+	SmartHost *HostPort `json:"smartHost,omitempty"`
+
+	// The default hostname to identify to the SMTP server.
+ // +optional + Hello *string `json:"hello,omitempty"` + + // SMTP Auth using CRAM-MD5, LOGIN and PLAIN. If empty, Alertmanager doesn't authenticate to the SMTP server. + // +optional + AuthUsername *string `json:"authUsername,omitempty"` + + // SMTP Auth using LOGIN and PLAIN. + // +optional + AuthPassword *v1.SecretKeySelector `json:"authPassword,omitempty"` + + // SMTP Auth using PLAIN. + // +optional + AuthIdentity *string `json:"authIdentity,omitempty"` + + // SMTP Auth using CRAM-MD5. + // +optional + AuthSecret *v1.SecretKeySelector `json:"authSecret,omitempty"` + + // The default SMTP TLS requirement. + // Note that Go does not support unencrypted connections to remote SMTP endpoints. + // +optional + RequireTLS *bool `json:"requireTLS,omitempty"` +} + +// HostPort represents a "host:port" network address. +type HostPort struct { + // Defines the host's address; it can be a DNS name or a literal IP address. + // +kubebuilder:validation:MinLength=1 + Host string `json:"host"` + // Defines the host's port; it can be a literal port number or a port name. + // +kubebuilder:validation:MinLength=1 + Port string `json:"port"` +} + // HTTPConfig defines a client HTTP configuration. // See https://prometheus.io/docs/alerting/latest/configuration/#http_config type HTTPConfig struct { @@ -359,9 +473,8 @@ type HTTPConfig struct { // TLS configuration for the client. // +optional TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` - // Optional proxy URL. // +optional - ProxyURL string `json:"proxyURL,omitempty"` + ProxyConfig `json:",inline"` // FollowRedirects specifies whether the client should follow HTTP 3xx redirects. // +optional FollowRedirects *bool `json:"followRedirects,omitempty"` diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/dns_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/dns_types.go new file mode 100644 index 000000000..a4731458b --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/dns_types.go @@ -0,0 +1,82 @@ +// Copyright 2024 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package v1 + +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // +kubebuilder:validation:Optional + // +listType:=set + // +kubebuilder:validation:items:MinLength:=1 + Nameservers []string `json:"nameservers,omitempty"` + + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // +kubebuilder:validation:Optional + // +listType:=set + // +kubebuilder:validation:items:MinLength:=1 + Searches []string `json:"searches,omitempty"` + + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy.
+ // Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + Options []PodDNSConfigOption `json:"options,omitempty"` +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // Name is required and must be unique. + // +kubebuilder:validation:MinLength=1 + Name string `json:"name"` + + // Value is optional. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty"` +} + +// DNSPolicy specifies the DNS policy for the pod. +// +kubebuilder:validation:Enum=ClusterFirstWithHostNet;ClusterFirst;Default;None +type DNSPolicy string + +const ( + // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + + // DNSClusterFirst indicates that the pod should use cluster DNS + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. + DNSClusterFirst DNSPolicy = "ClusterFirst" + + // DNSDefault indicates that the pod should use the default (as + // determined by kubelet) DNS settings. + DNSDefault DNSPolicy = "Default" + + // DNSNone indicates that the pod should use empty DNS settings. DNS + // parameters such as nameservers and search paths should be defined via + // DNSConfig. + DNSNone DNSPolicy = "None" +) + +const ( +// DefaultTerminationGracePeriodSeconds indicates the default duration in +// seconds a pod needs to terminate gracefully. +) diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go index 630959919..a6e2c1605 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go @@ -31,7 +31,14 @@ const ( // +k8s:openapi-gen=true // +kubebuilder:resource:categories="prometheus-operator",shortName="pmon" -// PodMonitor defines monitoring for a set of pods. +// The `PodMonitor` custom resource definition (CRD) defines how `Prometheus` and `PrometheusAgent` can scrape metrics from a group of pods. +// Among other things, it allows specifying: +// * The pods to scrape via label selectors. +// * The container ports to scrape. +// * Authentication credentials to use. +// * Target and metric relabeling. +// +// `Prometheus` and `PrometheusAgent` objects select `PodMonitor` objects using label and namespace selectors. type PodMonitor struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -48,31 +55,105 @@ func (l *PodMonitor) DeepCopyObject() runtime.Object { // +k8s:openapi-gen=true type PodMonitorSpec struct { // The label to use to retrieve the job name from. + // `jobLabel` selects the label from the associated Kubernetes `Pod` + // object which will be used as the `job` label for all metrics. + // + // For example if `jobLabel` is set to `foo` and the Kubernetes `Pod` + // object is labeled with `foo: bar`, then Prometheus adds the `job="bar"` + // label to all ingested metrics.
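The new DNS types above mirror the core Kubernetes pod DNS API. As a rough sketch of how they compose (the nameserver IP, search domain, and resolver options are invented for illustration, and the generic `ptr` helper is local to the example, not part of the vendored package), a `DNSPolicy` of `None` is typically paired with an explicit `PodDNSConfig`:

```go
package main

import (
	"fmt"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// ptr is a small local helper for taking the address of a literal.
func ptr[T any](v T) *T { return &v }

func main() {
	// With the "None" policy, every resolver setting must come from dnsConfig.
	policy := monitoringv1.DNSNone

	dnsConfig := monitoringv1.PodDNSConfig{
		Nameservers: []string{"10.96.0.10"},                   // example cluster DNS IP
		Searches:    []string{"monitoring.svc.cluster.local"}, // example search domain
		Options: []monitoringv1.PodDNSConfigOption{
			{Name: "ndots", Value: ptr("2")},
			{Name: "edns0"}, // options may omit the value entirely
		},
	}

	fmt.Println(policy, dnsConfig.Nameservers)
}
```

Per the kubebuilder markers above, `nameservers` and `searches` are validated as sets with non-empty entries, and `options` is a map-list keyed by `name`.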
+ // + // If the value of this field is empty, the `job` label of the metrics + // defaults to the namespace and name of the PodMonitor object (e.g. `<namespace>/<name>`). JobLabel string `json:"jobLabel,omitempty"` - // PodTargetLabels transfers labels on the Kubernetes Pod onto the target. + + // `podTargetLabels` defines the labels which are transferred from the + // associated Kubernetes `Pod` object onto the ingested metrics. + // PodTargetLabels []string `json:"podTargetLabels,omitempty"` - // A list of endpoints allowed as part of this PodMonitor. + + // Defines how to scrape metrics from the selected pods. + // + // +optional PodMetricsEndpoints []PodMetricsEndpoint `json:"podMetricsEndpoints"` - // Selector to select Pod objects. + + // Label selector to select the Kubernetes `Pod` objects to scrape metrics from. Selector metav1.LabelSelector `json:"selector"` - // Selector to select which namespaces the Endpoints objects are discovered from. + // `namespaceSelector` defines in which namespace(s) Prometheus should discover the pods. + // By default, the pods are discovered in the same namespace as the `PodMonitor` object but it is possible to select pods across different/all namespaces. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` - // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - SampleLimit uint64 `json:"sampleLimit,omitempty"` - // TargetLimit defines a limit on the number of scraped targets that will be accepted. - TargetLimit uint64 `json:"targetLimit,omitempty"` + + // `sampleLimit` defines a per-scrape limit on the number of scraped samples + // that will be accepted. + // + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + + // `targetLimit` defines a limit on the number of scraped targets that will + // be accepted. + // + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + + // `scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + // Per-scrape limit on number of labels that will be accepted for a sample. - // Only valid in Prometheus versions 2.27.0 and newer. - LabelLimit uint64 `json:"labelLimit,omitempty"` + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` // Per-scrape limit on length of labels name that will be accepted for a sample. - // Only valid in Prometheus versions 2.27.0 and newer. - LabelNameLengthLimit uint64 `json:"labelNameLengthLimit,omitempty"` + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` // Per-scrape limit on length of labels value that will be accepted for a sample. - // Only valid in Prometheus versions 2.27.0 and newer. - LabelValueLengthLimit uint64 `json:"labelValueLengthLimit,omitempty"` - // Attaches node metadata to discovered targets. - // Requires Prometheus v2.35.0 and above. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + + NativeHistogramConfig `json:",inline"` + + // Per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory.
0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // `attachMetadata` defines additional metadata which is added to the + // discovered targets. + // + // It requires Prometheus >= v2.35.0. + // + // +optional AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` + + // The scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` + + // When defined, bodySizeLimit specifies a job level limit on the size + // of uncompressed response body that will be accepted by Prometheus. + // + // It requires Prometheus >= v2.28.0. + // + // +optional + BodySizeLimit *ByteSize `json:"bodySizeLimit,omitempty"` } // PodMonitorList is a list of PodMonitors. @@ -91,66 +172,151 @@ func (l *PodMonitorList) DeepCopyObject() runtime.Object { return l.DeepCopy() } -// PodMetricsEndpoint defines a scrapeable endpoint of a Kubernetes Pod serving Prometheus metrics. +// PodMetricsEndpoint defines an endpoint serving Prometheus metrics to be scraped by +// Prometheus. +// // +k8s:openapi-gen=true type PodMetricsEndpoint struct { - // Name of the pod port this endpoint refers to. Mutually exclusive with targetPort. + // Name of the Pod port which this endpoint refers to. + // + // It takes precedence over `targetPort`. Port string `json:"port,omitempty"` - // Deprecated: Use 'port' instead. + + // Name or number of the target port of the `Pod` object behind the Service; the + // port must be specified with the container port property. + // + // Deprecated: use 'port' instead. TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` - // HTTP path to scrape for metrics. + + // HTTP path from which to scrape for metrics. + // + // If empty, Prometheus uses the default value (e.g. `/metrics`). Path string `json:"path,omitempty"` + // HTTP scheme to use for scraping. - // `http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. + // + // `http` and `https` are the expected values unless you rewrite the + // `__scheme__` label via relabeling. + // // If empty, Prometheus uses the default value `http`. + // // +kubebuilder:validation:Enum=http;https Scheme string `json:"scheme,omitempty"` - // Optional HTTP URL parameters + + // `params` define optional HTTP URL parameters. Params map[string][]string `json:"params,omitempty"` - // Interval at which metrics should be scraped - // If not specified Prometheus' global scrape interval is used. + + // Interval at which Prometheus scrapes the metrics from the target. + // + // If empty, Prometheus uses the global scrape interval. Interval Duration `json:"interval,omitempty"` - // Timeout after which the scrape is ended - // If not specified, the Prometheus global scrape interval is used. + + // Timeout after which Prometheus considers the scrape to be failed. + // + // If empty, Prometheus uses the global scrape timeout unless it is less + // than the target's scrape interval value, in which case the latter is used. ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` - // TLS configuration to use when scraping the endpoint. - TLSConfig *PodMetricsEndpointTLSConfig `json:"tlsConfig,omitempty"` - // Secret to mount to read bearer token for scraping targets. The secret - // needs to be in the same namespace as the pod monitor and accessible by - // the Prometheus Operator. + + // TLS configuration to use when scraping the target.
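The limits above changed from plain `uint64` to `*uint64`, so "unset" is now distinguishable from an explicit zero. A minimal sketch of what that means for Go callers constructing a `PodMonitorSpec` (the selector label and endpoint values are illustrative, and the `ptr` helper is local to the example):

```go
package main

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ptr[T any](v T) *T { return &v }

func main() {
	spec := monitoringv1.PodMonitorSpec{
		Selector: metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "example"}, // hypothetical pod label
		},
		PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{
			{Port: "metrics", Path: "/metrics"},
		},
		// A nil pointer means "unset, use the Prometheus default"; a non-nil
		// pointer expresses a real limit, including a literal 0.
		SampleLimit:        ptr(uint64(10000)),
		KeepDroppedTargets: ptr(uint64(100)), // requires Prometheus >= v2.47.0
		// TargetLimit left nil: no job-level target limit is rendered.
	}
	_ = spec
}
```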
+ // + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` + + // `bearerTokenSecret` specifies a key of a Secret containing the bearer + // token for scraping targets. The secret needs to be in the same namespace + // as the PodMonitor object and readable by the Prometheus Operator. + // + // +optional + // + // Deprecated: use `authorization` instead. BearerTokenSecret v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` - // HonorLabels chooses the metric's labels on collisions with target labels. + + // When true, `honorLabels` preserves the metric's labels when they collide + // with the target's labels. HonorLabels bool `json:"honorLabels,omitempty"` - // HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. + + // `honorTimestamps` controls whether Prometheus preserves the timestamps + // when exposed by the target. + // + // +optional HonorTimestamps *bool `json:"honorTimestamps,omitempty"` - // BasicAuth allow an endpoint to authenticate over basic authentication. - // More info: https://prometheus.io/docs/operating/configuration/#endpoint + + // `trackTimestampsStaleness` defines whether Prometheus tracks staleness of + // the metrics that have an explicit timestamp present in scraped data. + // Has no effect if `honorTimestamps` is false. + // + // It requires Prometheus >= v2.48.0. + // + // +optional + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` + + // `basicAuth` configures the Basic Authentication credentials to use when + // scraping the target. + // + // Cannot be set at the same time as `authorization`, or `oauth2`. + // + // +optional BasicAuth *BasicAuth `json:"basicAuth,omitempty"` - // OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. + + // `oauth2` configures the OAuth2 settings to use when scraping the target. + // + // It requires Prometheus >= 2.27.0. + // + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // + // +optional OAuth2 *OAuth2 `json:"oauth2,omitempty"` - // Authorization section for this endpoint + + // `authorization` configures the Authorization header credentials to use when + // scraping the target. + // + // Cannot be set at the same time as `basicAuth`, or `oauth2`. + // + // +optional Authorization *SafeAuthorization `json:"authorization,omitempty"` - // MetricRelabelConfigs to apply to samples before ingestion. - MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` - // RelabelConfigs to apply to samples before scraping. - // Prometheus Operator automatically adds relabelings for a few standard Kubernetes fields. + + // `metricRelabelings` configures the relabeling rules to apply to the + // samples before ingestion. + // + // +optional + MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"` + + // `relabelings` configures the relabeling rules to apply to the target's + // metadata labels. + // + // The Operator automatically adds relabelings for a few standard Kubernetes fields. + // + // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. + // + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"` - // ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. + // + // +optional + RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"` + + // `proxyURL` configures the HTTP Proxy URL (e.g.
+ // "http://proxyserver:2195") to go through when scraping the target. + // + // +optional ProxyURL *string `json:"proxyUrl,omitempty"` - // FollowRedirects configures whether scrape requests follow HTTP 3xx redirects. + + // `followRedirects` defines whether the scrape requests should follow HTTP + // 3xx redirects. + // + // +optional FollowRedirects *bool `json:"followRedirects,omitempty"` - // Whether to enable HTTP2. + + // `enableHttp2` can be used to disable HTTP2 when scraping the target. + // + // +optional EnableHttp2 *bool `json:"enableHttp2,omitempty"` - // Drop pods that are not running. (Failed, Succeeded). Enabled by default. + + // When true, the pods which are not running (e.g. either in Failed or + // Succeeded state) are dropped during the target discovery. + // + // If unset, the filtering is enabled. + // // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + // + // +optional FilterRunning *bool `json:"filterRunning,omitempty"` } - -// PodMetricsEndpointTLSConfig specifies TLS configuration parameters. -// +k8s:openapi-gen=true -type PodMetricsEndpointTLSConfig struct { - SafeTLSConfig `json:",inline"` -} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go index 46c107705..16c927ad6 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go @@ -30,7 +30,13 @@ const ( // +k8s:openapi-gen=true // +kubebuilder:resource:categories="prometheus-operator",shortName="prb" -// Probe defines monitoring for a set of static targets or ingresses. +// The `Probe` custom resource definition (CRD) defines how to scrape metrics from prober exporters such as the [blackbox exporter](https://github.com/prometheus/blackbox_exporter). +// +// The `Probe` resource needs 2 pieces of information: +// * The list of probed addresses which can be defined statically or by discovering Kubernetes Ingress objects. +// * The prober which exposes the availability of probed endpoints (over various protocols such HTTP, TCP, ICMP, ...) as Prometheus metrics. +// +// `Prometheus` and `PrometheusAgent` objects select `Probe` objects using label and namespace selectors. type Probe struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -64,7 +70,7 @@ type ProbeSpec struct { // If not specified, the Prometheus global scrape timeout is used. ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` // TLS configuration to use when scraping the endpoint. - TLSConfig *ProbeTLSConfig `json:"tlsConfig,omitempty"` + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` // Secret to mount to read bearer token for scraping targets. The secret // needs to be in the same namespace as the probe and accessible by // the Prometheus Operator. @@ -75,22 +81,51 @@ type ProbeSpec struct { // OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. OAuth2 *OAuth2 `json:"oauth2,omitempty"` // MetricRelabelConfigs to apply to samples before ingestion. 
- MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` + MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"` // Authorization section for this endpoint Authorization *SafeAuthorization `json:"authorization,omitempty"` // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - SampleLimit uint64 `json:"sampleLimit,omitempty"` + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` // TargetLimit defines a limit on the number of scraped targets that will be accepted. - TargetLimit uint64 `json:"targetLimit,omitempty"` + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + // `scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` // Per-scrape limit on number of labels that will be accepted for a sample. // Only valid in Prometheus versions 2.27.0 and newer. - LabelLimit uint64 `json:"labelLimit,omitempty"` + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` // Per-scrape limit on length of labels name that will be accepted for a sample. // Only valid in Prometheus versions 2.27.0 and newer. - LabelNameLengthLimit uint64 `json:"labelNameLengthLimit,omitempty"` + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` // Per-scrape limit on length of labels value that will be accepted for a sample. // Only valid in Prometheus versions 2.27.0 and newer. - LabelValueLengthLimit uint64 `json:"labelValueLengthLimit,omitempty"` + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + + NativeHistogramConfig `json:",inline"` + // Per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // The scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` } // ProbeTargets defines how to discover the probed targets. @@ -139,7 +174,7 @@ type ProbeTargetStaticConfig struct { // RelabelConfigs to apply to the label set of the targets before it gets // scraped. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - RelabelConfigs []*RelabelConfig `json:"relabelingConfigs,omitempty"` + RelabelConfigs []RelabelConfig `json:"relabelingConfigs,omitempty"` } // ProbeTargetIngress defines the set of Ingress objects considered for probing. @@ -157,7 +192,7 @@ type ProbeTargetIngress struct { // probed URL. // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - RelabelConfigs []*RelabelConfig `json:"relabelingConfigs,omitempty"` + RelabelConfigs []RelabelConfig `json:"relabelingConfigs,omitempty"` } // ProberSpec contains specification parameters for the Prober used for probing. 
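Throughout `Probe` and `PodMonitor`, relabeling fields move from `[]*RelabelConfig` to `[]RelabelConfig`, so literals no longer need the leading `&` and nil entries are impossible. A small sketch of the migration for client code (the relabel rule itself is illustrative):

```go
package main

import (
	"fmt"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

func main() {
	// Before this change, the field was []*monitoringv1.RelabelConfig and a
	// literal required taking addresses:
	//     []*monitoringv1.RelabelConfig{{Action: "labeldrop", Regex: "tmp_.*"}}
	// After the change, a plain value slice suffices:
	spec := monitoringv1.ProbeSpec{
		MetricRelabelConfigs: []monitoringv1.RelabelConfig{
			{Action: "labeldrop", Regex: "tmp_.*"}, // illustrative rule
		},
	}
	fmt.Println(len(spec.MetricRelabelConfigs))
}
```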
@@ -193,9 +228,3 @@ type ProbeList struct { func (l *ProbeList) DeepCopyObject() runtime.Object { return l.DeepCopy() } - -// ProbeTLSConfig specifies TLS configuration parameters for the prober. -// +k8s:openapi-gen=true -type ProbeTLSConfig struct { - SafeTLSConfig `json:",inline"` -} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go index 49b18c284..406613435 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go @@ -17,9 +17,12 @@ package v1 import ( "strings" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -29,15 +32,38 @@ const ( PrometheusKindKey = "prometheus" ) +// ScrapeProtocol represents a protocol used by Prometheus for scraping metrics. +// Supported values are: +// * `OpenMetricsText0.0.1` +// * `OpenMetricsText1.0.0` +// * `PrometheusProto` +// * `PrometheusText0.0.4` +// +kubebuilder:validation:Enum=PrometheusProto;OpenMetricsText0.0.1;OpenMetricsText1.0.0;PrometheusText0.0.4 +type ScrapeProtocol string + +// RuntimeConfig configures the values for the process behavior. +type RuntimeConfig struct { + // The Go garbage collection target percentage. Lowering this number may increase the CPU usage. + // See: https://tip.golang.org/doc/gc-guide#GOGC + // +optional + // +kubebuilder:validation:Minimum=-1 + GoGC *int32 `json:"goGC,omitempty"` +} + // PrometheusInterface is used by Prometheus and PrometheusAgent to share common methods, e.g. config generation. // +k8s:deepcopy-gen=false type PrometheusInterface interface { metav1.ObjectMetaAccessor - GetTypeMeta() metav1.TypeMeta + schema.ObjectKind + GetCommonPrometheusFields() CommonPrometheusFields SetCommonPrometheusFields(CommonPrometheusFields) + + GetStatus() PrometheusStatus } +var _ = PrometheusInterface(&Prometheus{}) + func (l *Prometheus) GetCommonPrometheusFields() CommonPrometheusFields { return l.Spec.CommonPrometheusFields } @@ -46,209 +72,402 @@ func (l *Prometheus) SetCommonPrometheusFields(f CommonPrometheusFields) { l.Spec.CommonPrometheusFields = f } -func (l *Prometheus) GetTypeMeta() metav1.TypeMeta { - return l.TypeMeta +func (l *Prometheus) GetStatus() PrometheusStatus { + return l.Status +} + +// +kubebuilder:validation:Enum=OnResource;OnShard +type AdditionalLabelSelectors string + +const ( + // Automatically add a label selector that will select all pods matching the same Prometheus/PrometheusAgent resource (irrespective of their shards). + ResourceNameLabelSelector AdditionalLabelSelectors = "OnResource" + + // Automatically add a label selector that will select all pods matching the same shard. + ShardAndResourceNameLabelSelector AdditionalLabelSelectors = "OnShard" +) + +type CoreV1TopologySpreadConstraint v1.TopologySpreadConstraint + +type TopologySpreadConstraint struct { + CoreV1TopologySpreadConstraint `json:",inline"` + + //+optional + // Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint. 
+ AdditionalLabelSelectors *AdditionalLabelSelectors `json:"additionalLabelSelectors,omitempty"` } +// +kubebuilder:validation:MinLength:=1 +type EnableFeature string + // CommonPrometheusFields are the options available to both the Prometheus server and agent. // +k8s:deepcopy-gen=true type CommonPrometheusFields struct { - // PodMetadata configures Labels and Annotations which are propagated to the prometheus pods. + // PodMetadata configures labels and annotations which are propagated to the Prometheus pods. + // + // The following items are reserved and cannot be overridden: + // * "prometheus" label, set to the name of the Prometheus object. + // * "app.kubernetes.io/instance" label, set to the name of the Prometheus object. + // * "app.kubernetes.io/managed-by" label, set to "prometheus-operator". + // * "app.kubernetes.io/name" label, set to "prometheus". + // * "app.kubernetes.io/version" label, set to the Prometheus version. + // * "operator.prometheus.io/name" label, set to the name of the Prometheus object. + // * "operator.prometheus.io/shard" label, set to the shard number of the Prometheus object. + // * "kubectl.kubernetes.io/default-container" annotation, set to "prometheus". PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` - // ServiceMonitors to be selected for target discovery. + + // ServiceMonitors to be selected for target discovery. An empty label + // selector matches all objects. A null label selector matches no objects. // // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. // The Prometheus operator will ensure that the Prometheus configuration's // Secret exists, but it is the responsibility of the user to provide the raw // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. - // This behavior is deprecated and will be removed in the next major version + // This behavior is *deprecated* and will be removed in the next major version // of the custom resource definition. It is recommended to use // `spec.additionalScrapeConfigs` instead. ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"` - // Namespace's labels to match for ServiceMonitor discovery. If nil, only - // check own namespace. + // Namespaces to match for ServiceMonitors discovery. An empty label selector + // matches all namespaces. A null label selector (default value) matches the current + // namespace only. ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"` - // *Experimental* PodMonitors to be selected for target discovery. + + // PodMonitors to be selected for target discovery. An empty label selector + // matches all objects. A null label selector matches no objects. // // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. // The Prometheus operator will ensure that the Prometheus configuration's // Secret exists, but it is the responsibility of the user to provide the raw // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. - // This behavior is deprecated and will be removed in the next major version + // This behavior is *deprecated* and will be removed in the next major version // of the custom resource definition. It is recommended to use // `spec.additionalScrapeConfigs` instead.
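The selector doc comments above encode a deliberate tri-state: nil, empty-but-non-nil, and non-empty selectors each behave differently. A sketch of the three cases, assuming the vendored `monitoringv1` package (the program itself is illustrative):

```go
package main

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	fields := monitoringv1.CommonPrometheusFields{
		// Non-nil but empty selector: select every ServiceMonitor.
		ServiceMonitorSelector: &metav1.LabelSelector{},
		// Nil namespace selector: discover only in the object's own namespace.
		ServiceMonitorNamespaceSelector: nil,
		// Nil object selector: select no PodMonitors at all.
		PodMonitorSelector: nil,
	}
	_ = fields
}
```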
PodMonitorSelector *metav1.LabelSelector `json:"podMonitorSelector,omitempty"` - // Namespace's labels to match for PodMonitor discovery. If nil, only - // check own namespace. + // Namespaces to match for PodMonitors discovery. An empty label selector + // matches all namespaces. A null label selector (default value) matches the current + // namespace only. PodMonitorNamespaceSelector *metav1.LabelSelector `json:"podMonitorNamespaceSelector,omitempty"` - // *Experimental* Probes to be selected for target discovery. + + // Probes to be selected for target discovery. An empty label selector + // matches all objects. A null label selector matches no objects. // // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. // The Prometheus operator will ensure that the Prometheus configuration's // Secret exists, but it is the responsibility of the user to provide the raw // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. - // This behavior is deprecated and will be removed in the next major version + // This behavior is *deprecated* and will be removed in the next major version // of the custom resource definition. It is recommended to use // `spec.additionalScrapeConfigs` instead. ProbeSelector *metav1.LabelSelector `json:"probeSelector,omitempty"` - // *Experimental* Namespaces to be selected for Probe discovery. If nil, only check own namespace. + // Namespaces to match for Probe discovery. An empty label + // selector matches all namespaces. A null label selector matches the + // current namespace only. ProbeNamespaceSelector *metav1.LabelSelector `json:"probeNamespaceSelector,omitempty"` - // *Experimental* ScrapeConfigs to be selected for target discovery. + + // ScrapeConfigs to be selected for target discovery. An empty label + // selector matches all objects. A null label selector matches no objects. // // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. // The Prometheus operator will ensure that the Prometheus configuration's // Secret exists, but it is the responsibility of the user to provide the raw // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. - // This behavior is deprecated and will be removed in the next major version + // This behavior is *deprecated* and will be removed in the next major version // of the custom resource definition. It is recommended to use // `spec.additionalScrapeConfigs` instead. + // + // Note that the ScrapeConfig custom resource definition is currently at Alpha level. + // + // +optional ScrapeConfigSelector *metav1.LabelSelector `json:"scrapeConfigSelector,omitempty"` - // Namespace's labels to match for ScrapeConfig discovery. If nil, only - // check own namespace. + // Namespaces to match for ScrapeConfig discovery. An empty label selector + // matches all namespaces. A null label selector matches the current + // namespace only. + // + // Note that the ScrapeConfig custom resource definition is currently at Alpha level. + // + // +optional ScrapeConfigNamespaceSelector *metav1.LabelSelector `json:"scrapeConfigNamespaceSelector,omitempty"` - // Version of Prometheus to be deployed. + + // Version of Prometheus being deployed. The operator uses this information + // to generate the Prometheus StatefulSet + configuration files. 
+ // + // If not specified, the operator assumes the latest upstream version of + // Prometheus available at the time when the version of the operator was + // released. Version string `json:"version,omitempty"` + // When a Prometheus deployment is paused, no actions except for deletion // will be performed on the underlying objects. Paused bool `json:"paused,omitempty"` - // Image if specified has precedence over baseImage, tag and sha - // combinations. Specifying the version is still necessary to ensure the - // Prometheus Operator knows what version of Prometheus is being - // configured. + + // Container image name for Prometheus. If specified, it takes precedence + // over the `spec.baseImage`, `spec.tag` and `spec.sha` fields. + // + // Specifying `spec.version` is still necessary to ensure the Prometheus + // Operator knows which version of Prometheus is being configured. + // + // If neither `spec.image` nor `spec.baseImage` are defined, the operator + // will use the latest upstream version of Prometheus available at the time + // when the operator was released. + // + // +optional Image *string `json:"image,omitempty"` // Image pull policy for the 'prometheus', 'init-config-reloader' and 'config-reloader' containers. // See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy for more details. // +kubebuilder:validation:Enum="";Always;Never;IfNotPresent ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"` - // An optional list of references to secrets in the same namespace - // to use for pulling prometheus and alertmanager images from registries - // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // An optional list of references to Secrets in the same namespace + // to use for pulling images from registries. + // See http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + // Number of replicas of each shard to deploy for a Prometheus deployment. - // Number of replicas multiplied by shards is the total number of Pods + // `spec.replicas` multiplied by `spec.shards` is the total number of Pods // created. + // + // Default: 1 + // +optional Replicas *int32 `json:"replicas,omitempty"` - // EXPERIMENTAL: Number of shards to distribute targets onto. Number of - // replicas multiplied by shards is the total number of Pods created. Note - // that scaling down shards will not reshard data onto remaining instances, - // it must be manually moved. Increasing shards will not reshard data - // either but it will continue to be available from the same instances. To - // query globally use Thanos sidecar and Thanos querier or remote write - // data to a central location. Sharding is done on the content of the - // `__address__` target meta-label. + // Number of shards to distribute targets onto. `spec.replicas` + // multiplied by `spec.shards` is the total number of Pods created. + // + // Note that scaling down shards will not reshard data onto remaining + // instances; it must be moved manually. Increasing shards will not reshard + // data either but it will continue to be available from the same + // instances. To query globally, use Thanos sidecar and Thanos querier or + // remote write data to a central location. + // + // Sharding is performed on the content of the `__address__` target meta-label + // for PodMonitors and ServiceMonitors and `__param_target__` for Probes.
+ // + // Default: 1 + // +optional Shards *int32 `json:"shards,omitempty"` - // Name of Prometheus external label used to denote replica name. - // Defaults to the value of `prometheus_replica`. External label will - // _not_ be added when value is set to empty string (`""`). + + // Name of Prometheus external label used to denote the replica name. + // The external label will _not_ be added when the field is set to the + // empty string (`""`). + // + // Default: "prometheus_replica" + // +optional ReplicaExternalLabelName *string `json:"replicaExternalLabelName,omitempty"` - // Name of Prometheus external label used to denote Prometheus instance - // name. Defaults to the value of `prometheus`. External label will - // _not_ be added when value is set to empty string (`""`). + // Name of Prometheus external label used to denote the Prometheus instance + // name. The external label will _not_ be added when the field is set to + // the empty string (`""`). + // + // Default: "prometheus" + // +optional PrometheusExternalLabelName *string `json:"prometheusExternalLabelName,omitempty"` - // Log level for Prometheus to be configured with. - //+kubebuilder:validation:Enum="";debug;info;warn;error + + // Log level for Prometheus and the config-reloader sidecar. + // +kubebuilder:validation:Enum="";debug;info;warn;error LogLevel string `json:"logLevel,omitempty"` - // Log format for Prometheus to be configured with. - //+kubebuilder:validation:Enum="";logfmt;json + // Log format for Prometheus and the config-reloader sidecar. + // +kubebuilder:validation:Enum="";logfmt;json LogFormat string `json:"logFormat,omitempty"` - // Interval between consecutive scrapes. Default: `30s` + + // Interval between consecutive scrapes. + // + // Default: "30s" // +kubebuilder:default:="30s" ScrapeInterval Duration `json:"scrapeInterval,omitempty"` - // Number of seconds to wait for target to respond before erroring. + // Number of seconds to wait until a scrape request times out. ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` + + // The protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + // The labels to add to any time series or alerts when communicating with // external systems (federation, remote storage, Alertmanager). + // Labels defined by `spec.replicaExternalLabelName` and + // `spec.prometheusExternalLabelName` take precedence over this list. ExternalLabels map[string]string `json:"externalLabels,omitempty"` - // Enable Prometheus to be used as a receiver for the Prometheus remote write protocol. Defaults to the value of `false`. + + // Enable Prometheus to be used as a receiver for the Prometheus remote + // write protocol. + // // WARNING: This is not considered an efficient way of ingesting samples. // Use it with caution for specific low-volume use cases. // It is not suitable for replacing the ingestion via scraping and turning // Prometheus into a push-based metrics collection system. // For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#remote-write-receiver - // Only valid in Prometheus versions 2.33.0 and newer. + // + // It requires Prometheus >= v2.33.0.
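As a sketch of how the new negotiation and feature-flag fields fit together (the feature flag name below is only an example of a Prometheus flag, not a recommendation; enabling non-default features is explicitly unsupported per the comment above):

```go
package main

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

func main() {
	fields := monitoringv1.CommonPrometheusFields{
		// Most preferred protocol first; requires Prometheus >= v2.49.0.
		ScrapeProtocols: []monitoringv1.ScrapeProtocol{
			"PrometheusProto",
			"OpenMetricsText1.0.0",
			"PrometheusText0.0.4",
		},
		// Feature flags are now a typed set instead of plain strings.
		EnableFeatures: []monitoringv1.EnableFeature{"exemplar-storage"},
		// Requires Prometheus >= v2.33.0.
		EnableRemoteWriteReceiver: true,
	}
	_ = fields
}
```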
EnableRemoteWriteReceiver bool `json:"enableRemoteWriteReceiver,omitempty"` - // Enable access to Prometheus disabled features. By default, no features are enabled. - // Enabling disabled features is entirely outside the scope of what the maintainers will - // support and by doing so, you accept that this behaviour may break at any - // time without notice. - // For more information see https://prometheus.io/docs/prometheus/latest/disabled_features/ - EnableFeatures []string `json:"enableFeatures,omitempty"` - // The external URL the Prometheus instances will be available under. This is - // necessary to generate correct URLs. This is necessary if Prometheus is not - // served from root of a DNS name. + + // List of the protobuf message versions to accept when receiving the + // remote writes. + // + // It requires Prometheus >= v2.54.0. + // + // +kubebuilder:validation:MinItems=1 + // +listType:=set + // +optional + RemoteWriteReceiverMessageVersions []RemoteWriteMessageVersion `json:"remoteWriteReceiverMessageVersions,omitempty"` + + // Enable access to Prometheus feature flags. By default, no features are enabled. + // + // Enabling features which are disabled by default is entirely outside the + // scope of what the maintainers will support and by doing so, you accept + // that this behaviour may break at any time without notice. + // + // For more information see https://prometheus.io/docs/prometheus/latest/feature_flags/ + // + // +listType:=set + // +optional + EnableFeatures []EnableFeature `json:"enableFeatures,omitempty"` + + // The external URL under which the Prometheus service is externally + // available. This is necessary to generate correct URLs (for instance if + // Prometheus is accessible behind an Ingress resource). ExternalURL string `json:"externalUrl,omitempty"` - // The route prefix Prometheus registers HTTP handlers for. This is useful, - // if using ExternalURL and a proxy is rewriting HTTP routes of a request, - // and the actual ExternalURL is still true, but the server serves requests - // under a different route prefix. For example for use with `kubectl proxy`. + // The route prefix Prometheus registers HTTP handlers for. + // + // This is useful when using `spec.externalURL`, and a proxy is rewriting + // HTTP routes of a request, and the actual ExternalURL is still true, but + // the server serves requests under a different route prefix. For example + // for use with `kubectl proxy`. RoutePrefix string `json:"routePrefix,omitempty"` - // Storage spec to specify how storage shall be used. + + // Storage defines the storage used by Prometheus. Storage *StorageSpec `json:"storage,omitempty"` - // Volumes allows configuration of additional volumes on the output StatefulSet definition. Volumes specified will - // be appended to other volumes that are generated as a result of StorageSpec objects. + + // Volumes allows the configuration of additional volumes on the output + // StatefulSet definition. Volumes specified will be appended to other + // volumes that are generated as a result of StorageSpec objects. Volumes []v1.Volume `json:"volumes,omitempty"` - // VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. - // VolumeMounts specified will be appended to other VolumeMounts in the prometheus container, - // that are generated as a result of StorageSpec objects. + // VolumeMounts allows the configuration of additional VolumeMounts. 
+ // + // VolumeMounts will be appended to other VolumeMounts in the 'prometheus' + // container, that are generated as a result of StorageSpec objects. VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` - // Defines the web command line flags when starting Prometheus. + + // The field controls if and how PVCs are deleted during the lifecycle of a StatefulSet. + // The default behavior is all PVCs are retained. + // This is an alpha field from kubernetes 1.23 until 1.26 and a beta field from 1.26. + // It requires enabling the StatefulSetAutoDeletePVC feature gate. + // + // +optional + PersistentVolumeClaimRetentionPolicy *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + + // Defines the configuration of the Prometheus web server. Web *PrometheusWebSpec `json:"web,omitempty"` - // Define resources requests and limits for single Pods. + + // Defines the resources requests and limits of the 'prometheus' container. Resources v1.ResourceRequirements `json:"resources,omitempty"` - // Define which Nodes the Pods are scheduled on. + + // Defines on which Nodes the Pods are scheduled. NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // ServiceAccountName is the name of the ServiceAccount to use to run the // Prometheus Pods. ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod. + // If the field isn't set, the operator mounts the service account token by default. + // + // **Warning:** be aware that by default, Prometheus requires the service account token for Kubernetes service discovery. + // It is possible to use strategic merge patch to project the service account token into the 'prometheus' container. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + // Secrets is a list of Secrets in the same namespace as the Prometheus // object, which shall be mounted into the Prometheus Pods. // Each Secret is added to the StatefulSet definition as a volume named `secret-<secret-name>`. // The Secrets are mounted into /etc/prometheus/secrets/<secret-name> in the 'prometheus' container. + // +listType:=set Secrets []string `json:"secrets,omitempty"` // ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus // object, which shall be mounted into the Prometheus Pods. // Each ConfigMap is added to the StatefulSet definition as a volume named `configmap-<configmap-name>`. // The ConfigMaps are mounted into /etc/prometheus/configmaps/<configmap-name> in the 'prometheus' container. ConfigMaps []string `json:"configMaps,omitempty"` - // If specified, the pod's scheduling constraints. + + // Defines the Pods' affinity scheduling rules if specified. + // +optional Affinity *v1.Affinity `json:"affinity,omitempty"` - // If specified, the pod's tolerations. + // Defines the Pods' tolerations if specified. + // +optional Tolerations []v1.Toleration `json:"tolerations,omitempty"` - // If specified, the pod's topology spread constraints. - TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` - // remoteWrite is the list of remote write configurations. + + // Defines the pod's topology spread constraints if specified. + //+optional + TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + + // Defines the list of remote write configurations.
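The `TopologySpreadConstraint` type referenced just above inlines the core/v1 struct and adds the operator-managed `additionalLabelSelectors` knob. A sketch of spreading a sharded Prometheus across nodes (the topology key and skew are illustrative values):

```go
package main

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	onShard := monitoringv1.ShardAndResourceNameLabelSelector

	tsc := monitoringv1.TopologySpreadConstraint{
		CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
			MaxSkew:           1,
			TopologyKey:       "kubernetes.io/hostname",
			WhenUnsatisfiable: corev1.DoNotSchedule,
		},
		// With "OnShard", the operator injects a labelSelector matching all
		// pods that belong to the same shard.
		AdditionalLabelSelectors: &onShard,
	}
	_ = tsc
}
```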
+ // +optional RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"` + + // Settings related to the OTLP receiver feature. + // It requires Prometheus >= v2.55.0. + // + // +optional + OTLP *OTLPConfig `json:"otlp,omitempty"` + // SecurityContext holds pod-level security attributes and common container settings. // This defaults to the default PodSecurityContext. + // +optional SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` - // ListenLocal makes the Prometheus server listen on loopback, so that it - // does not bind against the Pod IP. + + // Defines the DNS policy for the pods. + // + // +optional + DNSPolicy *DNSPolicy `json:"dnsPolicy,omitempty"` + // Defines the DNS configuration for the pods. + // + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty"` + // When true, the Prometheus server listens on the loopback address + // instead of the Pod IP's address. ListenLocal bool `json:"listenLocal,omitempty"` + // Containers allows injecting additional containers or modifying operator // generated containers. This can be used to allow adding an authentication - // proxy to a Prometheus pod or to change the behavior of an operator - // generated container. Containers described here modify an operator - // generated container if they share the same name and modifications are - // done via a strategic merge patch. The current container names are: - // `prometheus`, `config-reloader`, and `thanos-sidecar`. Overriding - // containers is entirely outside the scope of what the maintainers will - // support and by doing so, you accept that this behaviour may break at any - // time without notice. + // proxy to the Pods or to change the behavior of an operator generated + // container. Containers described here modify an operator generated + // container if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of containers managed by the operator are: + // * `prometheus` + // * `config-reloader` + // * `thanos-sidecar` + // + // Overriding containers is entirely outside the scope of what the + // maintainers will support and by doing so, you accept that this behaviour + // may break at any time without notice. + // +optional Containers []v1.Container `json:"containers,omitempty"` - // InitContainers allows adding initContainers to the pod definition. Those can be used to e.g. - // fetch secrets for injection into the Prometheus configuration from external sources. Any errors - // during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // InitContainers described here modify an operator - // generated init containers if they share the same name and modifications are - // done via a strategic merge patch. The current init container name is: - // `init-config-reloader`. Overriding init containers is entirely outside the - // scope of what the maintainers will support and by doing so, you accept that - // this behaviour may break at any time without notice. + // InitContainers allows injecting initContainers to the Pod definition. Those + // can be used to e.g. fetch secrets for injection into the Prometheus + // configuration from external sources. Any errors during the execution of + // an initContainer will lead to a restart of the Pod. 
More info: + // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // InitContainers described here modify operator generated init + // containers if they share the same name and modifications are done via a + // strategic merge patch. + // + // The names of the init containers managed by the operator are: + // * `init-config-reloader`. + // + // Overriding init containers is entirely outside the scope of what the + // maintainers will support and by doing so, you accept that this behaviour + // may break at any time without notice. + // +optional InitContainers []v1.Container `json:"initContainers,omitempty"` + // AdditionalScrapeConfigs allows specifying a key of a Secret containing // additional Prometheus scrape configurations. Scrape configurations // specified are appended to the configurations generated by the Prometheus @@ -260,117 +479,383 @@ type CommonPrometheusFields struct { // break upgrades of Prometheus. It is advised to review Prometheus release // notes to ensure that no incompatible scrape configs are going to break // Prometheus after the upgrade. + // +optional AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"` - // APIServerConfig allows specifying a host and auth methods to access apiserver. - // If left empty, Prometheus is assumed to run inside of the cluster - // and will discover API servers automatically and use the pod's CA certificate + + // APIServerConfig allows specifying a host and auth methods to access the + // Kubernetes API server. + // If null, Prometheus is assumed to run inside of the cluster: it will + // discover the API servers automatically and use the Pod's CA certificate // and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. + // +optional APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"` - // Priority class assigned to the Pods + + // Priority class assigned to the Pods. PriorityClassName string `json:"priorityClassName,omitempty"` // Port name used for the pods and governing service. - // Defaults to `web`. + // Default: "web" // +kubebuilder:default:="web" PortName string `json:"portName,omitempty"` - // ArbitraryFSAccessThroughSMs configures whether configuration - // based on a service monitor can access arbitrary files on the file system - // of the Prometheus container e.g. bearer token files. + + // When true, ServiceMonitor, PodMonitor and Probe objects are forbidden to + // reference arbitrary files on the file system of the 'prometheus' + // container. + // When a ServiceMonitor's endpoint specifies a `bearerTokenFile` value + // (e.g. '/var/run/secrets/kubernetes.io/serviceaccount/token'), a + // malicious target can get access to the Prometheus service account's + // token in the Prometheus' scrape request. Setting + // `spec.arbitraryFSAccessThroughSM` to 'true' would prevent the attack. + // Users should instead provide the credentials using the + // `spec.bearerTokenSecret` field. ArbitraryFSAccessThroughSMs ArbitraryFSAccessThroughSMsConfig `json:"arbitraryFSAccessThroughSMs,omitempty"` - // When true, Prometheus resolves label conflicts by renaming the labels in - // the scraped data to "exported_