diff --git a/onnxruntime/python/tools/tensorrt/perf/build/build_image.py b/onnxruntime/python/tools/tensorrt/perf/build/build_image.py index 763d160fa56b5..3ebc33c02592d 100644 --- a/onnxruntime/python/tools/tensorrt/perf/build/build_image.py +++ b/onnxruntime/python/tools/tensorrt/perf/build/build_image.py @@ -17,8 +17,8 @@ TRT_DOCKER_FILES = { "8.6.cuda_11_8_cudnn_8": "tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda11_8_tensorrt8_6", "8.6.cuda_12_3_cudnn_9": "tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda12_3_tensorrt8_6", - "10.4.cuda_11_8_cudnn_8": "tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda11_tensorrt10", - "10.4.cuda_12_5_cudnn_9": "tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda12_tensorrt10", + "10.5.cuda_11_8_cudnn_8": "tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda11_tensorrt10", + "10.5.cuda_12_5_cudnn_9": "tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda12_tensorrt10", "BIN": "tools/ci_build/github/linux/docker/Dockerfile.ubuntu_tensorrt_bin", } diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py index 85eb3ddad3c56..6b67686bb4f30 100644 --- a/tools/ci_build/build.py +++ b/tools/ci_build/build.py @@ -2120,10 +2120,10 @@ def run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs): if not args.disable_ml_ops and not args.use_tensorrt: run_subprocess([sys.executable, "onnxruntime_test_python_mlops.py"], cwd=cwd, dll_path=dll_path) - # if args.use_tensorrt: - # run_subprocess( - # [sys.executable, "onnxruntime_test_python_nested_control_flow_op.py"], cwd=cwd, dll_path=dll_path - # ) + if args.use_tensorrt: + run_subprocess( + [sys.executable, "onnxruntime_test_python_nested_control_flow_op.py"], cwd=cwd, dll_path=dll_path + ) try: import onnx # noqa: F401 diff --git a/tools/ci_build/github/azure-pipelines/bigmodels-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/bigmodels-ci-pipeline.yml index 3ee4375329069..5af95d345794d 100644 --- a/tools/ci_build/github/azure-pipelines/bigmodels-ci-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/bigmodels-ci-pipeline.yml @@ -40,7 +40,6 @@ parameters: default: 0 variables: - - template: templates/common-variables.yml - name: docker_base_image value: onnxruntimebuildcache.azurecr.io/internal/azureml/onnxruntime/build/cuda11_x64_almalinux8_gcc11:20241020.1 - name: linux_trt_version @@ -123,7 +122,7 @@ stages: --build_shared_lib \ --parallel \ --build_wheel \ - --enable_onnx_tests --use_cuda --cuda_version=${{variables.common_cuda_version}} --cuda_home=/usr/local/cuda-${{variables.common_cuda_version}} --cudnn_home=/usr/local/cuda-${{variables.common_cuda_version}} \ + --enable_onnx_tests --use_cuda --cuda_version=11.8 --cuda_home=/usr/local/cuda-11.8 --cudnn_home=/usr/local/cuda-11.8 \ --enable_cuda_profiling --enable_cuda_nhwc_ops \ --enable_pybind --build_java \ --use_cache \ diff --git a/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml b/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml index b12360d2710d0..4e7d6ec8e62de 100644 --- a/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml +++ b/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml @@ -77,13 +77,14 @@ resources: ref: 5eda9aded5462201e6310105728d33016e637ea7 variables: +- template: templates/common-variables.yml - name: ReleaseVersionSuffix value: '' - name: win_trt_version value: 11.8 - name: win_trt_home - value: 
$(Agent.TempDirectory)\TensorRT-10.4.0.26.Windows10.x86_64.cuda-11.8 + value: $(Agent.TempDirectory)\${{ variables.win_trt_folder_cuda11 }} - name: win_cuda_home value: $(Agent.TempDirectory)\v11.8 diff --git a/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml b/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml index 7118e85e9ea4b..9b103715e734d 100644 --- a/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/cuda-packaging-pipeline.yml @@ -59,13 +59,14 @@ parameters: - 12.2 variables: + - template: templates/common-variables.yml - name: ReleaseVersionSuffix value: '' - name: win_trt_home ${{ if eq(parameters.CudaVersion, '11.8') }}: - value: $(Agent.TempDirectory)\TensorRT-10.4.0.26.Windows10.x86_64.cuda-11.8 + value: $(Agent.TempDirectory)\${{ variables.win_trt_folder_cuda11 }} ${{ if eq(parameters.CudaVersion, '12.2') }}: - value: $(Agent.TempDirectory)\TensorRT-10.4.0.26.Windows10.x86_64.cuda-12.6 + value: $(Agent.TempDirectory)\${{ variables.win_trt_folder_cuda12 }} - name: win_cuda_home ${{ if eq(parameters.CudaVersion, '11.8') }}: value: $(Agent.TempDirectory)\v11.8 diff --git a/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-ci-pipeline.yml index 87d5c7bd824d2..14a5c10abc74e 100644 --- a/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-ci-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-ci-pipeline.yml @@ -37,6 +37,7 @@ parameters: - 12.2 variables: + - template: templates/common-variables.yml - name: docker_base_image ${{ if eq(parameters.CudaVersion, '11.8') }}: value: onnxruntimebuildcache.azurecr.io/internal/azureml/onnxruntime/build/cuda11_x64_almalinux8_gcc11:20241020.1 @@ -44,9 +45,9 @@ variables: value: onnxruntimebuildcache.azurecr.io/internal/azureml/onnxruntime/build/cuda12_x64_ubi8_gcc12:20241020.1 - name: linux_trt_version ${{ if eq(parameters.CudaVersion, '11.8') }}: - value: 10.4.0.26-1.cuda11.8 + value: ${{ variables.linux_trt_version_cuda11 }} ${{ if eq(parameters.CudaVersion, '12.2') }}: - value: 10.4.0.26-1.cuda12.6 + value: ${{ variables.linux_trt_version_cuda12 }} jobs: - job: Linux_Build diff --git a/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml index fb2c86dbf68e3..83cf26614a285 100644 --- a/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml @@ -8,12 +8,12 @@ parameters: - name: TrtVersion displayName: TensorRT Version type: string - default: 10.4.cuda_12_5_cudnn_9 + default: 10.5.cuda_12_5_cudnn_9 values: - 8.6.cuda_11_8_cudnn_8 - 8.6.cuda_12_3_cudnn_9 - - 10.4.cuda_11_8_cudnn_8 - - 10.4.cuda_12_5_cudnn_9 + - 10.5.cuda_11_8_cudnn_8 + - 10.5.cuda_12_5_cudnn_9 - BIN - name: UseTensorrtOssParser diff --git a/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml b/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml index b1e5816fb748e..f9ecfb7cf7938 100644 --- a/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml +++ b/tools/ci_build/github/azure-pipelines/nuget/templates/test_linux.yml @@ -23,6 +23,7 @@ stages: pool: ${{ parameters.AgentPool }} variables: + - template: ../../templates/common-variables.yml - name: OnnxRuntimeBuildDirectory value: '$(Build.BinariesDirectory)' @@ 
-52,7 +53,7 @@ stages: inputs: script: | ln -sf /data/models $(Build.BinariesDirectory) - + # As for Debian installation, replace '-1.' by '-1+' when assigning trt version below - ${{if contains(parameters.StageSuffix , 'GPU') }}: - template: ../../templates/get-docker-image-steps.yml parameters: @@ -61,7 +62,7 @@ stages: ${{ if eq(parameters.CudaVersion, '12.2') }}: DockerBuildArgs: " --build-arg BASEIMAGE=nvidia/cuda:12.2.2-devel-ubuntu20.04 - --build-arg TRT_VERSION=10.4.0.26-1+cuda12.6 + --build-arg TRT_VERSION=${{ replace(variables.linux_trt_version_cuda12, '-1.', '-1+') }} --build-arg BUILD_UID=$( id -u ) " ${{ else }}: diff --git a/tools/ci_build/github/azure-pipelines/post-merge-jobs.yml b/tools/ci_build/github/azure-pipelines/post-merge-jobs.yml index 7f131590c900b..3eafd7350b25b 100644 --- a/tools/ci_build/github/azure-pipelines/post-merge-jobs.yml +++ b/tools/ci_build/github/azure-pipelines/post-merge-jobs.yml @@ -1,3 +1,20 @@ +parameters: +- name: CudaVersion + displayName: CUDA version + type: string + default: '12.2' + values: + - 11.8 + - 12.2 + +variables: + - template: templates/common-variables.yml + - name: win_trt_folder + ${{ if eq(parameters.CudaVersion, '11.8') }}: + value: ${{ variables.win_trt_folder_cuda11 }} + ${{ if eq(parameters.CudaVersion, '12.2') }}: + value: ${{ variables.win_trt_folder_cuda12 }} + stages: - ${{ if or(startsWith(variables['System.CollectionUri'], 'https://dev.azure.com/aiinfra/'),startsWith(variables['System.CollectionUri'], 'https://aiinfra.visualstudio.com/')) }}: - template: templates/web-ci.yml @@ -206,7 +223,7 @@ stages: BuildConfig: 'RelWithDebInfo' EnvSetupScript: setup_env_cuda.bat buildArch: x64 - additionalBuildFlags: --enable_pybind --build_java --build_nodejs --use_cuda --cuda_home="$(Agent.TempDirectory)\v11.8" --enable_cuda_profiling --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=86 + additionalBuildFlags: --enable_pybind --build_java --build_nodejs --use_cuda --cuda_home="$(Agent.TempDirectory)\v${{ parameters.CudaVersion }}" --enable_cuda_profiling --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=86 msbuildPlatform: x64 isX86: false job_name_suffix: x64_RelWithDebInfo @@ -226,7 +243,7 @@ stages: BuildConfig: 'RelWithDebInfo' EnvSetupScript: setup_env_trt.bat buildArch: x64 - additionalBuildFlags: --enable_pybind --build_java --build_nodejs --use_cuda --cuda_home="$(Agent.TempDirectory)\v11.8" --enable_cuda_profiling --use_tensorrt --tensorrt_home="$(Agent.TempDirectory)\TensorRT-10.4.0.26.Windows10.x86_64.cuda-11.8" --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=86 + additionalBuildFlags: --enable_pybind --build_java --build_nodejs --use_cuda --cuda_home="$(Agent.TempDirectory)\v${{ parameters.CudaVersion }}" --enable_cuda_profiling --use_tensorrt --tensorrt_home="$(Agent.TempDirectory)\${{ variables.win_trt_folder }}" --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=86 msbuildPlatform: x64 isX86: false job_name_suffix: x64_RelWithDebInfo diff --git a/tools/ci_build/github/azure-pipelines/py-cuda-package-test-pipeline.yml b/tools/ci_build/github/azure-pipelines/py-cuda-package-test-pipeline.yml index 5094c56956978..f4cfc6c59bbfa 100644 --- a/tools/ci_build/github/azure-pipelines/py-cuda-package-test-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/py-cuda-package-test-pipeline.yml @@ -19,6 +19,5 @@ stages: python_wheel_suffix: '_gpu' timeout: 480 docker_base_image: onnxruntimebuildcache.azurecr.io/internal/azureml/onnxruntime/build/cuda12_x64_ubi8_gcc12:20241020.1 - trt_version: '10.4.0.26-1.cuda12.6' cuda_version: 
'12.2' diff --git a/tools/ci_build/github/azure-pipelines/stages/jobs/py-linux-cuda-package-test-job.yml b/tools/ci_build/github/azure-pipelines/stages/jobs/py-linux-cuda-package-test-job.yml index a33f757c24408..d08cbc8e73272 100644 --- a/tools/ci_build/github/azure-pipelines/stages/jobs/py-linux-cuda-package-test-job.yml +++ b/tools/ci_build/github/azure-pipelines/stages/jobs/py-linux-cuda-package-test-job.yml @@ -42,16 +42,17 @@ jobs: dependsOn: [ ] timeoutInMinutes: ${{ parameters.timeout }} variables: + - template: ../../templates/common-variables.yml - name: docker_base_image ${{ if eq(parameters.CudaVersion, '11.8') }}: - value: nvidia/cuda:11.8.0-cudnn8-devel-ubi8 + value: onnxruntimebuildcache.azurecr.io/internal/azureml/onnxruntime/build/cuda11_x64_almalinux8_gcc11:20241020.1 ${{ if eq(parameters.CudaVersion, '12.2') }}: value: onnxruntimebuildcache.azurecr.io/internal/azureml/onnxruntime/build/cuda12_x64_ubi8_gcc12:20241020.1 - name: linux_trt_version ${{ if eq(parameters.CudaVersion, '11.8') }}: - value: 10.4.0.26-1.cuda11.8 + value: ${{ variables.linux_trt_version_cuda11 }} ${{ if eq(parameters.CudaVersion, '12.2') }}: - value: 10.4.0.26-1.cuda12.6 + value: ${{ variables.linux_trt_version_cuda12 }} pool: ${{ parameters.machine_pool }} steps: - checkout: self diff --git a/tools/ci_build/github/azure-pipelines/stages/nuget-linux-cuda-packaging-stage.yml b/tools/ci_build/github/azure-pipelines/stages/nuget-linux-cuda-packaging-stage.yml index fc6da88917f62..d331c76bc264e 100644 --- a/tools/ci_build/github/azure-pipelines/stages/nuget-linux-cuda-packaging-stage.yml +++ b/tools/ci_build/github/azure-pipelines/stages/nuget-linux-cuda-packaging-stage.yml @@ -68,6 +68,7 @@ stages: timeoutInMinutes: 180 pool: 'onnxruntime-Ubuntu2204-AMD-CPU' variables: + - template: ../templates/common-variables.yml - name: CUDA_VERSION_MAJOR ${{ if eq(parameters.CudaVersion, '11.8') }}: value: '11' @@ -75,12 +76,11 @@ stages: value: '12' - name: CUDA_VERSION value: ${{ parameters.CudaVersion }} - - name: linux_trt_version ${{ if eq(parameters.CudaVersion, '11.8') }}: - value: 10.4.0.26-1.cuda11.8 + value: ${{ variables.linux_trt_version_cuda11 }} ${{ if eq(parameters.CudaVersion, '12.2') }}: - value: 10.4.0.26-1.cuda12.6 + value: ${{ variables.linux_trt_version_cuda12 }} steps: - checkout: self clean: true @@ -140,6 +140,7 @@ stages: clean: all pool: 'Onnxruntime-Linux-GPU' variables: + - template: ../templates/common-variables.yml - name: CUDA_VERSION_MAJOR ${{ if eq(parameters.CudaVersion, '11.8') }}: value: '11' @@ -147,9 +148,9 @@ stages: value: '12' - name: linux_trt_version ${{ if eq(parameters.CudaVersion, '11.8') }}: - value: 10.4.0.26-1.cuda11.8 + value: ${{ variables.linux_trt_version_cuda11 }} ${{ if eq(parameters.CudaVersion, '12.2') }}: - value: 10.4.0.26-1.cuda12.6 + value: ${{ variables.linux_trt_version_cuda12 }} steps: - checkout: self # due to checkout multiple repos, the root directory is $(Build.SourcesDirectory)/onnxruntime submodules: false diff --git a/tools/ci_build/github/azure-pipelines/stages/py-gpu-packaging-stage.yml b/tools/ci_build/github/azure-pipelines/stages/py-gpu-packaging-stage.yml index 0160fdd6ddd95..da4cd625b546b 100644 --- a/tools/ci_build/github/azure-pipelines/stages/py-gpu-packaging-stage.yml +++ b/tools/ci_build/github/azure-pipelines/stages/py-gpu-packaging-stage.yml @@ -56,10 +56,8 @@ stages: PYTHON_VERSION: ${{ python_version }} EP_NAME: gpu CudaVersion: ${{ parameters.cuda_version }} - ${{ if eq(parameters.cuda_version, '11.8') }}: - EP_BUILD_FLAGS: 
--enable_lto --use_tensorrt --tensorrt_home=$(Agent.TempDirectory)\TensorRT-10.4.0.26.Windows10.x86_64.cuda-11.8 --cuda_home=$(Agent.TempDirectory)\v11.8 --cmake_extra_defines "CMAKE_CUDA_ARCHITECTURES=52;60;61;70;75;80" - ${{ if eq(parameters.cuda_version, '12.2') }}: - EP_BUILD_FLAGS: --enable_lto --use_tensorrt --tensorrt_home=$(Agent.TempDirectory)\TensorRT-10.4.0.26.Windows10.x86_64.cuda-12.6 --cuda_home=$(Agent.TempDirectory)\v12.2 --cmake_extra_defines "CMAKE_CUDA_ARCHITECTURES=52;60;61;70;75;80" + EP_BUILD_FLAGS: --enable_lto --cuda_home=$(Agent.TempDirectory)\v${{ parameters.cuda_version }} --cmake_extra_defines "CMAKE_CUDA_ARCHITECTURES=52;60;61;70;75;80" + use_tensorrt: True - ${{ if eq(parameters.enable_linux_cuda, true) }}: - template: py-linux-gpu-stage.yml @@ -71,10 +69,8 @@ stages: cuda_version: ${{ parameters.cuda_version }} ${{ if eq(parameters.cuda_version, '11.8') }}: docker_base_image: onnxruntimebuildcache.azurecr.io/internal/azureml/onnxruntime/build/cuda11_x64_almalinux8_gcc11:20241020.1 - trt_version: 10.4.0.26-1.cuda11.8 ${{ if eq(parameters.cuda_version, '12.2') }}: docker_base_image: onnxruntimebuildcache.azurecr.io/internal/azureml/onnxruntime/build/cuda12_x64_ubi8_gcc12:20241020.1 - trt_version: 10.4.0.26-1.cuda12.6 - ${{ if eq(parameters.enable_windows_dml, true) }}: - ${{ each python_version in parameters.PythonVersions }}: diff --git a/tools/ci_build/github/azure-pipelines/stages/py-linux-gpu-stage.yml b/tools/ci_build/github/azure-pipelines/stages/py-linux-gpu-stage.yml index 83b863f18fbc4..3f26d2d5aeca3 100644 --- a/tools/ci_build/github/azure-pipelines/stages/py-linux-gpu-stage.yml +++ b/tools/ci_build/github/azure-pipelines/stages/py-linux-gpu-stage.yml @@ -20,12 +20,6 @@ parameters: - name: docker_base_image type: string -- name: trt_version - type: string - default: '10.4.0.26-1.cuda11.8' - values: - - 10.4.0.26-1.cuda11.8 - - 10.4.0.26-1.cuda12.6 - name: cuda_version type: string default: '11.8' @@ -71,6 +65,12 @@ stages: value: -x ${{ parameters.extra_build_arg }} ${{ if eq(parameters.extra_build_arg, '') }}: value: '' + - template: ../templates/common-variables.yml + - name: trt_version + ${{ if eq(parameters.cuda_version, '11.8') }}: + value: ${{ variables.linux_trt_version_cuda11 }} + ${{ if eq(parameters.cuda_version, '12.2') }}: + value: ${{ variables.linux_trt_version_cuda12 }} steps: - checkout: self clean: true @@ -82,7 +82,7 @@ stages: parameters: Dockerfile: tools/ci_build/github/linux/docker/inference/${{ parameters.arch }}/python/cuda/Dockerfile Context: tools/ci_build/github/linux/docker/inference/${{ parameters.arch }}/python/cuda - DockerBuildArgs: "--build-arg BASEIMAGE=${{ parameters.docker_base_image }} --build-arg TRT_VERSION=${{ parameters.trt_version }} --build-arg BUILD_UID=$( id -u )" + DockerBuildArgs: "--build-arg BASEIMAGE=${{ parameters.docker_base_image }} --build-arg TRT_VERSION=${{ variables.trt_version }} --build-arg BUILD_UID=$( id -u )" Repository: onnxruntimecuda${{ replace(parameters.cuda_version, '.', '') }}xtrt86build${{ parameters.arch }} diff --git a/tools/ci_build/github/azure-pipelines/stages/py-win-gpu-stage.yml b/tools/ci_build/github/azure-pipelines/stages/py-win-gpu-stage.yml index abcdef1cdc3ea..02ecb4585a0c0 100644 --- a/tools/ci_build/github/azure-pipelines/stages/py-win-gpu-stage.yml +++ b/tools/ci_build/github/azure-pipelines/stages/py-win-gpu-stage.yml @@ -37,6 +37,10 @@ parameters: - Release - RelWithDebInfo - MinSizeRel + +- name: use_tensorrt + type: boolean + default: false stages: - stage: 
Win_py_${{ parameters.EP_NAME }}_Wheels_${{ replace(parameters.PYTHON_VERSION,'.','_') }}_Build @@ -65,9 +69,23 @@ stages: targetPath: $(Build.ArtifactStagingDirectory) artifactName: win_${{ parameters.EP_NAME }}_wheel_${{ parameters.PYTHON_VERSION }} variables: - GRADLE_OPTS: '-Dorg.gradle.daemon=false' - VSGenerator: 'Visual Studio 17 2022' - CUDA_MODULE_LOADING: 'LAZY' + - template: ../templates/common-variables.yml + - name: GRADLE_OPTS + value: '-Dorg.gradle.daemon=false' + - name: VSGenerator + value: 'Visual Studio 17 2022' + - name: CUDA_MODULE_LOADING + value: 'LAZY' + - name: win_trt_folder + ${{ if eq(parameters.CudaVersion, '11.8') }}: + value: ${{ variables.win_trt_folder_cuda11 }} + ${{ if eq(parameters.CudaVersion, '12.2') }}: + value: ${{ variables.win_trt_folder_cuda12 }} + - name: trt_build_flag + ${{ if eq(parameters.use_tensorrt, true) }}: + value: '--use_tensorrt --tensorrt_home="$(Agent.TempDirectory)\${{ variables.win_trt_folder }}"' + ${{ if eq(parameters.use_tensorrt, false) }}: + value: '' steps: - task: mspremier.PostBuildCleanup.PostBuildCleanup-task.PostBuildCleanup@3 displayName: 'Clean Agent Directories' @@ -97,19 +115,17 @@ stages: - template: ../templates/jobs/set-winenv.yml parameters: EnvSetupScript: ${{ parameters.ENV_SETUP_SCRIPT }} - ${{ if or(contains(parameters.EP_BUILD_FLAGS, 'use_cuda'), contains(parameters.EP_BUILD_FLAGS, 'use_tensorrt')) }}: + ${{ if or(contains(parameters.EP_BUILD_FLAGS, 'use_cuda'), eq(parameters.use_tensorrt, true)) }}: DownloadCUDA: true - ${{ if contains(parameters.EP_BUILD_FLAGS, 'use_tensorrt') }}: - DownloadTRT: true + DownloadTRT: ${{ parameters.use_tensorrt }} - ${{ if eq(parameters.ENV_SETUP_SCRIPT, '') }}: - template: ../templates/jobs/download_win_gpu_library.yml parameters: CudaVersion: ${{ parameters.CudaVersion }} - ${{ if or(contains(parameters.EP_BUILD_FLAGS, 'use_cuda'), contains(parameters.EP_BUILD_FLAGS, 'use_tensorrt')) }}: + ${{ if or(contains(parameters.EP_BUILD_FLAGS, 'use_cuda'), eq(parameters.use_tensorrt, true)) }}: DownloadCUDA: true - ${{ if contains(parameters.EP_BUILD_FLAGS, 'use_tensorrt') }}: - DownloadTRT: true + DownloadTRT: ${{ parameters.use_tensorrt }} - task: PythonScript@0 displayName: 'Update deps.txt' @@ -132,7 +148,7 @@ stages: --enable_pybind --enable_onnx_tests --parallel --use_binskim_compliant_compile_flags --update --build - $(TelemetryOption) ${{ parameters.BUILD_PY_PARAMETERS }} ${{ parameters.EP_BUILD_FLAGS }} + $(TelemetryOption) ${{ parameters.BUILD_PY_PARAMETERS }} ${{ parameters.EP_BUILD_FLAGS }} ${{ variables.trt_build_flag }} workingDirectory: '$(Build.BinariesDirectory)' # Esrp signing diff --git a/tools/ci_build/github/azure-pipelines/templates/common-variables.yml b/tools/ci_build/github/azure-pipelines/templates/common-variables.yml index e7f703fa592a3..b57e45a1069fe 100644 --- a/tools/ci_build/github/azure-pipelines/templates/common-variables.yml +++ b/tools/ci_build/github/azure-pipelines/templates/common-variables.yml @@ -1,3 +1,7 @@ variables: - common_cuda_version: '11.8' - common_cuda_baseimg: 'nvidia/cuda:11.8.0-cudnn8-devel-ubi8' + common_trt_version: '10.5.0.18' + # As for Debian installation, replace '-1.' 
by '-1+' when assigning trt version below + linux_trt_version_cuda11: ${{ variables.common_trt_version }}-1.cuda11.8 + linux_trt_version_cuda12: ${{ variables.common_trt_version }}-1.cuda12.6 + win_trt_folder_cuda11: TensorRT-${{ variables.common_trt_version }}.Windows10.x86_64.cuda-11.8 + win_trt_folder_cuda12: TensorRT-${{ variables.common_trt_version }}.Windows10.x86_64.cuda-12.6 \ No newline at end of file diff --git a/tools/ci_build/github/azure-pipelines/templates/jobs/download_win_gpu_library.yml b/tools/ci_build/github/azure-pipelines/templates/jobs/download_win_gpu_library.yml index e196ecb312f96..8b3101a34f760 100644 --- a/tools/ci_build/github/azure-pipelines/templates/jobs/download_win_gpu_library.yml +++ b/tools/ci_build/github/azure-pipelines/templates/jobs/download_win_gpu_library.yml @@ -13,10 +13,10 @@ parameters: - 12.2 - name: TrtVersion type: string - default: '10.4.0.26' + default: '10.5.0.18' values: - 8.6.1.6 - - 10.4.0.26 + - 10.5.0.18 steps: - ${{ if eq(parameters.DownloadCUDA, true) }}: @@ -42,7 +42,7 @@ steps: - powershell: | Write-Host "##vso[task.setvariable variable=trtCudaVersion;]12.0" displayName: Set trtCudaVersion - - ${{ if and(eq(parameters.CudaVersion, '12.2'), eq(parameters.TrtVersion, '10.4.0.26')) }}: + - ${{ if and(eq(parameters.CudaVersion, '12.2'), eq(parameters.TrtVersion, '10.5.0.18')) }}: - powershell: | Write-Host "##vso[task.setvariable variable=trtCudaVersion;]12.6" displayName: Set trtCudaVersion diff --git a/tools/ci_build/github/azure-pipelines/templates/jobs/set-winenv.yml b/tools/ci_build/github/azure-pipelines/templates/jobs/set-winenv.yml index 6a2b7f4566b61..71b929b995349 100644 --- a/tools/ci_build/github/azure-pipelines/templates/jobs/set-winenv.yml +++ b/tools/ci_build/github/azure-pipelines/templates/jobs/set-winenv.yml @@ -13,6 +13,12 @@ parameters: - name: SecondaryCUDAVersion type: string default: '11.8' + - name: win_trt_folder_cuda11 + type: string + default: 'TensorRT-10.5.0.18.Windows10.x86_64.cuda-11.8' + - name: win_trt_folder_cuda12 + type: string + default: 'TensorRT-10.5.0.18.Windows10.x86_64.cuda-12.6' steps: - ${{ if eq(parameters.DownloadCUDA, 'true') }}: @@ -24,11 +30,11 @@ steps: displayName: 'Download Secondary CUDA SDK v${{ parameters.SecondaryCUDAVersion }}' - ${{ if eq(parameters.DownloadTRT, 'true') }}: - powershell: | - azcopy.exe cp --recursive "https://lotusscus.blob.core.windows.net/models/local/TensorRT-10.4.0.26.Windows10.x86_64.cuda-11.8" $(Agent.TempDirectory) - displayName: 'Download TensorRT-10.4.0.26.Windows10.x86_64.cuda-11.8' + azcopy.exe cp --recursive "https://lotusscus.blob.core.windows.net/models/local/${{ parameters.win_trt_folder_cuda11 }}" $(Agent.TempDirectory) + displayName: 'Download ${{ parameters.win_trt_folder_cuda11 }}' - powershell: | - azcopy.exe cp --recursive "https://lotusscus.blob.core.windows.net/models/local/TensorRT-10.4.0.26.Windows10.x86_64.cuda-12.6" $(Agent.TempDirectory) - displayName: 'Download TensorRT-10.4.0.26.Windows10.x86_64.cuda-12.6' + azcopy.exe cp --recursive "https://lotusscus.blob.core.windows.net/models/local/${{ parameters.win_trt_folder_cuda12 }}" $(Agent.TempDirectory) + displayName: 'Download ${{ variables.win_trt_folder_cuda12 }}' - task: BatchScript@1 displayName: 'setup env' diff --git a/tools/ci_build/github/azure-pipelines/templates/py-packaging-linux-test-cuda.yml b/tools/ci_build/github/azure-pipelines/templates/py-packaging-linux-test-cuda.yml index 6a74d0e7befd3..0473fc199a991 100644 --- 
a/tools/ci_build/github/azure-pipelines/templates/py-packaging-linux-test-cuda.yml +++ b/tools/ci_build/github/azure-pipelines/templates/py-packaging-linux-test-cuda.yml @@ -16,12 +16,6 @@ parameters: - name: docker_base_image type: string -- name: trt_version - type: string - default: '10.4.0.26-1.cuda11.8' - values: - - 10.4.0.26-1.cuda11.8 - - 10.4.0.26-1.cuda12.6 - name: cuda_version type: string default: '11.8' @@ -47,7 +41,14 @@ jobs: - job: Linux_Test_GPU${{ parameters.extra_job_id }}_${{ parameters.arch }} timeoutInMinutes: ${{ parameters.timeout }} variables: - skipComponentGovernanceDetection: true + - template: common-variables.yml + - name: skipComponentGovernanceDetection + value: true + - name: trt_version + ${{ if eq(parameters.cuda_version, '11.8') }}: + value: ${{ variables.linux_trt_version_cuda11 }} + ${{ if eq(parameters.cuda_version, '12.2') }}: + value: ${{ variables.linux_trt_version_cuda12 }} workspace: clean: all pool: ${{ parameters.machine_pool }} @@ -92,7 +93,7 @@ jobs: parameters: Dockerfile: tools/ci_build/github/linux/docker/inference/${{ parameters.arch }}/python/cuda/Dockerfile Context: tools/ci_build/github/linux/docker/inference/${{ parameters.arch }}/python/cuda - DockerBuildArgs: "--build-arg BASEIMAGE=${{ parameters.docker_base_image }} --build-arg TRT_VERSION=${{ parameters.trt_version }} --build-arg BUILD_UID=$( id -u )" + DockerBuildArgs: "--build-arg BASEIMAGE=${{ parameters.docker_base_image }} --build-arg TRT_VERSION=${{ variables.trt_version }} --build-arg BUILD_UID=$( id -u )" Repository: onnxruntimecuda${{ replace(parameters.cuda_version, '.', '') }}xtrt86build${{ parameters.arch }} - task: Bash@3 diff --git a/tools/ci_build/github/azure-pipelines/templates/py-packaging-selectable-stage.yml b/tools/ci_build/github/azure-pipelines/templates/py-packaging-selectable-stage.yml index 4310c7f7800fa..7f3a61997b2f8 100644 --- a/tools/ci_build/github/azure-pipelines/templates/py-packaging-selectable-stage.yml +++ b/tools/ci_build/github/azure-pipelines/templates/py-packaging-selectable-stage.yml @@ -379,9 +379,10 @@ stages: pool: 'onnxruntime-Win2022-GPU-A10' timeoutInMinutes: 300 variables: + - template: common-variables.yml CUDA_VERSION: '11.8' buildArch: x64 - EpBuildFlags: --use_tensorrt --tensorrt_home="$(Agent.TempDirectory)\TensorRT-10.4.0.26.Windows10.x86_64.cuda-11.8" --cuda_version=$(CUDA_VERSION) --cuda_home="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v$(CUDA_VERSION)" --cmake_extra_defines "CMAKE_CUDA_ARCHITECTURES=37;50;52;60;61;70;75;80" + EpBuildFlags: --use_tensorrt --tensorrt_home="$(Agent.TempDirectory)\${{ variables.win_trt_folder_cuda11 }}" --cuda_version=$(CUDA_VERSION) --cuda_home="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v$(CUDA_VERSION)" --cmake_extra_defines "CMAKE_CUDA_ARCHITECTURES=37;50;52;60;61;70;75;80" EnvSetupScript: setup_env_gpu.bat EP_NAME: gpu VSGenerator: 'Visual Studio 17 2022' diff --git a/tools/ci_build/github/azure-pipelines/win-gpu-tensorrt-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/win-gpu-tensorrt-ci-pipeline.yml index 7c04d6aa2e739..f4ab9ee5b4a5c 100644 --- a/tools/ci_build/github/azure-pipelines/win-gpu-tensorrt-ci-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/win-gpu-tensorrt-ci-pipeline.yml @@ -37,11 +37,12 @@ parameters: - 12.2 variables: + - template: templates/common-variables.yml - name: win_trt_folder ${{ if eq(parameters.CudaVersion, '11.8') }}: - value: TensorRT-10.4.0.26.Windows10.x86_64.cuda-11.8 + value: ${{ variables.win_trt_folder_cuda11 }} ${{ if 
eq(parameters.CudaVersion, '12.2') }}: - value: TensorRT-10.4.0.26.Windows10.x86_64.cuda-12.6 + value: ${{ variables.win_trt_folder_cuda12 }} jobs: - job: 'build' diff --git a/tools/ci_build/github/linux/docker/Dockerfile.package_ubi8_cuda_tensorrt10_0 b/tools/ci_build/github/linux/docker/Dockerfile.package_ubi8_cuda_tensorrt10_0 index c1a445e29fc89..4ddfa65939906 100644 --- a/tools/ci_build/github/linux/docker/Dockerfile.package_ubi8_cuda_tensorrt10_0 +++ b/tools/ci_build/github/linux/docker/Dockerfile.package_ubi8_cuda_tensorrt10_0 @@ -6,7 +6,7 @@ # Build base image with required system packages ARG BASEIMAGE=nvidia/cuda:12.5.1-cudnn-devel-ubi8 -ARG TRT_VERSION=10.4.0.26-1.cuda12.6 +ARG TRT_VERSION=10.5.0.18-1.cuda12.6 FROM $BASEIMAGE AS base ARG TRT_VERSION ENV PATH /opt/python/cp38-cp38/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH} diff --git a/tools/ci_build/github/linux/docker/Dockerfile.package_ubi8_cuda_tensorrt10_0_torch b/tools/ci_build/github/linux/docker/Dockerfile.package_ubi8_cuda_tensorrt10_0_torch index a228ebed165eb..44eafa08e5ef5 100644 --- a/tools/ci_build/github/linux/docker/Dockerfile.package_ubi8_cuda_tensorrt10_0_torch +++ b/tools/ci_build/github/linux/docker/Dockerfile.package_ubi8_cuda_tensorrt10_0_torch @@ -6,7 +6,7 @@ # Build base image with required system packages ARG BASEIMAGE=nvidia/cuda:11.8.0-cudnn8-devel-ubi8 -ARG TRT_VERSION=10.4.0.26-1.cuda11.8 +ARG TRT_VERSION=10.5.0.18-1.cuda11.8 FROM $BASEIMAGE AS base ARG TRT_VERSION ENV PATH /opt/python/cp38-cp38/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/src/tensorrt/bin:${PATH} diff --git a/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu b/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu index 6a4244b7aad0d..f5855c65eb9bd 100644 --- a/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu +++ b/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu @@ -6,7 +6,7 @@ # Build base image with required system packages ARG BASEIMAGE=nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 -ARG TRT_VERSION=10.4.0.26-1+cuda11.8 +ARG TRT_VERSION=10.5.0.18-1+cuda11.8 ARG LD_LIBRARY_PATH_ARG=/usr/local/lib64:/usr/local/cuda/lib64 FROM $BASEIMAGE AS base ARG TRT_VERSION diff --git a/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu_ffmpeg b/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu_ffmpeg index 418c551ab38b4..47562467df893 100644 --- a/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu_ffmpeg +++ b/tools/ci_build/github/linux/docker/Dockerfile.package_ubuntu_2004_gpu_ffmpeg @@ -6,7 +6,7 @@ # Build base image with required system packages ARG BASEIMAGE=nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 -ARG TRT_VERSION=10.4.0.26-1+cuda11.8 +ARG TRT_VERSION=10.5.0.18-1+cuda11.8 ARG LD_LIBRARY_PATH_ARG=/usr/local/lib64:/usr/local/cuda/lib64 FROM $BASEIMAGE AS base ARG TRT_VERSION diff --git a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda11_tensorrt10 b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda11_tensorrt10 index a7d8f220ea9b3..84dee11536033 100644 --- a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda11_tensorrt10 +++ b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda11_tensorrt10 @@ -31,26 +31,26 @@ RUN pip install --upgrade pip RUN pip install psutil setuptools>=68.2.2 # Install TensorRT -RUN version="10.4.0.26-1+cuda11.8" &&\ +RUN TRT_VERSION="10.5.0.18-1+cuda11.8" &&\ apt-key adv --fetch-keys 
https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/7fa2af80.pub &&\ apt-get update &&\ apt-get install -y \ - libnvinfer-dev=${version} \ - libnvinfer-dispatch-dev=${version} \ - libnvinfer-dispatch10=${version} \ - libnvinfer-headers-dev=${version} \ - libnvinfer-headers-plugin-dev=${version} \ - libnvinfer-lean-dev=${version} \ - libnvinfer-lean10=${version} \ - libnvinfer-plugin-dev=${version} \ - libnvinfer-plugin10=${version} \ - libnvinfer-vc-plugin-dev=${version} \ - libnvinfer-vc-plugin10=${version} \ - libnvinfer10=${version} \ - libnvonnxparsers-dev=${version} \ - libnvonnxparsers10=${version} \ - tensorrt-dev=${version} \ - libnvinfer-bin=${version} + libnvinfer-dev=${TRT_VERSION} \ + libnvinfer-dispatch-dev=${TRT_VERSION} \ + libnvinfer-dispatch10=${TRT_VERSION} \ + libnvinfer-headers-dev=${TRT_VERSION} \ + libnvinfer-headers-plugin-dev=${TRT_VERSION} \ + libnvinfer-lean-dev=${TRT_VERSION} \ + libnvinfer-lean10=${TRT_VERSION} \ + libnvinfer-plugin-dev=${TRT_VERSION} \ + libnvinfer-plugin10=${TRT_VERSION} \ + libnvinfer-vc-plugin-dev=${TRT_VERSION} \ + libnvinfer-vc-plugin10=${TRT_VERSION} \ + libnvinfer10=${TRT_VERSION} \ + libnvonnxparsers-dev=${TRT_VERSION} \ + libnvonnxparsers10=${TRT_VERSION} \ + tensorrt-dev=${TRT_VERSION} \ + libnvinfer-bin=${TRT_VERSION} # Compile trtexec if not installed RUN if [ ! -d /usr/src/tensorrt/bin ] || [ ! -f /usr/src/tensorrt/bin/trtexec ]; then \ diff --git a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda12_tensorrt10 b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda12_tensorrt10 index 523318f09aba6..372e4f1763379 100644 --- a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda12_tensorrt10 +++ b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_cuda12_tensorrt10 @@ -31,26 +31,26 @@ RUN pip install --upgrade pip RUN pip install setuptools>=68.2.2 psutil # Install TensorRT -RUN version="10.4.0.26-1+cuda12.6" &&\ +RUN TRT_VERSION="10.5.0.18-1+cuda12.6" &&\ apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/7fa2af80.pub &&\ apt-get update &&\ apt-get install -y \ - libnvinfer-dev=${version} \ - libnvinfer-dispatch-dev=${version} \ - libnvinfer-dispatch10=${version} \ - libnvinfer-headers-dev=${version} \ - libnvinfer-headers-plugin-dev=${version} \ - libnvinfer-lean-dev=${version} \ - libnvinfer-lean10=${version} \ - libnvinfer-plugin-dev=${version} \ - libnvinfer-plugin10=${version} \ - libnvinfer-vc-plugin-dev=${version} \ - libnvinfer-vc-plugin10=${version} \ - libnvinfer10=${version} \ - libnvonnxparsers-dev=${version} \ - libnvonnxparsers10=${version} \ - tensorrt-dev=${version} \ - libnvinfer-bin=${version} + libnvinfer-dev=${TRT_VERSION} \ + libnvinfer-dispatch-dev=${TRT_VERSION} \ + libnvinfer-dispatch10=${TRT_VERSION} \ + libnvinfer-headers-dev=${TRT_VERSION} \ + libnvinfer-headers-plugin-dev=${TRT_VERSION} \ + libnvinfer-lean-dev=${TRT_VERSION} \ + libnvinfer-lean10=${TRT_VERSION} \ + libnvinfer-plugin-dev=${TRT_VERSION} \ + libnvinfer-plugin10=${TRT_VERSION} \ + libnvinfer-vc-plugin-dev=${TRT_VERSION} \ + libnvinfer-vc-plugin10=${TRT_VERSION} \ + libnvinfer10=${TRT_VERSION} \ + libnvonnxparsers-dev=${TRT_VERSION} \ + libnvonnxparsers10=${TRT_VERSION} \ + tensorrt-dev=${TRT_VERSION} \ + libnvinfer-bin=${TRT_VERSION} # Compile trtexec if not installed RUN if [ ! -d /usr/src/tensorrt/bin ] || [ ! 
-f /usr/src/tensorrt/bin/trtexec ]; then \ diff --git a/tools/ci_build/github/linux/docker/inference/x86_64/python/cuda/Dockerfile b/tools/ci_build/github/linux/docker/inference/x86_64/python/cuda/Dockerfile index 54bdbebbd1319..b3855ffabd523 100644 --- a/tools/ci_build/github/linux/docker/inference/x86_64/python/cuda/Dockerfile +++ b/tools/ci_build/github/linux/docker/inference/x86_64/python/cuda/Dockerfile @@ -5,7 +5,7 @@ ARG BASEIMAGE=nvidia/cuda:11.8.0-cudnn8-devel-ubi8 FROM $BASEIMAGE -ARG TRT_VERSION=10.4.0.26-1.cuda11.8 +ARG TRT_VERSION=10.5.0.18-1.cuda11.8 #Install TensorRT only if TRT_VERSION is not empty RUN if [ -n "${TRT_VERSION}" ]; then \ diff --git a/tools/ci_build/github/windows/setup_env_gpu.bat b/tools/ci_build/github/windows/setup_env_gpu.bat index 6a660ecaa40d2..7285e66a149d1 100644 --- a/tools/ci_build/github/windows/setup_env_gpu.bat +++ b/tools/ci_build/github/windows/setup_env_gpu.bat @@ -6,10 +6,10 @@ if exist PATH=%AGENT_TEMPDIRECTORY%\v12.2\ ( ) else ( set PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.2\bin;C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.2\extras\CUPTI\lib64;%PATH% ) -set PATH=%AGENT_TEMPDIRECTORY%\TensorRT-10.4.0.26.Windows10.x86_64.cuda-12.6\lib;%PATH% +set PATH=%AGENT_TEMPDIRECTORY%\TensorRT-10.5.0.18.Windows10.x86_64.cuda-12.6\lib;%PATH% @REM The default version is still cuda v12.2, because set cuda v11.8 after it -set PATH=%PATH%;%AGENT_TEMPDIRECTORY%\TensorRT-10.4.0.26.Windows10.x86_64.cuda-11.8\lib +set PATH=%PATH%;%AGENT_TEMPDIRECTORY%\TensorRT-10.5.0.18.Windows10.x86_64.cuda-11.8\lib if exist PATH=%AGENT_TEMPDIRECTORY%\v11.8\ ( set PATH=%PATH%;%AGENT_TEMPDIRECTORY%\v11.8\bin;%AGENT_TEMPDIRECTORY%\v11.8\extras\CUPTI\lib64 ) else ( diff --git a/tools/ci_build/github/windows/setup_env_trt.bat b/tools/ci_build/github/windows/setup_env_trt.bat index 4f2272e306570..3e4ecf66f457a 100644 --- a/tools/ci_build/github/windows/setup_env_trt.bat +++ b/tools/ci_build/github/windows/setup_env_trt.bat @@ -6,6 +6,6 @@ if exist PATH=%AGENT_TEMPDIRECTORY%\v12.2\ ( ) else ( set PATH=%PATH%;C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.2\bin;C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.2\extras\CUPTI\lib64 ) -set PATH=%AGENT_TEMPDIRECTORY%\TensorRT-10.4.0.26.Windows10.x86_64.cuda-12.6\lib;%PATH% +set PATH=%AGENT_TEMPDIRECTORY%\TensorRT-10.5.0.18.Windows10.x86_64.cuda-12.6\lib;%PATH% set GRADLE_OPTS=-Dorg.gradle.daemon=false set CUDA_MODULE_LOADING=LAZY
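
For reference, a minimal sketch of the consumption pattern this change introduces: a pipeline includes templates/common-variables.yml and maps the shared TensorRT variables onto its own CUDA-version switch. The template path and the linux_trt_version_cuda11 / linux_trt_version_cuda12 names are taken from this diff; the surrounding pipeline and its parameter block are illustrative only, not part of the change.

# Hypothetical consumer pipeline (illustrative); the variable names come from
# tools/ci_build/github/azure-pipelines/templates/common-variables.yml above.
parameters:
- name: CudaVersion
  type: string
  default: '12.2'
  values:
  - 11.8
  - 12.2

variables:
  - template: templates/common-variables.yml
  - name: linux_trt_version
    ${{ if eq(parameters.CudaVersion, '11.8') }}:
      value: ${{ variables.linux_trt_version_cuda11 }}
    ${{ if eq(parameters.CudaVersion, '12.2') }}:
      value: ${{ variables.linux_trt_version_cuda12 }}

With this layout, a future TensorRT bump is a one-line edit to common_trt_version, plus the literal versions that remain in the Dockerfile ARG defaults, the set-winenv.yml parameter defaults, and the setup_env*.bat scripts.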