diff --git a/MODULE.bazel b/MODULE.bazel index 958ea92f1b..add7821fcb 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -79,20 +79,20 @@ http_archive( http_archive( name = "tensorrt", build_file = "@//third_party/tensorrt/archive:BUILD", - sha256 = "606436ed219c72d1a25a889b2b0ae5cb5a68499dd6f944da4cabb3c34c067d55", - strip_prefix = "TensorRT-10.1.0.27", + sha256 = "adff1cd5abe5d87013806172351e58fd024e5bf0fc61d49ef4b84cd38ed99081", + strip_prefix = "TensorRT-10.3.0.26", urls = [ - "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.1.0/tars/TensorRT-10.1.0.27.Linux.x86_64-gnu.cuda-12.4.tar.gz", + "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar.gz", ], ) http_archive( name = "tensorrt_win", build_file = "@//third_party/tensorrt/archive:BUILD", - sha256 = "2eb98008944945377eb328871a308704e95bf3bb295fc548784c6da41a70bbed", - strip_prefix = "TensorRT-10.1.0.27", + sha256 = "2bb4bcb79e8c33575816d874b0512ea28c302af1c06ee6d224da71aa182f75e0", + strip_prefix = "TensorRT-10.3.0.26", urls = [ - "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.1.0/zip/TensorRT-10.1.0.27.Windows.win10.cuda-12.4.zip", + "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/zip/TensorRT-10.3.0.26.Windows.win10.cuda-12.5.zip", ], ) diff --git a/README.md b/README.md index a39ccefd33..03062bf7f7 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Torch-TensorRT [![Documentation](https://img.shields.io/badge/docs-master-brightgreen)](https://nvidia.github.io/Torch-TensorRT/) [![pytorch](https://img.shields.io/badge/PyTorch-2.4-green)](https://www.python.org/downloads/release/python-31013/) [![cuda](https://img.shields.io/badge/CUDA-12.4-green)](https://developer.nvidia.com/cuda-downloads) -[![trt](https://img.shields.io/badge/TensorRT-10.1.0-green)](https://github.com/nvidia/tensorrt-llm) 
+[![trt](https://img.shields.io/badge/TensorRT-10.3.0-green)](https://github.com/nvidia/tensorrt-llm) [![license](https://img.shields.io/badge/license-BSD--3--Clause-blue)](./LICENSE) [![linux_tests](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux.yml/badge.svg)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux.yml) [![windows_tests](https://github.com/pytorch/TensorRT/actions/workflows/build-test-windows.yml/badge.svg)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-windows.yml) @@ -119,7 +119,7 @@ These are the following dependencies used to verify the testcases. Torch-TensorR - Bazel 6.3.2 - Libtorch 2.5.0.dev (latest nightly) (built with CUDA 12.4) - CUDA 12.4 -- TensorRT 10.1.0.27 +- TensorRT 10.3.0.26 ## Deprecation Policy diff --git a/core/conversion/converters/impl/batch_norm.cpp b/core/conversion/converters/impl/batch_norm.cpp index 02535ffa66..07cf445f50 100644 --- a/core/conversion/converters/impl/batch_norm.cpp +++ b/core/conversion/converters/impl/batch_norm.cpp @@ -123,7 +123,7 @@ auto batch_norm_registrations TORCHTRT_UNUSED = // track_running_stats=True LOG_DEBUG("Args[3] running_mean : " << args[3].isIValue() << " / " << args[3].IValue()->isNone()); LOG_DEBUG("Args[4] running_var : " << args[4].isIValue() << " / " << args[4].IValue()->isNone()); - LOG_DEBUG("use_input_stats, momemtum, cudnn_enabled disregarded"); + LOG_DEBUG("use_input_stats, momentum are disregarded"); LOG_DEBUG("ctx->input_is_dynamic : " << ctx->input_is_dynamic); // Expand spatial dims from 1D to 2D if needed @@ -154,6 +154,17 @@ auto batch_norm_registrations TORCHTRT_UNUSED = return true; } + auto cudnn_enabled = static_cast<bool>(args[8].unwrapToBool(false)); + if (!cudnn_enabled) { + LOG_DEBUG( + "cuDNN is not enabled, skipping instance_norm conversion. \ + Since TRT 10.0, cuDNN is loaded as a dynamic dependency, \ + so for some functionalities, users need to install correct \ + cuDNN version by themselves. 
Please see our support matrix \ + here: https://docs.nvidia.com/deeplearning/tensorrt/support-matrix/index.html."); + return false; + } + const int relu = 0; const float alpha = 0; LOG_DEBUG("Set parameter `relu` and `alpha` to 0"); diff --git a/dev_dep_versions.yml b/dev_dep_versions.yml index 4cc94e2f77..3b23c49da3 100644 --- a/dev_dep_versions.yml +++ b/dev_dep_versions.yml @@ -1,2 +1,2 @@ __cuda_version__: "12.4" -__tensorrt_version__: "10.1.0" +__tensorrt_version__: "10.3.0" diff --git a/docker/README.md b/docker/README.md index 7f69b7c789..3d44f45b74 100644 --- a/docker/README.md +++ b/docker/README.md @@ -17,14 +17,14 @@ Note: By default the container uses the `pre-cxx11-abi` version of Torch + Torch ### Instructions -- The example below uses TensorRT 10.1.0.27 +- The example below uses TensorRT 10.3.0.26 - See dependencies for a list of current default dependencies. > From root of Torch-TensorRT repo Build: ``` -DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=10.1.0 -f docker/Dockerfile -t torch_tensorrt:latest . +DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=10.3.0 -f docker/Dockerfile -t torch_tensorrt:latest . 
``` Run: diff --git a/packaging/pre_build_script_windows.sh b/packaging/pre_build_script_windows.sh index 3145bcfacd..0912598bc0 100644 --- a/packaging/pre_build_script_windows.sh +++ b/packaging/pre_build_script_windows.sh @@ -12,7 +12,7 @@ if [[ "${CU_VERSION::4}" < "cu12" ]]; then pyproject.toml fi -#curl -Lo TensorRT.zip https://developer.download.nvidia.com/compute/machine-learning/tensorrt/10.0.1/zip/TensorRT-10.0.1.6.Windows10.win10.cuda-12.4.zip +#curl -Lo TensorRT.zip https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/zip/TensorRT-10.3.0.26.Windows.win10.cuda-12.5.zip #unzip -o TensorRT.zip -d C:/ TORCH_TORCHVISION=$(grep "^torch" py/requirements.txt) INDEX_URL=https://download.pytorch.org/whl/${CHANNEL}/${CU_VERSION} diff --git a/packaging/smoke_test_script.sh b/packaging/smoke_test_script.sh index ea187eb5d1..19d9d717a4 100644 --- a/packaging/smoke_test_script.sh +++ b/packaging/smoke_test_script.sh @@ -2,5 +2,5 @@ # The issue was smoke test installs the built torch_tensorrt wheel file and checks `import torch_tensorrt; print(torch_tensorrt.__version__)` # Since tensorrt cannot be pip installable in CI, the smoke test will fail. # One way we tried to handle it is manually install tensorrt wheel while by extracting from the tarball. -# However, the TensorRT-10.1.0.27/lib path doesn't seem to show up in LD_LIBRARY_PATH even if we explicitly set it. +# However, the TensorRT-10.3.0.26/lib path doesn't seem to show up in LD_LIBRARY_PATH even if we explicitly set it. # TODO: Implement a custom smoke_test script to verify torch_tensorrt installation. 
\ No newline at end of file diff --git a/py/ci/Dockerfile.ci b/py/ci/Dockerfile.ci index 82a9dbdb7c..eddf12cefb 100644 --- a/py/ci/Dockerfile.ci +++ b/py/ci/Dockerfile.ci @@ -3,13 +3,13 @@ FROM pytorch/manylinux-builder:cuda12.4 RUN yum install -y ninja-build # download TensorRT tarball -RUN wget -q https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.1.0/tars/TensorRT-10.1.0.27.Linux.x86_64-gnu.cuda-12.4.tar.gz \ -&& gunzip TensorRT-10.1.0.27.Linux.x86_64-gnu.cuda-12.4.tar.gz \ -&& tar -xvf TensorRT-10.1.0.27.Linux.x86_64-gnu.cuda-12.4.tar \ -&& rm TensorRT-10.1.0.27.Linux.x86_64-gnu.cuda-12.4.tar +RUN wget -q https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar.gz \ +&& gunzip TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar.gz \ +&& tar -xvf TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar \ +&& rm TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar -ENV TENSORRT_DIR=/TensorRT-10.1.0.27 -ENV TENSORRT_VERSION=10.1.0 +ENV TENSORRT_DIR=/TensorRT-10.3.0.26 +ENV TENSORRT_VERSION=10.3.0 RUN wget https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 \ && mv bazelisk-linux-amd64 /usr/bin/bazel \ diff --git a/pyproject.toml b/pyproject.toml index 3772bceaf3..f6230c8a74 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ requires = [ "cffi>=1.15.1", "typing-extensions>=4.7.0", "future>=0.18.3", - "tensorrt-cu12==10.1.0", + "tensorrt-cu12==10.3.0", "torch >=2.5.0.dev,<2.6.0", "pybind11==2.6.2", "numpy", @@ -55,9 +55,9 @@ keywords = [ ] dependencies = [ "torch >=2.5.0.dev,<2.6.0", - "tensorrt-cu12==10.1.0", - "tensorrt-cu12-bindings==10.1.0", - "tensorrt-cu12-libs==10.1.0", + "tensorrt-cu12==10.3.0", + "tensorrt-cu12-bindings==10.3.0", + "tensorrt-cu12-libs==10.3.0", "packaging>=23", "numpy", "typing-extensions>=4.7.0", diff --git a/tests/core/conversion/converters/test_instance_norm.cpp 
b/tests/core/conversion/converters/test_instance_norm.cpp index 8f9904ef84..2986d73cca 100644 --- a/tests/core/conversion/converters/test_instance_norm.cpp +++ b/tests/core/conversion/converters/test_instance_norm.cpp @@ -18,7 +18,7 @@ constexpr auto graph = R"IR( %running_mean.1 : Tensor?, %running_var.1 : Tensor?, %use_input_stats.1 : bool): - %cudnn_enabled.1 : bool = prim::Constant[value=1]() + %cudnn_enabled.1 : bool = prim::Constant[value=0]() %momentum.1 : float = prim::Constant[value=0.10000000000000001]() %eps.1 : float = prim::Constant[value=1.0000000000000001e-05]() %4 : Tensor = aten::instance_norm(%input.1, diff --git a/tests/py/ts/api/test_classes.py b/tests/py/ts/api/test_classes.py index 2a152cdec7..ee94e01740 100644 --- a/tests/py/ts/api/test_classes.py +++ b/tests/py/ts/api/test_classes.py @@ -309,10 +309,8 @@ def test_get_layer_info(self): """ { "Layers": [ - "reshape_before_%26 : Tensor = aten::matmul(%x.1, %25)", - "%26 : Tensor = aten::matmul(%x.1, %25) + [Freeze Tensor %27 : Tensor = trt::const(%10) ] + (Unnamed Layer* 4) [Shuffle] + unsqueeze_node_after_[Freeze Tensor %27 : Tensor = trt::const(%10) ] + (Unnamed Layer* 4) [Shuffle]_(Unnamed Layer* 4) [Shuffle]_output + %28 : Tensor = aten::add(%27, %26, %24)", - "%31 : Tensor = aten::matmul(%28, %30) + [Freeze Tensor %32 : Tensor = trt::const(%12) ] + (Unnamed Layer* 10) [Shuffle] + unsqueeze_node_after_[Freeze Tensor %32 : Tensor = trt::const(%12) ] + (Unnamed Layer* 10) [Shuffle]_(Unnamed Layer* 10) [Shuffle]_output + %33 : Tensor = aten::add(%32, %31, %29)", - "copied_squeeze_after_%33 : Tensor = aten::add(%32, %31, %29)" + "%26 : Tensor = aten::matmul(%x.1, %25)_myl0_0", + "%31 : Tensor = aten::matmul(%28, %30)_myl0_1" ], "Bindings": [ "input_0", @@ -326,7 +324,7 @@ def test_get_layer_info(self): trt_mod = TestTorchTensorRTModule._get_trt_mod() trt_json = json.loads(trt_mod.get_layer_info()) [self.assertTrue(k in trt_json.keys()) for k in ["Layers", "Bindings"]] - 
self.assertTrue(len(trt_json["Layers"]) == 4) + self.assertTrue(len(trt_json["Layers"]) == 2) self.assertTrue(len(trt_json["Bindings"]) == 2) diff --git a/toolchains/ci_workspaces/MODULE.bazel.tmpl b/toolchains/ci_workspaces/MODULE.bazel.tmpl index b897d35f56..49ad6f473a 100644 --- a/toolchains/ci_workspaces/MODULE.bazel.tmpl +++ b/toolchains/ci_workspaces/MODULE.bazel.tmpl @@ -67,20 +67,20 @@ http_archive( http_archive( name = "tensorrt", build_file = "@//third_party/tensorrt/archive:BUILD", - sha256 = "606436ed219c72d1a25a889b2b0ae5cb5a68499dd6f944da4cabb3c34c067d55", - strip_prefix = "TensorRT-10.1.0.27", + sha256 = "adff1cd5abe5d87013806172351e58fd024e5bf0fc61d49ef4b84cd38ed99081", + strip_prefix = "TensorRT-10.3.0.26", urls = [ - "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.1.0/tars/TensorRT-10.1.0.27.Linux.x86_64-gnu.cuda-12.4.tar.gz", + "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar.gz", ], ) http_archive( name = "tensorrt_win", build_file = "@//third_party/tensorrt/archive:BUILD", - sha256 = "2eb98008944945377eb328871a308704e95bf3bb295fc548784c6da41a70bbed", - strip_prefix = "TensorRT-10.1.0.27", + sha256 = "2bb4bcb79e8c33575816d874b0512ea28c302af1c06ee6d224da71aa182f75e0", + strip_prefix = "TensorRT-10.3.0.26", urls = [ - "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.1.0/zip/TensorRT-10.1.0.27.Windows.win10.cuda-12.4.zip", + "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/zip/TensorRT-10.3.0.26.Windows.win10.cuda-12.5.zip", ], ) diff --git a/toolchains/legacy/WORKSPACE.win.release.tmpl b/toolchains/legacy/WORKSPACE.win.release.tmpl index ce3df15602..58fce5cf54 100644 --- a/toolchains/legacy/WORKSPACE.win.release.tmpl +++ b/toolchains/legacy/WORKSPACE.win.release.tmpl @@ -63,7 +63,7 @@ http_archive( new_local_repository( name = "tensorrt_win", - path = 
"C:/TensorRT-10.1.0.27", + path = "C:/TensorRT-10.3.0.26", build_file = "@//third_party/tensorrt/local:BUILD" ) diff --git a/toolchains/legacy/WORKSPACE.x86_64.release.rhel.tmpl b/toolchains/legacy/WORKSPACE.x86_64.release.rhel.tmpl index 5b18a48139..97b3a8c566 100644 --- a/toolchains/legacy/WORKSPACE.x86_64.release.rhel.tmpl +++ b/toolchains/legacy/WORKSPACE.x86_64.release.rhel.tmpl @@ -71,10 +71,10 @@ http_archive( http_archive( name = "tensorrt", build_file = "@//third_party/tensorrt/archive:BUILD", - sha256 = "606436ed219c72d1a25a889b2b0ae5cb5a68499dd6f944da4cabb3c34c067d55", - strip_prefix = "TensorRT-10.1.0.27", + sha256 = "adff1cd5abe5d87013806172351e58fd024e5bf0fc61d49ef4b84cd38ed99081", + strip_prefix = "TensorRT-10.3.0.26", urls = [ - "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.1.0/tars/TensorRT-10.1.0.27.Linux.x86_64-gnu.cuda-12.4.tar.gz", + "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar.gz", ], )