8 changes: 8 additions & 0 deletions .github/scripts/generate-tensorrt-test-matrix.py
@@ -60,6 +60,10 @@
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.2/zip/TensorRT-10.13.2.6.Windows.win10.cuda-12.9.zip",
"strip_prefix": "TensorRT-10.13.2.6",
},
"10.14.1": {
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/zip/TensorRT-10.14.1.48.Windows.win10.cuda-12.9.zip",
"strip_prefix": "TensorRT-10.14.1.48",
},
},
"linux": {
"10.3.0": {
@@ -94,6 +98,10 @@
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.2/tars/TensorRT-10.13.2.6.Linux.x86_64-gnu.cuda-12.9.tar.gz",
"strip_prefix": "TensorRT-10.13.2.6",
},
"10.14.1": {
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/tars/TensorRT-10.14.1.48.Linux.x86_64-gnu.cuda-12.9.tar.gz",
"strip_prefix": "TensorRT-10.14.1.48",
},
},
}

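For context, here is a minimal sketch of how a version table like the one above might be flattened into CI matrix entries; the `TENSORRT_VERSIONS` name and the output fields are illustrative assumptions, not the actual contents of generate-tensorrt-test-matrix.py.

```python
# Hypothetical sketch: expand an {os: {version: {urls, strip_prefix}}} table
# into flat CI matrix entries. Names and fields are illustrative only.
TENSORRT_VERSIONS = {
    "windows": {
        "10.14.1": {
            "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/zip/TensorRT-10.14.1.48.Windows.win10.cuda-12.9.zip",
            "strip_prefix": "TensorRT-10.14.1.48",
        },
    },
}

def build_matrix(table):
    entries = []
    for os_name, versions in table.items():
        for version, info in versions.items():
            entries.append({
                "os": os_name,
                "tensorrt": {
                    "version": version,
                    "urls": info["urls"],
                    "strip_prefix": info["strip_prefix"],
                },
            })
    return entries

if __name__ == "__main__":
    for entry in build_matrix(TENSORRT_VERSIONS):
        print(entry["os"], entry["tensorrt"]["version"])
```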
17 changes: 12 additions & 5 deletions .github/scripts/install-tensorrt-rtx.sh
File mode changed: 100644 → 100755 (made executable)
@@ -1,7 +1,12 @@

install_tensorrt_rtx() {
if [[ ${USE_TRT_RTX} == true ]]; then
TRT_RTX_VERSION=1.0.0.21
if [[ ${CU_VERSION:2:2} == "13" ]]; then
export CU_UPPERBOUND="13.0"
else
export CU_UPPERBOUND="12.9"
fi
TRT_RTX_VERSION=1.2.0.54
install_wheel_or_not=${1:-false}
echo "It is the tensorrt-rtx build, install tensorrt-rtx with install_wheel_or_not:${install_wheel_or_not}"
PLATFORM=$(python -c "import sys; print(sys.platform)")
@@ -15,24 +20,26 @@ install_tensorrt_rtx() {
# python version is like 3.11, we need to convert it to cp311
CPYTHON_TAG="cp${PYTHON_VERSION//./}"
if [[ ${PLATFORM} == win32 ]]; then
curl -L https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.0/TensorRT-RTX-${TRT_RTX_VERSION}.Windows.win10.cuda-12.9.zip -o TensorRT-RTX-${TRT_RTX_VERSION}.Windows.win10.cuda-12.9.zip
unzip TensorRT-RTX-${TRT_RTX_VERSION}.Windows.win10.cuda-12.9.zip
curl -L https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.2/tensorrt-rtx-${TRT_RTX_VERSION}-win10-amd64-cuda-${CU_UPPERBOUND}-release-external.zip -o tensorrt-rtx-${TRT_RTX_VERSION}.win10-amd64-cuda-${CU_UPPERBOUND}.zip
unzip tensorrt-rtx-${TRT_RTX_VERSION}.win10-amd64-cuda-${CU_UPPERBOUND}.zip
rtx_lib_dir=${PWD}/TensorRT-RTX-${TRT_RTX_VERSION}/lib
export PATH=${rtx_lib_dir}:$PATH
echo "PATH: $PATH"
if [[ ${install_wheel_or_not} == true ]]; then
pip install TensorRT-RTX-${TRT_RTX_VERSION}/python/tensorrt_rtx-${TRT_RTX_VERSION}-${CPYTHON_TAG}-none-win_amd64.whl
fi
else
curl -L https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.0/TensorRT-RTX-${TRT_RTX_VERSION}.Linux.x86_64-gnu.cuda-12.9.tar.gz -o TensorRT-RTX-${TRT_RTX_VERSION}.Linux.x86_64-gnu.cuda-12.9.tar.gz
tar -xzf TensorRT-RTX-${TRT_RTX_VERSION}.Linux.x86_64-gnu.cuda-12.9.tar.gz
curl -L https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.2/tensorrt-rtx-${TRT_RTX_VERSION}-linux-x86_64-cuda-${CU_UPPERBOUND}-release-external.tar.gz -o tensorrt-rtx-${TRT_RTX_VERSION}-linux-x86_64-cuda-${CU_UPPERBOUND}-release-external.tar.gz
tar -xzf tensorrt-rtx-${TRT_RTX_VERSION}-linux-x86_64-cuda-${CU_UPPERBOUND}-release-external.tar.gz
rtx_lib_dir=${PWD}/TensorRT-RTX-${TRT_RTX_VERSION}/lib
export LD_LIBRARY_PATH=${rtx_lib_dir}:$LD_LIBRARY_PATH
echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
if [[ ${install_wheel_or_not} == true ]]; then
pip install TensorRT-RTX-${TRT_RTX_VERSION}/python/tensorrt_rtx-${TRT_RTX_VERSION}-${CPYTHON_TAG}-none-linux_x86_64.whl
fi
fi
# clean up the downloaded rtx tarball
rm tensorrt-rtx*.tar.gz
else
echo "It is the standard tensorrt build, skip install tensorrt-rtx"
fi
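The new branch keys the CUDA upper bound off the two characters at offset 2 of `CU_VERSION` (e.g. `cu130` → `13`). Below is a small Python sketch of the same selection and download-name logic, assuming `CU_VERSION` takes values like `cu126`/`cu129`/`cu130` (the exact format is an assumption):

```python
import os

# Hypothetical mirror of the shell logic:
# CU_VERSION like "cu130" -> the two characters at offset 2 are the CUDA major version.
def cuda_upperbound(cu_version: str) -> str:
    major = cu_version[2:4]  # e.g. "13" from "cu130", "12" from "cu129"
    return "13.0" if major == "13" else "12.9"

def rtx_tarball_name(trt_rtx_version: str, cu_version: str) -> str:
    # Mirrors the Linux download name constructed by the script.
    return (
        f"tensorrt-rtx-{trt_rtx_version}-linux-x86_64-"
        f"cuda-{cuda_upperbound(cu_version)}-release-external.tar.gz"
    )

if __name__ == "__main__":
    cu = os.environ.get("CU_VERSION", "cu130")
    print(cuda_upperbound(cu))            # e.g. 13.0
    print(rtx_tarball_name("1.2.0.54", cu))
```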
20 changes: 10 additions & 10 deletions MODULE.bazel
@@ -101,27 +101,27 @@ http_archive(
http_archive(
name = "tensorrt",
build_file = "@//third_party/tensorrt/archive:BUILD",
strip_prefix = "TensorRT-10.13.3.9",
strip_prefix = "TensorRT-10.14.1.48",
urls = [
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.3/tars/TensorRT-10.13.3.9.Linux.x86_64-gnu.cuda-13.0.tar.gz",
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/tars/TensorRT-10.14.1.48.Linux.x86_64-gnu.cuda-13.0.tar.gz",
],
)

http_archive(
name = "tensorrt_rtx",
build_file = "@//third_party/tensorrt_rtx/archive:BUILD",
strip_prefix = "TensorRT-RTX-1.0.0.21",
strip_prefix = "TensorRT-RTX-1.2.0.54",
urls = [
"https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.0/TensorRT-RTX-1.0.0.21.Linux.x86_64-gnu.cuda-12.9.tar.gz",
"https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.2/tensorrt-rtx-1.2.0.54-linux-x86_64-cuda-13.0-release-external.tar.gz",
],
)

http_archive(
name = "tensorrt_sbsa",
build_file = "@//third_party/tensorrt/archive:BUILD",
strip_prefix = "TensorRT-10.13.3.9",
strip_prefix = "TensorRT-10.14.1.48",
urls = [
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.3/tars/TensorRT-10.13.3.9.Linux.aarch64-gnu.cuda-13.0.tar.gz",
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/tars/TensorRT-10.14.1.48.Linux.aarch64-gnu.cuda-13.0.tar.gz",
],
)

@@ -137,18 +137,18 @@ http_archive(
http_archive(
name = "tensorrt_win",
build_file = "@//third_party/tensorrt/archive:BUILD",
strip_prefix = "TensorRT-10.13.3.9",
strip_prefix = "TensorRT-10.14.1.48",
urls = [
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.3/zip/TensorRT-10.13.3.9.Windows.win10.cuda-13.0.zip",
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/zip/TensorRT-10.14.1.48.Windows.win10.cuda-13.0.zip",
],
)

http_archive(
name = "tensorrt_rtx_win",
build_file = "@//third_party/tensorrt_rtx/archive:BUILD",
strip_prefix = "TensorRT-RTX-1.0.0.21",
strip_prefix = "TensorRT-RTX-1.2.0.54",
urls = [
"https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.0/TensorRT-RTX-1.0.0.21.Windows.win10.cuda-12.9.zip",
"https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.2/tensorrt-rtx-1.2.0.54-win10-amd64-cuda-13.0-release-external.zip",
],
)
####################################################################################
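Note that the RTX 1.2 tarball filename (`tensorrt-rtx-1.2.0.54-...-release-external.tar.gz`) no longer matches the `strip_prefix`, which still assumes a `TensorRT-RTX-1.2.0.54` top-level directory inside the archive. A hedged local sanity check (not part of the repo) could confirm that assumption before updating the pins:

```python
import tarfile

# Illustrative only: confirm that the first path component inside a downloaded
# tarball matches the Bazel strip_prefix, since the 1.2 RTX tarball filename
# no longer starts with "TensorRT-RTX-".
def top_level_dir(tarball_path: str) -> str:
    with tarfile.open(tarball_path, "r:gz") as tf:
        first = tf.getmembers()[0].name
        return first.split("/")[0]

if __name__ == "__main__":
    # Example path is an assumption for illustration.
    path = "tensorrt-rtx-1.2.0.54-linux-x86_64-cuda-13.0-release-external.tar.gz"
    assert top_level_dir(path) == "TensorRT-RTX-1.2.0.54"
```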
4 changes: 2 additions & 2 deletions README.md
@@ -7,7 +7,7 @@ Torch-TensorRT
[![Documentation](https://img.shields.io/badge/docs-master-brightgreen)](https://nvidia.github.io/Torch-TensorRT/)
[![pytorch](https://img.shields.io/badge/PyTorch-2.10-green)](https://download.pytorch.org/whl/nightly/cu130)
[![cuda](https://img.shields.io/badge/CUDA-13.0-green)](https://developer.nvidia.com/cuda-downloads)
[![trt](https://img.shields.io/badge/TensorRT-10.13.0-green)](https://github.com/nvidia/tensorrt)
[![trt](https://img.shields.io/badge/TensorRT-10.14.0-green)](https://github.com/nvidia/tensorrt)
[![license](https://img.shields.io/badge/license-BSD--3--Clause-blue)](./LICENSE)
[![Linux x86-64 Nightly Wheels](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-x86_64.yml/badge.svg?branch=nightly)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-x86_64.yml)
[![Linux SBSA Nightly Wheels](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-aarch64.yml/badge.svg?branch=nightly)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-aarch64.yml)
@@ -123,7 +123,7 @@ These are the following dependencies used to verify the testcases. Torch-TensorR
- Bazel 8.1.1
- Libtorch 2.10.0.dev (latest nightly)
- CUDA 13.0 (CUDA 12.6 on Jetson)
- TensorRT 10.13.2.6 (TensorRT 10.3 on Jetson)
- TensorRT 10.14.1.48 (TensorRT 10.3 on Jetson)

## Deprecation Policy

6 changes: 3 additions & 3 deletions dev_dep_versions.yml
@@ -1,4 +1,4 @@
__cuda_version__: "12.8"
__tensorrt_version__: "10.13.3"
__tensorrt_rtx_version__: "1.0.0"
__cuda_version__: "13.0"
__tensorrt_version__: "10.14.0"
__tensorrt_rtx_version__: "1.2.0"
__tensorrt_llm_version__: "0.17.0.post1"
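These pins are plain YAML; as a minimal sketch (assuming PyYAML is available and that consumers read the file directly), they can be loaded like this:

```python
import yaml  # assumes PyYAML is installed

# Illustrative only: read the version pins from dev_dep_versions.yml.
with open("dev_dep_versions.yml") as f:
    versions = yaml.safe_load(f)

print(versions["__cuda_version__"])          # "13.0"
print(versions["__tensorrt_version__"])      # "10.14.0"
print(versions["__tensorrt_rtx_version__"])  # "1.2.0"
```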
14 changes: 7 additions & 7 deletions docsrc/getting_started/tensorrt_rtx.rst
@@ -52,7 +52,7 @@ Install TensorRT-RTX Tarball
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

TensorRT-RTX tarball can be downloaded from https://developer.nvidia.com/tensorrt-rtx.
Currently, Torch-TensorRT uses TensorRT-RTX version **1.0.0.21**.
Currently, Torch-TensorRT uses TensorRT-RTX version **1.2.0.54**.

Once downloaded:

@@ -62,8 +62,8 @@ Make sure you add the lib path to the ``LD_LIBRARY_PATH`` environment variable.

.. code-block:: sh

# If TensorRT-RTX is downloaded in /your_local_download_path/TensorRT-RTX-1.0.0.21
export LD_LIBRARY_PATH=/your_local_download_path/TensorRT-RTX-1.0.0.21/lib:$LD_LIBRARY_PATH
# If TensorRT-RTX is downloaded in /your_local_download_path/TensorRT-RTX-1.2.0.54
export LD_LIBRARY_PATH=/your_local_download_path/TensorRT-RTX-1.2.0.54/lib:$LD_LIBRARY_PATH
echo $LD_LIBRARY_PATH | grep TensorRT-RTX

**In Windows:**
@@ -72,8 +72,8 @@ Make sure you add the lib path to the Windows system variable ``PATH``.

.. code-block:: sh

# If TensorRT-RTX is downloaded in C:\your_local_download_path\TensorRT-RTX-1.0.0.21
set PATH="%PATH%;C:\your_local_download_path\TensorRT-RTX-1.0.0.21\lib"
# If TensorRT-RTX is downloaded in C:\your_local_download_path\TensorRT-RTX-1.2.0.54
set PATH="%PATH%;C:\your_local_download_path\TensorRT-RTX-1.2.0.54\lib"
echo %PATH% | findstr TensorRT-RTX

Install TensorRT-RTX Wheel
@@ -84,8 +84,8 @@ You must install it manually from the downloaded tarball.

.. code-block:: sh

# If the tarball is downloaded in /your_local_download_path/TensorRT-RTX-1.0.0.21
python -m pip install /your_local_download_path/TensorRT-RTX-1.0.0.21/python/tensorrt_rtx-1.0.0.21-cp39-none-linux_x86_64.whl
# If the tarball is downloaded in /your_local_download_path/TensorRT-RTX-1.2.0.54
python -m pip install /your_local_download_path/TensorRT-RTX-1.2.0.54/python/tensorrt_rtx-1.2.0.54-cp39-none-linux_x86_64.whl
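A quick post-install check along the following lines can confirm the new wheel is picked up; it assumes the `tensorrt_rtx` package exposes `__version__`, which should be verified against the actual package:

```python
# Illustrative post-install check; assumes the tensorrt_rtx wheel exposes
# __version__ (verify against the actual package if it does not).
import tensorrt_rtx

print(tensorrt_rtx.__version__)  # expected to start with "1.2.0"
assert tensorrt_rtx.__version__.startswith("1.2.0")
```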

Build Torch-TensorRT with TensorRT-RTX
--------------------------------------
13 changes: 7 additions & 6 deletions setup.py
@@ -754,14 +754,15 @@ def get_requirements():
requirements = requirements + ["torch>=2.10.0.dev,<2.11.0"]
if USE_TRT_RTX:
requirements = requirements + [
"tensorrt_rtx>=1.0.0.21",
"tensorrt_rtx>=1.2.0.54",
]
else:
requirements = requirements + [
"tensorrt>=10.13.0,<10.14.0",
f"{tensorrt_prefix}>=10.13.0,<10.14.0",
f"{tensorrt_prefix}-bindings>=10.13.0,<10.14.0",
f"{tensorrt_prefix}-libs>=10.13.0,<10.14.0",
# using the generic tensorrt>=10.14.0,<10.15.0 spec in a cu12* env pulls in both tensorrt_cu12 and tensorrt_cu13,
# which conflicts because cuda-toolkit 13 is pulled in as well, so we pin tensorrt_cu12 or tensorrt_cu13 explicitly here
f"{tensorrt_prefix}>=10.14.0,<10.15.0",
f"{tensorrt_prefix}-bindings>=10.14.0,<10.15.0",
f"{tensorrt_prefix}-libs>=10.14.0,<10.15.0",
]
return requirements

@@ -781,7 +782,7 @@ def get_sbsa_requirements():
# also due to we use sbsa torch_tensorrt wheel for thor, so when we build sbsa wheel, we need to only include tensorrt dependency.
return sbsa_requirements + [
"torch>=2.10.0.dev,<2.11.0",
"tensorrt>=10.13.0,<10.14.0",
"tensorrt>=10.14.0,<10.15.0",
]


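`tensorrt_prefix` is defined elsewhere in setup.py; as a hedged sketch of the intent described in the new comment, it presumably resolves to a CUDA-specific package name so that a cu12* build never pulls `tensorrt_cu13`. The derivation below is an assumption for illustration, not the repo's actual code:

```python
# Illustrative only: pick the CUDA-specific TensorRT PyPI package so that a
# cu12* build does not pull tensorrt_cu13 (and its cuda-toolkit 13 dependencies).
def tensorrt_prefix(cuda_version: str) -> str:
    major = cuda_version.split(".")[0]
    return f"tensorrt_cu{major}"  # e.g. "tensorrt_cu12" or "tensorrt_cu13"

prefix = tensorrt_prefix("13.0")
requirements = [
    f"{prefix}>=10.14.0,<10.15.0",
    f"{prefix}-bindings>=10.14.0,<10.15.0",
    f"{prefix}-libs>=10.14.0,<10.15.0",
]
print(requirements)
```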
4 changes: 2 additions & 2 deletions third_party/tensorrt_rtx/archive/BUILD
@@ -38,7 +38,7 @@ cc_library(
cc_import(
name = "nvinfer_lib",
shared_library = select({
":rtx_win": "lib/tensorrt_rtx_1_0.dll",
":rtx_win": "lib/tensorrt_rtx_1_2.dll",
":rtx_x86_64": "lib/libtensorrt_rtx.so",
}),
visibility = ["//visibility:private"],
@@ -47,7 +47,7 @@ cc_import(
cc_import(
name = "nvinfer_static_lib",
static_library = select({
":rtx_win": "lib/tensorrt_rtx_1_0.lib",
":rtx_win": "lib/tensorrt_rtx_1_2.lib",
}),
visibility = ["//visibility:private"],
)
4 changes: 2 additions & 2 deletions third_party/tensorrt_rtx/local/BUILD
@@ -50,15 +50,15 @@ cc_library(
cc_import(
name = "nvinfer_static_lib",
static_library = select({
":rtx_win": "lib/tensorrt_rtx_1_0.lib",
":rtx_win": "lib/tensorrt_rtx_1_2.lib",
}),
visibility = ["//visibility:private"],
)

cc_import(
name = "nvinfer_lib",
shared_library = select({
":rtx_win": "lib/tensorrt_rtx_1_0.dll",
":rtx_win": "bin/tensorrt_rtx_1_2.dll",
":rtx_x86_64": "lib/libtensorrt_rtx.so",
}),
visibility = ["//visibility:private"],
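The Windows artifacts appear to follow a `tensorrt_rtx_<major>_<minor>` naming convention, hence the rename from `tensorrt_rtx_1_0` to `tensorrt_rtx_1_2` (and, in the local layout, the DLL moving from `lib/` to `bin/`). A tiny illustrative sketch of that naming assumption:

```python
# Illustrative only: the Windows import library / DLL name appears to follow
# tensorrt_rtx_<major>_<minor>, e.g. 1.2.0.54 -> tensorrt_rtx_1_2.dll.
def rtx_dll_name(version: str) -> str:
    major, minor = version.split(".")[:2]
    return f"tensorrt_rtx_{major}_{minor}.dll"

assert rtx_dll_name("1.2.0.54") == "tensorrt_rtx_1_2.dll"
```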
20 changes: 10 additions & 10 deletions toolchains/ci_workspaces/MODULE.bazel.tmpl
@@ -75,27 +75,27 @@ http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "ht
http_archive(
name = "tensorrt",
build_file = "@//third_party/tensorrt/archive:BUILD",
strip_prefix = "TensorRT-10.13.3.9",
strip_prefix = "TensorRT-10.14.1.48",
urls = [
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.3/tars/TensorRT-10.13.3.9.Linux.x86_64-gnu.cuda-${CU_UPPERBOUND}.tar.gz",
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/tars/TensorRT-10.14.1.48.Linux.x86_64-gnu.cuda-${CU_UPPERBOUND}.tar.gz",
],
)

http_archive(
name = "tensorrt_rtx",
build_file = "@//third_party/tensorrt_rtx/archive:BUILD",
strip_prefix = "TensorRT-RTX-1.0.0.21",
strip_prefix = "TensorRT-RTX-1.2.0.54",
urls = [
"https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.0/TensorRT-RTX-1.0.0.21.Linux.x86_64-gnu.cuda-12.9.tar.gz",
"https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.2/tensorrt-rtx-1.2.0.54-linux-x86_64-cuda-${CU_UPPERBOUND}-release-external.tar.gz",
],
)

http_archive(
name = "tensorrt_sbsa",
build_file = "@//third_party/tensorrt/archive:BUILD",
strip_prefix = "TensorRT-10.13.3.9",
strip_prefix = "TensorRT-10.14.1.48",
urls = [
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.3/tars/TensorRT-10.13.3.9.Linux.aarch64-gnu.cuda-13.0.tar.gz",
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/tars/TensorRT-10.14.1.48.Linux.aarch64-gnu.cuda-13.0.tar.gz",
],
)

@@ -111,18 +111,18 @@ http_archive(
http_archive(
name = "tensorrt_win",
build_file = "@//third_party/tensorrt/archive:BUILD",
strip_prefix = "TensorRT-10.13.3.9",
strip_prefix = "TensorRT-10.14.1.48",
urls = [
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.3/zip/TensorRT-10.13.3.9.Windows.win10.cuda-${CU_UPPERBOUND}.zip",
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/zip/TensorRT-10.14.1.48.Windows.win10.cuda-${CU_UPPERBOUND}.zip",
],
)

http_archive(
name = "tensorrt_rtx_win",
build_file = "@//third_party/tensorrt_rtx/archive:BUILD",
strip_prefix = "TensorRT-RTX-1.0.0.21",
strip_prefix = "TensorRT-RTX-1.2.0.54",
urls = [
"https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.0/TensorRT-RTX-1.0.0.21.Windows.win10.cuda-12.9.zip",
"https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.2/tensorrt-rtx-1.2.0.54-win10-amd64-cuda-${CU_UPPERBOUND}-release-external.zip",
],
)

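The `${CU_UPPERBOUND}` placeholders in this template are presumably substituted by the CI workflows; here is a minimal sketch of such a substitution using Python's `string.Template` (the actual mechanism in CI is an assumption):

```python
from string import Template

# Illustrative only: render MODULE.bazel from the CI template by substituting
# ${CU_UPPERBOUND} with the CUDA upper bound chosen for the build.
with open("toolchains/ci_workspaces/MODULE.bazel.tmpl") as f:
    template = Template(f.read())

rendered = template.safe_substitute(CU_UPPERBOUND="13.0")

with open("MODULE.bazel", "w") as f:
    f.write(rendered)
```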
2 changes: 1 addition & 1 deletion tools/perf/Flux/create_env.sh
@@ -15,7 +15,7 @@ bazel
cd /home/TensorRT

python -m pip install --pre -e . --extra-index-url https://download.pytorch.org/whl/nightly/cu130
pip install tensorrt==10.13.2.6 --force-reinstall
pip install tensorrt==10.14.1.48 --force-reinstall

pip3 install --pre torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu130
