2 changes: 1 addition & 1 deletion docker/dist-build.sh
@@ -4,7 +4,7 @@ set -x

 TOP_DIR=$(cd $(dirname $0); pwd)/..

-BUILD_CMD="python -m pip wheel . --extra-index-url https://download.pytorch.org/whl/nightly/cu130 -w dist"
+BUILD_CMD="python -m pip wheel . --extra-index-url https://download.pytorch.org/whl/test/cu130 -w dist"

 # TensorRT restricts our pip version
 cd ${TOP_DIR} \
4 changes: 2 additions & 2 deletions py/requirements.txt
@@ -1,8 +1,8 @@
 numpy
 packaging
 pybind11==2.6.2
---extra-index-url https://download.pytorch.org/whl/nightly/cu130
-torch>=2.10.0.dev,<2.11.0
+--extra-index-url https://download.pytorch.org/whl/test/cu130
+torch>=2.10.0,<2.11.0
 --extra-index-url https://pypi.ngc.nvidia.com
 pyyaml
 dllist
12 changes: 7 additions & 5 deletions py/torch_tensorrt/_features.py
@@ -1,4 +1,4 @@
-import importlib
+# import importlib
 import os
 import sys
 from collections import namedtuple
@@ -52,10 +52,12 @@
 _WINDOWS_CROSS_COMPILE = check_cross_compile_trt_win_lib()
 _TRTLLM_AVAIL = load_tensorrt_llm_for_nccl()

-if importlib.util.find_spec("tensorrt.plugin"):
-    _QDP_PLUGIN_AVAIL = True
-else:
-    _QDP_PLUGIN_AVAIL = False
+# if importlib.util.find_spec("tensorrt.plugin"):
+#     _QDP_PLUGIN_AVAIL = True
+# else:
+#     _QDP_PLUGIN_AVAIL = False
+
+_QDP_PLUGIN_AVAIL = False

 ENABLED_FEATURES = FeatureSet(
     _TS_FE_AVAIL,
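
The net effect of this hunk is that QDP (quick-deploy plugin) detection is short-circuited: the flag is pinned to False rather than derived from whether `tensorrt.plugin` is importable. A minimal, self-contained sketch of the flag pattern in play (the field names are trimmed stand-ins; the real FeatureSet carries more fields than shown):

```python
from collections import namedtuple

# Trimmed stand-in for the real FeatureSet, which has more fields.
FeatureSet = namedtuple("FeatureSet", ["torchscript_frontend", "qdp_plugin"])

# With the find_spec probe commented out, the flag is unconditionally False,
# so every qdp_plugin-gated code path is disabled even on builds where
# tensorrt.plugin would import successfully.
_QDP_PLUGIN_AVAIL = False
ENABLED_FEATURES = FeatureSet(torchscript_frontend=True, qdp_plugin=_QDP_PLUGIN_AVAIL)

if not ENABLED_FEATURES.qdp_plugin:
    print("QDP plugin paths disabled")
```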
2 changes: 2 additions & 0 deletions py/torch_tensorrt/dynamo/conversion/plugins/_custom_op.py
@@ -1,6 +1,7 @@
 from typing import Callable, Optional

 from torch.fx.node import Node
+from torch_tensorrt._features import needs_qdp_plugin
 from torch_tensorrt.dynamo._settings import CompilationSettings
 from torch_tensorrt.dynamo.conversion._ConverterRegistry import ConverterPriority
 from torch_tensorrt.dynamo.conversion.plugins._generate_plugin import generate_plugin
@@ -9,6 +10,7 @@
 )


+@needs_qdp_plugin
 def custom_op(
     op_name: str,
     capability_validator: Optional[Callable[[Node, CompilationSettings], bool]] = None,
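
`needs_qdp_plugin` is imported here but its definition is not part of this diff; presumably it wraps the decorated function with a feature-flag check. A hedged sketch of what such a guard might look like (the wrapper body and error message are assumptions; the real implementation lives in `torch_tensorrt._features` and may differ):

```python
from functools import wraps
from typing import Any, Callable, TypeVar

from torch_tensorrt import ENABLED_FEATURES

F = TypeVar("F", bound=Callable[..., Any])


def needs_qdp_plugin(f: F) -> F:
    """Hypothetical guard: fail fast when the QDP plugin feature is disabled."""

    @wraps(f)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        if not ENABLED_FEATURES.qdp_plugin:
            raise RuntimeError(
                f"{f.__name__} requires tensorrt.plugin (QDP), "
                "which is not available in this build"
            )
        return f(*args, **kwargs)

    return wrapper  # type: ignore[return-value]
```

Under this PR's hard-coded `_QDP_PLUGIN_AVAIL = False`, a guard like this would make every call to `custom_op` raise, which is why the test files below wrap their registrations in `if torch_tensorrt.ENABLED_FEATURES.qdp_plugin:` and skip the test classes when the flag is off.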
8 changes: 4 additions & 4 deletions pyproject.toml
@@ -6,7 +6,7 @@ requires = [
     "ninja>=1.11.0",
     "pyyaml>=6.0",
     "cffi>=1.15.1",
-    "torch>=2.10.0.dev,<2.11.0",
+    "torch>=2.10.0,<2.11.0",
     "pybind11==2.6.2",
 ]
 build-backend = "setuptools.build_meta"
@@ -32,7 +32,7 @@ classifiers = [
     "Topic :: Software Development :: Libraries",
 ]
 readme = { file = "README.md", content-type = "text/markdown" }
-requires-python = ">=3.10, <=3.13"
+requires-python = ">=3.10"
 keywords = [
     "pytorch",
     "torch",
@@ -103,10 +103,10 @@ index-strategy = "unsafe-best-match"

 [tool.uv.sources]
 torch = [
-    { index = "pytorch-nightly-cu130" },
+    { index = "pytorch-test-cu130" },
 ]
 torchvision = [
-    { index = "pytorch-nightly-cu130" },
+    { index = "pytorch-test-cu130" },
 ]

 [[tool.uv.index]]
4 changes: 2 additions & 2 deletions setup.py
@@ -742,7 +742,7 @@ def get_sbsa_requirements(base_requirements):
     # TensorRT does not currently build wheels for Tegra, so we need the local TensorRT install from the tarball for Thor.
    # Also, since we use the sbsa torch_tensorrt wheel for Thor, the sbsa wheel only needs to include the tensorrt dependency.
     return requirements + [
-        "torch>=2.10.0.dev,<2.11.0",
+        "torch>=2.10.0,<2.11.0",
         "tensorrt>=10.14.1,<10.15.0",
     ]

@@ -753,7 +753,7 @@ def get_x86_64_requirements(base_requirements):
     if IS_DLFW_CI:
         return requirements
     else:
-        requirements = requirements + ["torch>=2.10.0.dev,<2.11.0"]
+        requirements = requirements + ["torch>=2.10.0,<2.11.0"]
         if USE_TRT_RTX:
             return requirements + [
                 "tensorrt_rtx>=1.2.0.54",
9 changes: 4 additions & 5 deletions tests/py/dynamo/automatic_plugin/test_automatic_plugin.py
@@ -3,13 +3,12 @@

 import torch
 import torch.nn as nn
+import torch_tensorrt
 import triton
 import triton.language as tl
 from parameterized import parameterized
 from torch.testing._internal.common_utils import run_tests

-import torch_tensorrt
-
 from ..conversion.harness import DispatchTestCase


@@ -56,15 +55,15 @@ def elementwise_mul(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
     return x


-if not torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx:
+if torch_tensorrt.ENABLED_FEATURES.qdp_plugin:
     torch_tensorrt.dynamo.conversion.plugins.custom_op(
         "torchtrt_ex::elementwise_mul", supports_dynamic_shapes=True
     )


 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,
-    "TensorRT RTX does not support plugins",
+    not torch_tensorrt.ENABLED_FEATURES.qdp_plugin,
+    "QDP plugin is not enabled",
 )
 class TestAutomaticPlugin(DispatchTestCase):
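
For context, a hedged sketch of how a registered plugin is exercised end to end: once `custom_op` has generated and registered a converter for `torchtrt_ex::elementwise_mul` (as in the test above), compiling a module that calls the op routes it through the TensorRT plugin. The module and input shapes below are illustrative assumptions, not part of this PR:

```python
import torch
import torch_tensorrt


class MulModule(torch.nn.Module):
    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # Assumes the torchtrt_ex::elementwise_mul op registered in the test above.
        return torch.ops.torchtrt_ex.elementwise_mul.default(x, y)


if torch_tensorrt.ENABLED_FEATURES.qdp_plugin:
    inputs = (torch.randn(8, 8).cuda(), torch.randn(8, 8).cuda())
    trt_mod = torch_tensorrt.compile(
        MulModule().cuda().eval(), ir="dynamo", inputs=inputs
    )
    print(trt_mod(*inputs).shape)
```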
@@ -3,13 +3,12 @@

 import torch
 import torch.nn as nn
+import torch_tensorrt
 import triton
 import triton.language as tl
 from parameterized import parameterized
 from torch.testing._internal.common_utils import run_tests

-import torch_tensorrt
-
 from ..conversion.harness import DispatchTestCase


@@ -57,15 +56,15 @@ def _(x: torch.Tensor, y: torch.Tensor, b: float = 0.2, a: int = 2) -> torch.Tensor:
     return x


-if not torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx:
+if torch_tensorrt.ENABLED_FEATURES.qdp_plugin:
     torch_tensorrt.dynamo.conversion.plugins.custom_op(
         "torchtrt_ex::elementwise_scale_mul", supports_dynamic_shapes=True
     )


 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,
-    "TensorRT RTX does not support plugins",
+    not torch_tensorrt.ENABLED_FEATURES.qdp_plugin,
+    "TensorRT RTX does not support plugins or QDP plugin is not enabled",
 )
 class TestAutomaticPlugin(DispatchTestCase):
7 changes: 3 additions & 4 deletions tests/py/dynamo/automatic_plugin/test_flashinfer_rmsnorm.py
@@ -4,10 +4,9 @@

 import pytest
 import torch
 import torch.nn as nn
+import torch_tensorrt
 from parameterized import parameterized
 from torch.testing._internal.common_utils import run_tests
-
-import torch_tensorrt
 from torch_tensorrt._enums import dtype

 from ..conversion.harness import DispatchTestCase
@@ -28,7 +27,7 @@ def _(input: torch.Tensor, weight: torch.Tensor, b: float = 1e-6) -> torch.Tensor:
     return input


-if not torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx:
+if torch_tensorrt.ENABLED_FEATURES.qdp_plugin:
     torch_tensorrt.dynamo.conversion.plugins.custom_op(
         "flashinfer::rmsnorm", supports_dynamic_shapes=True
     )
@@ -37,7 +36,7 @@ def _(input: torch.Tensor, weight: torch.Tensor, b: float = 1e-6) -> torch.Tensor:
 @unittest.skip("Not Available")
 @unittest.skipIf(
     not importlib.util.find_spec("flashinfer")
-    or torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,
+    or not torch_tensorrt.ENABLED_FEATURES.qdp_plugin,
     "flashinfer not installed or TensorRT RTX is present",
 )
 class TestAutomaticPlugin(DispatchTestCase):
4 changes: 2 additions & 2 deletions tests/py/requirements.txt
@@ -14,6 +14,6 @@ nvidia-modelopt[all]; python_version >'3.9' and python_version <'3.13'
 # flashinfer-python is not supported for python version 3.13 or higher
 # flashinfer-python is broken on python 3.9 at the moment, so skip it for now
 flashinfer-python; python_version >'3.9' and python_version <'3.13'
---extra-index-url https://download.pytorch.org/whl/nightly/cu130
-torchvision>=0.25.0.dev,<0.26.0
+--extra-index-url https://download.pytorch.org/whl/test/cu130
+torchvision>=0.25.0,<0.26.0
 timm>=1.0.3