remove torchvision as dependency #3158

Closed
wants to merge 5 commits
12 changes: 9 additions & 3 deletions .github/scripts/install-torch-tensorrt.sh
@@ -1,11 +1,17 @@
set -exou pipefail
set -x

TORCH_TORCHVISION=$(grep "^torch" ${PWD}/py/requirements.txt)
TORCH=$(grep "^torch" ${PWD}/py/requirements.txt)
INDEX_URL=https://download.pytorch.org/whl/${CHANNEL}/${CU_VERSION}
PLATFORM=$(python -c "import sys; print(sys.platform)")

# Install all the dependencies required for Torch-TensorRT
pip install --pre ${TORCH_TORCHVISION} --index-url ${INDEX_URL}
pip install --pre ${TORCH} --index-url ${INDEX_URL}

# Install optional torchvision required for Torch-TensorRT tests
TORCHVISION=$(grep "^torchvision" ${PWD}/tests/py/requirements.txt)
pip install --pre ${TORCHVISION} --index-url ${INDEX_URL}

# Install optional dependencies required for Torch-TensorRT tests
pip install --pre -r ${PWD}/tests/py/requirements.txt --use-deprecated legacy-resolver

# Install Torch-TensorRT
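Note: with torchvision dropped from py/requirements.txt and installed only from tests/py/requirements.txt, anything that still uses torchvision has to tolerate its absence. A minimal sketch of the runtime availability check the test changes below rely on (the constant name is illustrative, not part of this PR):

```python
import importlib.util

# find_spec() returns None when the package is not installed, without importing it.
TORCHVISION_AVAILABLE = importlib.util.find_spec("torchvision") is not None

if TORCHVISION_AVAILABLE:
    import torchvision.models as models  # only imported when actually present
```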
4 changes: 2 additions & 2 deletions packaging/pre_build_script.sh
@@ -11,12 +11,12 @@ wget https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-l
&& mv bazelisk-linux-amd64 /usr/bin/bazel \
&& chmod +x /usr/bin/bazel

TORCH_TORCHVISION=$(grep "^torch" py/requirements.txt)
TORCH=$(grep "^torch" py/requirements.txt)
INDEX_URL=https://download.pytorch.org/whl/${CHANNEL}/${CU_VERSION}

# Install all the dependencies required for Torch-TensorRT
pip uninstall -y torch torchvision
pip install --force-reinstall --pre ${TORCH_TORCHVISION} --index-url ${INDEX_URL}
pip install --force-reinstall --pre ${TORCH} --index-url ${INDEX_URL}

export TORCH_BUILD_NUMBER=$(python -c "import torch, urllib.parse as ul; print(ul.quote_plus(torch.__version__))")
export TORCH_INSTALL_PATH=$(python -c "import torch, os; print(os.path.dirname(torch.__file__))")
4 changes: 2 additions & 2 deletions packaging/pre_build_script_windows.sh
@@ -14,12 +14,12 @@ fi

#curl -Lo TensorRT.zip https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/zip/TensorRT-10.3.0.26.Windows.win10.cuda-12.5.zip
#unzip -o TensorRT.zip -d C:/
TORCH_TORCHVISION=$(grep "^torch" py/requirements.txt)
TORCH=$(grep "^torch" py/requirements.txt)
INDEX_URL=https://download.pytorch.org/whl/${CHANNEL}/${CU_VERSION}

# Install all the dependencies required for Torch-TensorRT
pip uninstall -y torch torchvision
pip install --force-reinstall --pre ${TORCH_TORCHVISION} --index-url ${INDEX_URL}
pip install --force-reinstall --pre ${TORCH} --index-url ${INDEX_URL}

export CUDA_HOME="$(echo ${CUDA_PATH} | sed -e 's#\\#\/#g')"
export TORCH_INSTALL_PATH="$(python -c "import torch, os; print(os.path.dirname(torch.__file__))" | sed -e 's#\\#\/#g')"
1 change: 0 additions & 1 deletion py/requirements.txt
@@ -3,6 +3,5 @@ packaging
pybind11==2.6.2
--extra-index-url https://download.pytorch.org/whl/nightly/cu124
torch>=2.5.0.dev,<2.6.0
torchvision>=0.20.0.dev,<0.21.0
--extra-index-url https://pypi.ngc.nvidia.com
pyyaml
4 changes: 1 addition & 3 deletions tests/py/core/test_classes.py
@@ -2,14 +2,12 @@
import unittest
from typing import Dict

import tensorrt as trt
import torch
import torch_tensorrt
import torch_tensorrt as torchtrt
import torchvision.models as models
from torch_tensorrt.dynamo.runtime._TorchTensorRTModule import TorchTensorRTModule

import tensorrt as trt


class TestDevice(unittest.TestCase):
def test_from_string_constructor(self):
2 changes: 2 additions & 0 deletions tests/py/dynamo/models/test_dyn_models.py
@@ -1,5 +1,6 @@
# type: ignore

import importlib
import unittest

import pytest
@@ -175,6 +176,7 @@ def forward(self, x):
)


@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_resnet_dynamic(ir):
"""
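The guard added above follows the standard optional-dependency pattern for tests: skip when the module cannot be found, and defer the import into the test body so collection succeeds without torchvision. A minimal sketch of the pattern, assuming a skip message and a trivial assertion:

```python
import importlib.util
import unittest


@unittest.skipIf(
    not importlib.util.find_spec("torchvision"),
    "torchvision is not installed",
)
def test_needs_torchvision():
    # Deferred import: torchvision is only touched when the test actually runs.
    import torchvision.models as models

    assert models.resnet18() is not None
```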
22 changes: 21 additions & 1 deletion tests/py/dynamo/models/test_engine_cache.py
@@ -1,4 +1,5 @@
# type: ignore
import importlib
import os
import shutil
import unittest
@@ -7,7 +8,6 @@
import pytest
import torch
import torch_tensorrt as torch_trt
import torchvision.models as models
from torch.testing._internal.common_utils import TestCase
from torch_tensorrt.dynamo._defaults import TIMING_CACHE_PATH
from torch_tensorrt.dynamo._engine_cache import BaseEngineCache
@@ -178,8 +178,12 @@ def test_engine_settings_is_not_equal(self):

class TestEngineCache(TestCase):

@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.xfail
def test_dynamo_compile_with_default_disk_engine_cache(self):

import torchvision.models as models

model = models.resnet18(pretrained=True).eval().to("cuda")
example_inputs = (torch.randn((100, 3, 224, 224)).to("cuda"),)
# Mark the dim0 of inputs as dynamic
@@ -252,7 +256,11 @@ def remove_timing_cache(path=TIMING_CACHE_PATH):
msg=f"Engine caching didn't speed up the compilation. Time taken without engine caching: {times[0]} ms, time taken with engine caching: {times[2]} ms",
)

@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
def test_dynamo_compile_with_custom_engine_cache(self):

import torchvision.models as models

model = models.resnet18(pretrained=True).eval().to("cuda")

engine_cache_dir = "/tmp/test_torch_dynamo_with_custom_engine_cache"
@@ -316,8 +324,12 @@ def test_dynamo_compile_with_custom_engine_cache(self):
for h, count in custom_engine_cache.hashes.items()
]

@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
def test_dynamo_compile_change_input_shape(self):
"""Runs compilation 3 times, the cache should miss each time"""

import torchvision.models as models

model = models.resnet18(pretrained=True).eval().to("cuda")
# Mark the dim0 of inputs as dynamic

@@ -348,8 +360,12 @@ def test_dynamo_compile_change_input_shape(self):
for h, count in custom_engine_cache.hashes.items()
]

@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.xfail
def test_torch_compile_with_default_disk_engine_cache(self):

import torchvision.models as models

# Custom Engine Cache
model = models.resnet18(pretrained=True).eval().to("cuda")

@@ -422,7 +438,11 @@ def remove_timing_cache(path=TIMING_CACHE_PATH):
msg=f"Engine caching didn't speed up the compilation. Time taken without engine caching: {times[0]} ms, time taken with engine caching: {times[2]} ms",
)

@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
def test_torch_compile_with_custom_engine_cache(self):

import torchvision.models as models

# Custom Engine Cache
model = models.resnet18(pretrained=True).eval().to("cuda")

1 change: 0 additions & 1 deletion tests/py/dynamo/models/test_export_kwargs_serde.py
@@ -8,7 +8,6 @@
import torch
import torch.nn.functional as F
import torch_tensorrt as torchtrt
import torchvision.models as models
from torch import nn
from torch_tensorrt.dynamo._compiler import (
convert_exported_program_to_serialized_trt_engine,
8 changes: 7 additions & 1 deletion tests/py/dynamo/models/test_export_serde.py
@@ -1,11 +1,11 @@
import importlib
import os
import tempfile
import unittest

import pytest
import torch
import torch_tensorrt as torchtrt
import torchvision.models as models
from torch_tensorrt.dynamo.utils import COSINE_THRESHOLD, cosine_similarity

assertions = unittest.TestCase()
@@ -242,11 +242,14 @@ def forward(self, x):
)


@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_resnet18(ir):
"""
This tests export save and load functionality on Resnet18 model
"""
import torchvision.models as models

model = models.resnet18().eval().cuda()
input = torch.randn((1, 3, 224, 224)).to("cuda")

@@ -283,11 +286,14 @@ def test_resnet18(ir):
)


@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_resnet18_dynamic(ir):
"""
This tests export save and load functionality on Resnet18 model
"""
import torchvision.models as models

model = models.resnet18().eval().cuda()
input = torch.randn((1, 3, 224, 224)).to("cuda")

33 changes: 30 additions & 3 deletions tests/py/dynamo/models/test_model_refit.py
@@ -1,15 +1,16 @@
import importlib
import os
import tempfile
import time
import unittest

import numpy as np
import pytest
import tensorrt as trt
import torch
import torch.nn.functional as F
import torch_tensorrt as torchtrt
import torch_tensorrt as torch_trt
import torchvision.models as models
from torch import nn
from torch_tensorrt.dynamo import refit_module_weights
from torch_tensorrt.dynamo._refit import (
@@ -24,17 +25,17 @@
from torch_tensorrt.logging import TRT_LOGGER
from transformers import BertModel

import tensorrt as trt

assertions = unittest.TestCase()


@unittest.skipIf(
not torch_trt.ENABLED_FEATURES.torch_tensorrt_runtime,
"TorchScript Frontend is not available",
)
@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_mapping():
import torchvision.models as models

model = models.resnet18(pretrained=True).eval().to("cuda")
model2 = models.resnet18(pretrained=False).eval().to("cuda")
@@ -89,9 +90,12 @@ def test_mapping():
not torch_trt.ENABLED_FEATURES.torch_tensorrt_runtime,
"TorchScript Frontend is not available",
)
@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_refit_one_engine_with_weightmap():

import torchvision.models as models

model = models.resnet18(pretrained=False).eval().to("cuda")
model2 = models.resnet18(pretrained=True).eval().to("cuda")
inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]
@@ -139,9 +143,12 @@ def test_refit_one_engine_with_weightmap():
not torch_trt.ENABLED_FEATURES.torch_tensorrt_runtime,
"TorchScript Frontend is not available",
)
@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_refit_one_engine_no_map_with_weightmap():

import torchvision.models as models

model = models.resnet18(pretrained=True).eval().to("cuda")
model2 = models.resnet18(pretrained=False).eval().to("cuda")
inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]
@@ -190,9 +197,12 @@ def test_refit_one_engine_no_map_with_weightmap():
not torch_trt.ENABLED_FEATURES.torch_tensorrt_runtime,
"TorchScript Frontend is not available",
)
@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_refit_one_engine_with_wrong_weightmap():

import torchvision.models as models

model = models.resnet18(pretrained=True).eval().to("cuda")
model2 = models.resnet18(pretrained=False).eval().to("cuda")
inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]
@@ -300,8 +310,12 @@ def test_refit_one_engine_bert_with_weightmap():
not torch_trt.ENABLED_FEATURES.torch_tensorrt_runtime,
"TorchScript Frontend is not available",
)
@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_refit_one_engine_inline_runtime__with_weightmap():

import torchvision.models as models

trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
model = models.resnet18(pretrained=True).eval().to("cuda")
model2 = models.resnet18(pretrained=False).eval().to("cuda")
@@ -346,9 +360,12 @@ def test_refit_one_engine_inline_runtime__with_weightmap():
torch._dynamo.reset()


@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_refit_one_engine_python_runtime_with_weightmap():

import torchvision.models as models

model = models.resnet18(pretrained=True).eval().to("cuda")
model2 = models.resnet18(pretrained=False).eval().to("cuda")
inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]
@@ -466,9 +483,12 @@ def forward(self, x):
not torch_trt.ENABLED_FEATURES.torch_tensorrt_runtime,
"TorchScript Frontend is not available",
)
@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_refit_one_engine_without_weightmap():

import torchvision.models as models

model = models.resnet18(pretrained=True).eval().to("cuda")
model2 = models.resnet18(pretrained=False).eval().to("cuda")
inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]
@@ -570,8 +590,12 @@ def test_refit_one_engine_bert_without_weightmap():
not torch_trt.ENABLED_FEATURES.torch_tensorrt_runtime,
"TorchScript Frontend is not available",
)
@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_refit_one_engine_inline_runtime_without_weightmap():

import torchvision.models as models

trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
model = models.resnet18(pretrained=True).eval().to("cuda")
model2 = models.resnet18(pretrained=False).eval().to("cuda")
@@ -616,9 +640,12 @@ def test_refit_one_engine_inline_runtime_without_weightmap():
torch._dynamo.reset()


@unittest.skipIf(not importlib.util.find_spec("torchvision"), "torchvision is not installed")
@pytest.mark.unit
def test_refit_one_engine_python_runtime_without_weightmap():

import torchvision.models as models

model = models.resnet18(pretrained=True).eval().to("cuda")
model2 = models.resnet18(pretrained=False).eval().to("cuda")
inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]
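Since the same decorator and deferred import now appear in every torchvision-dependent test, a possible follow-up (illustrative only, not part of this diff) is a single shared guard defined once and reused:

```python
# Hypothetical shared helper, e.g. in a common test utilities module.
import importlib.util
import unittest

requires_torchvision = unittest.skipIf(
    not importlib.util.find_spec("torchvision"),
    "torchvision is not installed",
)


@requires_torchvision
def test_example():
    import torchvision.models as models  # skipped before reaching this import

    assert models.resnet18() is not None
```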