diff --git a/pyproject.toml b/pyproject.toml
index f6230c8a74..bc6c6b60cf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -66,7 +66,7 @@ dynamic = ["version"]
 
 [project.optional-dependencies]
 torchvision = ["torchvision >=0.20.dev,<0.21.0"]
-quantization = ["nvidia-modelopt[all]>=0.15.1"]
+quantization = ["nvidia-modelopt[deploy,hf,torch]~=0.17.0"]
 monitoring-tools = ["rich >= 13.7.1"]
 jupyter = ["rich[jupyter] >= 13.7.1"]
 
diff --git a/tests/py/dynamo/models/test_models_export.py b/tests/py/dynamo/models/test_models_export.py
index 8976c7fbc0..25116ce865 100644
--- a/tests/py/dynamo/models/test_models_export.py
+++ b/tests/py/dynamo/models/test_models_export.py
@@ -1,5 +1,6 @@
 # type: ignore
 import importlib
+import platform
 import unittest
 from importlib import metadata
 
@@ -250,9 +251,10 @@ def calibrate_loop(model):
 
 
 @unittest.skipIf(
-    not importlib.util.find_spec("modelopt")
-    or Version(metadata.version("nvidia-modelopt")) < Version("0.16.1"),
-    "modelopt 0.16.1 or later is required Int8 quantization is supported in modelopt since 0.16.1 or later",
+    platform.system() != "Linux"
+    or not importlib.util.find_spec("modelopt")
+    or Version(metadata.version("nvidia-modelopt")) < Version("0.17.0"),
+    "modelopt 0.17.0 or later is required; Int8 quantization is supported in modelopt on Linux since 0.17.0",
 )
 @pytest.mark.unit
 def test_base_int8(ir):
diff --git a/tests/py/requirements.txt b/tests/py/requirements.txt
index 460cc01027..f791444c8a 100644
--- a/tests/py/requirements.txt
+++ b/tests/py/requirements.txt
@@ -9,6 +9,5 @@ pytest-xdist>=3.6.1
 pyyaml
 timm>=1.0.3
 transformers==4.40.2
-# TODO @lanlao-nvidia Renable when modelopt can be install properly to run the tests
-# "nvidia-modelopt[all]">=0.16.1,<0.17.0
+nvidia-modelopt[deploy,hf,torch]~=0.17.0
 --extra-index-url https://pypi.nvidia.com