From bc734dfaaa07b559cd2ae7426ac523ead81dd529 Mon Sep 17 00:00:00 2001
From: Maxim Vafin
Date: Mon, 24 Jul 2023 13:33:11 +0200
Subject: [PATCH] [PT FE] Enable quantized conv and linear tests (#18723)

---
 .../pytorch_tests/test_quantized_convnd.py | 38 +++++++++++--------
 .../pytorch_tests/test_quantized_linear.py |  6 +--
 2 files changed, 25 insertions(+), 19 deletions(-)

diff --git a/tests/layer_tests/pytorch_tests/test_quantized_convnd.py b/tests/layer_tests/pytorch_tests/test_quantized_convnd.py
index 7424636eea375e..81ffb144bce537 100644
--- a/tests/layer_tests/pytorch_tests/test_quantized_convnd.py
+++ b/tests/layer_tests/pytorch_tests/test_quantized_convnd.py
@@ -38,7 +38,8 @@ def __init__(self):
         self.conv.zero_point = int(zero_point)
 
     def forward(self, x):
-        x_quantized = torch.quantize_per_tensor(x, 1.0, 0, torch.quint8)
+        x_quantized = torch.quantize_per_tensor(
+            x, 1.0, 0, torch.quint8)
         conv = self.conv(x_quantized)
         return torch.dequantize(conv).contiguous()
 
@@ -54,18 +55,26 @@ def forward(self, x):
         "params",
         [
             pytest.param(
-                {"weights_shape": [1, 3, 3, 3], "strides": 1, "pads": 0, "dilations": 1, "groups": 1},
+                {"weights_shape": [1, 3, 3, 3], "strides": 1,
+                    "pads": 0, "dilations": 1, "groups": 1},
                 marks=pytest.mark.xfail(
                     reason="Output channels equal to 1 creates output that fails to cast to contiguous."
                 ),
             ),
-            {"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": 0, "dilations": 1, "groups": 1},
-            {"weights_shape": [2, 3, 3, 3], "strides": 2, "pads": 0, "dilations": 1, "groups": 1},
-            {"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": 1, "dilations": 1, "groups": 1},
-            {"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": 0, "dilations": 2, "groups": 1},
-            {"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": [0, 1], "dilations": 1, "groups": 1},
-            {"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": [1, 0], "dilations": 1, "groups": 1},
-            {"weights_shape": [3, 1, 3, 3], "strides": 1, "pads": 0, "dilations": 1, "groups": 3},
+            {"weights_shape": [2, 3, 3, 3], "strides": 1,
+                "pads": 0, "dilations": 1, "groups": 1},
+            {"weights_shape": [2, 3, 3, 3], "strides": 2,
+                "pads": 0, "dilations": 1, "groups": 1},
+            {"weights_shape": [2, 3, 3, 3], "strides": 1,
+                "pads": 1, "dilations": 1, "groups": 1},
+            {"weights_shape": [2, 3, 3, 3], "strides": 1,
+                "pads": 0, "dilations": 2, "groups": 1},
+            {"weights_shape": [2, 3, 3, 3], "strides": 1,
+                "pads": [0, 1], "dilations": 1, "groups": 1},
+            {"weights_shape": [2, 3, 3, 3], "strides": 1,
+                "pads": [1, 0], "dilations": 1, "groups": 1},
+            {"weights_shape": [3, 1, 3, 3], "strides": 1,
+                "pads": 0, "dilations": 1, "groups": 3},
         ],
     )
     @pytest.mark.parametrize("bias", [True, False])
@@ -73,13 +82,10 @@ def forward(self, x):
     @pytest.mark.parametrize("scale", [1, 0.3, 1.3])
     @pytest.mark.parametrize("zero_point", [0, 1])
     @pytest.mark.nightly
-    # @pytest.mark.precommit Test disabled due to sporadic issues
+    @pytest.mark.precommit
     def test_quantized_conv2d(self, params, bias, relu, scale, zero_point, ie_device, precision, ir_version):
         self._test(
-            *self.create_model(**params, bias=bias, relu=relu, scale=scale, zero_point=zero_point),
-            ie_device,
-            precision,
-            ir_version,
-            trace_model=True,
-            freeze_model=False
+            *self.create_model(**params, bias=bias, relu=relu,
+                               scale=scale, zero_point=zero_point),
+            ie_device, precision, ir_version, trace_model=True, freeze_model=False, quantized_ops=True, quant_size=scale
         )
diff --git a/tests/layer_tests/pytorch_tests/test_quantized_linear.py b/tests/layer_tests/pytorch_tests/test_quantized_linear.py
index 4041bd75dc6be6..8ee268311446bc 100644
--- a/tests/layer_tests/pytorch_tests/test_quantized_linear.py
+++ b/tests/layer_tests/pytorch_tests/test_quantized_linear.py
@@ -72,13 +72,13 @@ def forward(self, inp):
     @pytest.mark.parametrize("zero_point", [0, 1])
     @pytest.mark.parametrize("trace", [True, False])
     @pytest.mark.nightly
-    # @pytest.mark.precommit Test disabled due to sporadic issues
+    @pytest.mark.precommit
     def test_quantized_linear(self, params, scale, zero_point, trace, ie_device, precision, ir_version):
         input_shape = params.get("input_shape")
         weight_shape = params.get("weight_shape")
         bias = params.get("bias", False)
         self._test(*self.create_model(weight_shape, bias, scale, zero_point), ie_device, precision, ir_version,
-                   kwargs_to_prepare_input={"input_shape": input_shape}, trace_model=trace, freeze_model=False)
+                   kwargs_to_prepare_input={"input_shape": input_shape}, trace_model=trace, freeze_model=False, quantized_ops=True, quant_size=scale)
 
     @pytest.mark.parametrize("trace", [True, False])
     @pytest.mark.parametrize("inplace", [True, False])
@@ -86,4 +86,4 @@ def test_quantized_linear(self, params, scale, zero_point, trace, ie_device, pre
     @pytest.mark.precommit
     def test_quantized_hardtanh_linear(self, trace, inplace, ie_device, precision, ir_version):
         self._test(*self.create_hardtanh_model([10, 9], True, 1, 0.3, inplace), ie_device, precision, ir_version,
-                   kwargs_to_prepare_input={"input_shape": [2, 3, 9]}, trace_model=trace, freeze_model=False)
+                   kwargs_to_prepare_input={"input_shape": [2, 3, 9]}, trace_model=trace, freeze_model=False, quantized_ops=True, quant_size=0.3)
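
Reviewer note: for readers unfamiliar with the eager-mode quantized modules these tests exercise, the sketch below reproduces the pattern traced in test_quantized_convnd.py (quantize the fp32 input, run an int8 conv, dequantize). It is a minimal illustration, assuming PyTorch 2.x with the torch.ao.nn.quantized namespace; the shapes and the scale/zero_point values are arbitrary examples, not taken from the test matrix.

import torch

class QuantizedConv(torch.nn.Module):
    def __init__(self, scale=0.3, zero_point=1):
        super().__init__()
        # Int8 conv module; scale/zero_point describe the quantization
        # of its output tensor, as set in the test's create_model.
        self.conv = torch.ao.nn.quantized.Conv2d(
            3, 2, 3, stride=1, padding=0, dilation=1, groups=1, bias=True)
        self.conv.scale = float(scale)
        self.conv.zero_point = int(zero_point)

    def forward(self, x):
        # Quantize fp32 input -> int8 conv -> dequantize back to fp32,
        # mirroring forward() in the test above.
        x_q = torch.quantize_per_tensor(x, 1.0, 0, torch.quint8)
        return torch.dequantize(self.conv(x_q)).contiguous()

model = QuantizedConv()
traced = torch.jit.trace(model, torch.rand(1, 3, 10, 10))  # the trace_model=True path

The re-enabled precommit runs can be selected locally with standard pytest marker filtering, e.g. python -m pytest tests/layer_tests/pytorch_tests/test_quantized_convnd.py -m precommit.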
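
The new quantized_ops=True / quant_size kwargs passed to self._test tell the layer-test harness that the outputs come from quantized kernels, so a bit-exact fp32 comparison is too strict: OpenVINO and PyTorch may legitimately round to adjacent quantization levels, giving differences on the order of one quantization step. As a hypothetical illustration only (assert_quantized_close and rel_eps are invented names, not the PytorchLayerTest API), a tolerance check keyed to the quantization step could look like:

import numpy as np

def assert_quantized_close(expected, actual, quant_size, rel_eps=1e-4):
    # Allow one quantization step of absolute error plus a small
    # relative slack proportional to the output magnitude.
    atol = quant_size + rel_eps * float(np.abs(expected).max())
    max_diff = float(np.abs(expected - actual).max())
    assert max_diff <= atol, f"max diff {max_diff} exceeds atol {atol}"

Note that test_quantized_linear forwards its parametrized scale as quant_size, while test_quantized_hardtanh_linear passes quant_size=0.3, mirroring the 0.3 already present in the create_hardtanh_model([10, 9], True, 1, 0.3, inplace) arguments.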