Skip to content

Commit

Permalink
[PT FE] Enable quantized conv and linear tests (openvinotoolkit#18723)
Browse files Browse the repository at this point in the history
  • Loading branch information
mvafin authored Jul 24, 2023
1 parent 7e1d828 commit bc734df
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 19 deletions.
38 changes: 22 additions & 16 deletions tests/layer_tests/pytorch_tests/test_quantized_convnd.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,8 @@ def __init__(self):
self.conv.zero_point = int(zero_point)

def forward(self, x):
    """Quantize the input, apply the quantized conv, and return a
    dequantized, contiguous result.

    x is quantized per-tensor to quint8 with scale 1.0 and zero point 0
    before being fed to ``self.conv`` (a quantized conv module configured
    in ``__init__`` — see its ``scale``/``zero_point`` assignments).
    ``.contiguous()`` is applied so the traced output has a dense layout.
    """
    x_quantized = torch.quantize_per_tensor(
        x, 1.0, 0, torch.quint8)
    conv = self.conv(x_quantized)
    return torch.dequantize(conv).contiguous()

Expand All @@ -54,32 +55,37 @@ def forward(self, x):
"params",
[
pytest.param(
{"weights_shape": [1, 3, 3, 3], "strides": 1, "pads": 0, "dilations": 1, "groups": 1},
{"weights_shape": [1, 3, 3, 3], "strides": 1,
"pads": 0, "dilations": 1, "groups": 1},
marks=pytest.mark.xfail(
reason="Output channels equal to 1 creates output that fails to cast to contiguous."
),
),
{"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": 0, "dilations": 1, "groups": 1},
{"weights_shape": [2, 3, 3, 3], "strides": 2, "pads": 0, "dilations": 1, "groups": 1},
{"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": 1, "dilations": 1, "groups": 1},
{"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": 0, "dilations": 2, "groups": 1},
{"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": [0, 1], "dilations": 1, "groups": 1},
{"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": [1, 0], "dilations": 1, "groups": 1},
{"weights_shape": [3, 1, 3, 3], "strides": 1, "pads": 0, "dilations": 1, "groups": 3},
{"weights_shape": [2, 3, 3, 3], "strides": 1,
"pads": 0, "dilations": 1, "groups": 1},
{"weights_shape": [2, 3, 3, 3], "strides": 2,
"pads": 0, "dilations": 1, "groups": 1},
{"weights_shape": [2, 3, 3, 3], "strides": 1,
"pads": 1, "dilations": 1, "groups": 1},
{"weights_shape": [2, 3, 3, 3], "strides": 1,
"pads": 0, "dilations": 2, "groups": 1},
{"weights_shape": [2, 3, 3, 3], "strides": 1,
"pads": [0, 1], "dilations": 1, "groups": 1},
{"weights_shape": [2, 3, 3, 3], "strides": 1,
"pads": [1, 0], "dilations": 1, "groups": 1},
{"weights_shape": [3, 1, 3, 3], "strides": 1,
"pads": 0, "dilations": 1, "groups": 3},
],
)
@pytest.mark.parametrize("bias", [True, False])
@pytest.mark.parametrize("relu", [True, False])
@pytest.mark.parametrize("scale", [1, 0.3, 1.3])
@pytest.mark.parametrize("zero_point", [0, 1])
@pytest.mark.nightly
@pytest.mark.precommit
def test_quantized_conv2d(self, params, bias, relu, scale, zero_point, ie_device, precision, ir_version):
    """Run the quantized Conv2d model through the layer-test harness.

    Builds the model via ``self.create_model`` from the parametrized
    conv configuration, then delegates to ``self._test``.  Tracing is
    required (``trace_model=True``) and freezing disabled; the
    ``quantized_ops=True`` / ``quant_size=scale`` flags tell the harness
    to compare results with quantization-aware tolerances.
    """
    self._test(
        *self.create_model(**params, bias=bias, relu=relu,
                           scale=scale, zero_point=zero_point),
        ie_device, precision, ir_version, trace_model=True,
        freeze_model=False, quantized_ops=True, quant_size=scale
    )
6 changes: 3 additions & 3 deletions tests/layer_tests/pytorch_tests/test_quantized_linear.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,18 +72,18 @@ def forward(self, inp):
@pytest.mark.parametrize("zero_point", [0, 1])
@pytest.mark.parametrize("trace", [True, False])
@pytest.mark.nightly
@pytest.mark.precommit
def test_quantized_linear(self, params, scale, zero_point, trace, ie_device, precision, ir_version):
    """Run the quantized Linear model through the layer-test harness.

    ``params`` supplies ``input_shape``, ``weight_shape`` and an
    optional ``bias`` flag (default False).  ``quantized_ops=True`` /
    ``quant_size=scale`` tell ``self._test`` to use quantization-aware
    result comparison.
    """
    input_shape = params.get("input_shape")
    weight_shape = params.get("weight_shape")
    bias = params.get("bias", False)
    self._test(*self.create_model(weight_shape, bias, scale, zero_point),
               ie_device, precision, ir_version,
               kwargs_to_prepare_input={"input_shape": input_shape},
               trace_model=trace, freeze_model=False,
               quantized_ops=True, quant_size=scale)

@pytest.mark.parametrize("trace", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.nightly
@pytest.mark.precommit
def test_quantized_hardtanh_linear(self, trace, inplace, ie_device, precision, ir_version):
    """Run the quantized Linear+hardtanh model through the test harness.

    Uses a fixed weight shape [10, 9], bias enabled, scale 0.3 and zero
    point 1 (hence ``quant_size=0.3`` for the quantization-aware
    comparison); only tracing mode and the hardtanh ``inplace`` flag are
    parametrized.
    """
    self._test(*self.create_hardtanh_model([10, 9], True, 1, 0.3, inplace),
               ie_device, precision, ir_version,
               kwargs_to_prepare_input={"input_shape": [2, 3, 9]},
               trace_model=trace, freeze_model=False,
               quantized_ops=True, quant_size=0.3)

0 comments on commit bc734df

Please sign in to comment.