77
88from typing import Tuple
99
10+ import pytest
1011import torch
1112from executorch .backends .arm .quantizer .arm_quantizer import (
1213 get_symmetric_a16w8_quantization_config ,
4344
4445
class Slice(torch.nn.Module):
    """Apply a sequence of slice operations to successive dimensions of a tensor.

    Each ``(start, stop)`` pair in ``s`` becomes a Python ``slice`` object;
    the resulting slices index ``x`` along its leading dimensions, mirroring
    ``x[start0:stop0, start1:stop1, ...]``.
    """

    def forward(self, x: torch.Tensor, s: list[tuple[int, int]]):
        # Expand each (start, stop) pair into a slice and index with them all.
        return x[[slice(*bounds) for bounds in s]]
@@ -153,6 +153,9 @@ def get_symmetric_a16w8_slice_quantizer(per_channel_quantization=False):
153153
154154
155155@common .parametrize ("test_data" , test_data_suite )
156+ @pytest .mark .xfail (
157+ reason = "missing int16 slice ops support; fails at TOSA reference model with Unsupported operation type or rank. See: https://github.com/pytorch/executorch/issues/13976"
158+ )
156159def test_slice_tensor_16a8w_tosa_INT (test_data : torch .Tensor ):
157160 """Test slice operation with 16A8W quantization (16-bit activations, 8-bit weights)"""
158161 per_channel_quantization = False
@@ -178,6 +181,9 @@ def test_slice_tensor_16a8w_tosa_INT(test_data: torch.Tensor):
178181
179182@common .parametrize ("test_data" , test_data_suite )
180183@common .XfailIfNoCorstone300
184+ @pytest .mark .xfail (
185+ reason = "Vela compilation fails with 'Invalid arguments' for int16 slice operations"
186+ )
181187def test_slice_tensor_16a8w_u55_INT16 (test_data : torch .Tensor ):
182188 """Test slice operation with 16A8W quantization on U55 (16-bit activations, 8-bit weights)"""
183189 per_channel_quantization = False
@@ -202,6 +208,9 @@ def test_slice_tensor_16a8w_u55_INT16(test_data: torch.Tensor):
202208
203209@common .parametrize ("test_data" , test_data_suite )
204210@common .XfailIfNoCorstone320
211+ @pytest .mark .xfail (
212+ reason = "Vela compilation fails with 'Invalid arguments' for int16 slice operations"
213+ )
205214def test_slice_tensor_16a8w_u85_INT16 (test_data : torch .Tensor ):
206215 """Test slice operation with 16A8W quantization on U85 (16-bit activations, 8-bit weights)"""
207216 per_channel_quantization = False
0 commit comments