Commit 16ced4e

Arm backend: Enable run on vulkan runtime by default and add xfails (pytorch#14462)
Enables running on the Vulkan runtime by default and adds xfails for the tests that cannot yet run.
1 parent e252353 commit 16ced4e

19 files changed: +43 -49 lines
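The changes below follow two patterns. Model tests that cannot yet execute on the Vulkan runtime opt out explicitly by passing run_on_vulkan_runtime=False, while the op-level VGF tests keep running and are marked with @pytest.mark.xfail so the known gap (MLETORCH-1410) is tracked without failing the suite. A minimal sketch of the pattern, using a hypothetical DummyVgfPipeline stand-in rather than the real VgfPipeline:

import pytest


class DummyVgfPipeline:
    # Hypothetical stand-in for VgfPipeline, only to illustrate the keyword
    # pattern: after this commit the Vulkan runtime is used unless a test
    # opts out via run_on_vulkan_runtime=False.
    def __init__(self, module, run_on_vulkan_runtime: bool = True):
        self.module = module
        self.run_on_vulkan_runtime = run_on_vulkan_runtime

    def run(self):
        if self.run_on_vulkan_runtime:
            # Stand-in for the failure mode tracked by MLETORCH-1410.
            raise NotImplementedError("Tensor dimension count not supported: 0")


def test_opted_out_of_vulkan_runtime():
    # Pattern used by the model tests: skip Vulkan runtime execution for now.
    pipeline = DummyVgfPipeline(module=None, run_on_vulkan_runtime=False)  # TODO: run on vulkan runtime
    pipeline.run()


@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_expected_to_fail_on_vulkan_runtime():
    # Pattern used by the op tests: still runs, but the failure is expected
    # and reported as XFAIL instead of breaking the suite.
    DummyVgfPipeline(module=None).run()

Run with pytest, the marked test is reported as XFAIL; once the underlying issue is fixed it shows up as XPASS, signalling that the mark can be removed.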

backends/arm/test/models/test_conformer.py

Lines changed: 1 addition & 10 deletions
@@ -136,18 +136,9 @@ def test_conformer_vgf_INT():
         exir_op=[],
         tosa_version="TOSA-1.0+INT",
         use_to_edge_transform_and_lower=True,
+        run_on_vulkan_runtime=False,  # TODO: run on vulkan runtime
     )
     pipeline.pop_stage("check_count.exir")
-
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs",
-    #     get_test_inputs(
-    #         TestConformer.dim, TestConformer.lengths, TestConformer.num_examples
-    #     ),
-    #     rtol=1.0,
-    #     atol=3.0,
-    # )
     pipeline.run()
 
 
backends/arm/test/models/test_dl3_arm.py

Lines changed: 1 addition & 8 deletions
@@ -99,11 +99,8 @@ def test_dl3_vgf_INT():
         exir_op=[],
         tosa_version="TOSA-1.0+INT",
         use_to_edge_transform_and_lower=True,
+        run_on_vulkan_runtime=False,  # TODO: run on vulkan runtime
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", rtol=1.0, atol=1.0
-    # )
     pipeline.run()
 
 
@@ -117,8 +114,4 @@ def test_dl3_vgf_FP():
         tosa_version="TOSA-1.0+FP",
         use_to_edge_transform_and_lower=True,
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", rtol=1.0, atol=1.0
-    # )
     pipeline.run()

backends/arm/test/models/test_lstm_arm.py

Lines changed: 0 additions & 8 deletions
@@ -111,10 +111,6 @@ def test_lstm_vgf_INT():
         tosa_version="TOSA-1.0+INT",
         use_to_edge_transform_and_lower=True,
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
-    # )
     pipeline.run()
 
 
@@ -128,8 +124,4 @@ def test_lstm_vgf_FP():
         tosa_version="TOSA-1.0+FP",
         use_to_edge_transform_and_lower=True,
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
-    # )
     pipeline.run()

backends/arm/test/models/test_mobilenet_v2_arm.py

Lines changed: 2 additions & 8 deletions
@@ -127,11 +127,8 @@ def test_mv2_vgf_INT(per_channel_quantization):
         per_channel_quantization=per_channel_quantization,
         atol=0.25,
         qtol=1,
+        run_on_vulkan_runtime=False,
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
-    # )
     pipeline.run()
 
 
@@ -144,9 +141,6 @@ def test_mv2_vgf_FP():
         exir_op=[],
         tosa_version="TOSA-1.0+FP",
         use_to_edge_transform_and_lower=True,
+        run_on_vulkan_runtime=False,
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
-    # )  # TODO: MLETORCH-1036 decrease tolerance
     pipeline.run()

backends/arm/test/ops/test_addmm.py

Lines changed: 2 additions & 0 deletions
@@ -167,6 +167,7 @@ def test_addmm_u85_INT(test_data: Tuple):
 
 @common.parametrize("test_data", test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_addmm_vgf_FP(test_data: input_t1):
     pipeline = VgfPipeline[input_t1](
         Addmm(),
@@ -180,6 +181,7 @@ def test_addmm_vgf_FP(test_data: input_t1):
 
 @common.parametrize("test_data", test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_addmm_vgf_INT(test_data: input_t1):
     pipeline = VgfPipeline[input_t1](
         Addmm(),

backends/arm/test/ops/test_amax.py

Lines changed: 4 additions & 0 deletions
@@ -140,6 +140,7 @@ def test_max_dim_tosa_FP_not_delegated():
 
 @common.parametrize("test_data", Amax.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_amax_vgf_FP(test_data: Amax.input_t):
     data, dim, keep_dims = test_data()
     module = Amax(dim, keep_dims)
@@ -154,6 +155,7 @@ def test_amax_vgf_FP(test_data: Amax.input_t):
 
 @common.parametrize("test_data", Amax.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_amax_vgf_INT(test_data: Amax.input_t):
     data, dim, keep_dims = test_data()
     module = Amax(dim, keep_dims)
@@ -168,6 +170,7 @@ def test_amax_vgf_INT(test_data: Amax.input_t):
 
 @common.parametrize("test_data", Max.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_max_dim_vgf_FP_to_amax(test_data: Max.input_t):
     data, dim = test_data()
     pipeline = VgfPipeline[Max.input_t](
@@ -181,6 +184,7 @@ def test_max_dim_vgf_FP_to_amax(test_data: Max.input_t):
 
 @common.parametrize("test_data", Max.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_max_dim_vgf_INT_to_amax(test_data: Max.input_t):
     data, dim = test_data()
     pipeline = VgfPipeline[Max.input_t](

backends/arm/test/ops/test_amin.py

Lines changed: 4 additions & 0 deletions
@@ -152,6 +152,7 @@ def test_min_dim_tosa_FP_not_delegated():
 
 @common.parametrize("test_data", Amin.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_amin_vgf_FP(test_data: Amin.input_t):
     data, dim, keep_dims = test_data()
     pipeline = VgfPipeline[Amin.input_t](
@@ -162,6 +163,7 @@ def test_amin_vgf_FP(test_data: Amin.input_t):
 
 @common.parametrize("test_data", Amin.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_amin_vgf_INT(test_data: Amin.input_t):
     data, dim, keep_dims = test_data()
     pipeline = VgfPipeline[Amin.input_t](
@@ -175,6 +177,7 @@ def test_amin_vgf_INT(test_data: Amin.input_t):
 
 @common.parametrize("test_data", Min.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_min_dim_vgf_FP_to_amin(test_data: Min.input_t):
     data, dim = test_data()
     pipeline = VgfPipeline[Min.input_t](
@@ -188,6 +191,7 @@ def test_min_dim_vgf_FP_to_amin(test_data: Min.input_t):
 
 @common.parametrize("test_data", Min.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_min_dim_vgf_INT_to_amin(test_data: Min.input_t):
     data, dim = test_data()
     pipeline = VgfPipeline[Min.input_t](

backends/arm/test/ops/test_any.py

Lines changed: 3 additions & 0 deletions
@@ -6,6 +6,7 @@
 
 from typing import List, Tuple
 
+import pytest
 import torch
 from executorch.backends.arm.test import common
 from executorch.backends.arm.test.tester.test_pipeline import (
@@ -189,6 +190,7 @@ def test_any_u85_INT(test_data: input_t1):
 
 @common.parametrize("test_data", test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_any_vgf_FP(test_data: input_t1):
     op, data_fn = test_data()
     pipeline = VgfPipeline[input_t1](
@@ -203,6 +205,7 @@ def test_any_vgf_FP(test_data: input_t1):
 
 @common.parametrize("test_data", test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_any_vgf_INT(test_data: input_t1):
     op, data_fn = test_data()
     pipeline = VgfPipeline[input_t1](

backends/arm/test/ops/test_bmm.py

Lines changed: 0 additions & 2 deletions
@@ -186,6 +186,4 @@ def test_bmm_vgf_INT_single_input(test_data: input_t1):
         exir_op_bmm,
         tosa_version="TOSA-1.0+INT",
     )
-    # TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
-    # pipeline.change_args("run_method_and_compare_outputs", qtol=1)
     pipeline.run()

backends/arm/test/ops/test_clamp.py

Lines changed: 0 additions & 2 deletions
@@ -149,6 +149,4 @@ def test_clamp_vgf_INT(test_data):
         exir_op,
         tosa_version="TOSA-1.0+INT",
     )
-    # TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
-    # pipeline.change_args("run_method_and_compare_outputs", qtol=1)
     pipeline.run()
