Commit 1d726a4 (1 parent: b2502b3)

NXP backend: Improve constant_pad_nd delegation by using inferred node formats.
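
Background for the change: `constant_pad_nd` consumes its paddings in pairs starting from the last dimension, so which entries touch the channels dimension depends entirely on the tensor's format. A quick illustration in plain PyTorch (nothing NXP-specific is assumed here):

    import torch

    x = torch.zeros(2, 3, 4, 5)  # NCHW: dim 1 is channels
    # Pairs apply back to front: pad[0:2] -> dim -1 (W), pad[2:4] -> dim -2 (H),
    # and pad[4:6] -> dim -3, which is the channels dim of a 4-D NCHW tensor.
    y = torch.ops.aten.constant_pad_nd(x, [1, 1, 0, 0, 2, 0], 0.0)
    print(y.shape)  # torch.Size([2, 5, 4, 7])

The old check always treated paddings[4:6] as the channel padding. With the inferred `NXP_NODE_FORMAT` metadata, the converter keeps that rule for channels-first nodes, and for formatless nodes it instead rejects padding of the last dimension (paddings[:2]), which is the one that ends up as channels on Neutron.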

File tree

2 files changed: +63 -9 lines changed


backends/nxp/backend/ir/converter/node_converters/ops_converters/constant_pad_nd_converter.py

Lines changed: 12 additions & 8 deletions
@@ -27,6 +27,8 @@
     pad_options,
     pad_v2_options,
 )
+
+from executorch.backends.nxp.backend.node_format_inference import NXP_NODE_FORMAT
 from torch.fx import Node
 from torch.nn import Parameter
 
@@ -41,11 +43,17 @@ def _is_supported_on_target(
     ) -> bool:
         match target:
             case Target.RT700:
-                # TODO: Consider different tensor formats (dim-order)
                 paddings = node.args[1]
-                if len(paddings) > 4 and paddings[4:6] != [0, 0]:
-                    # Attempt to Pad channels dimension, which is not supported on Neutron.
-                    return False
+                if node.meta[NXP_NODE_FORMAT].is_channels_first():
+                    # Dim `1` will end up being the channels. It is padded by paddings[4:6].
+                    if len(paddings) > 4 and paddings[4:6] != [0, 0]:
+                        # Attempt to Pad channels dimension -> currently not supported
+                        return False
+                else:
+                    # Dim `-1` will end up being the channels. It is padded by paddings[:2].
+                    if len(paddings) > 0 and paddings[:2] != [0, 0]:
+                        # Attempt to Pad channels dimension -> currently not supported
+                        return False
 
                 return True
 
@@ -71,10 +79,6 @@ def _is_supported_in_IR(
         if not NodeConverter._has_shared_q_params_if_quantized(node):
            return False
 
-        if len(paddings) > 4 and paddings[4:6] != [0, 0]:
-            # Attempt to Pad channels dimension -> currently not supported
-            return False
-
        return True
 
    # noinspection PyMethodMayBeStatic
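
The net effect of the converter change can be sketched as a standalone predicate (a minimal sketch; the `is_channels_first` flag stands in for the `node.meta[NXP_NODE_FORMAT]` lookup the real converter performs):

    def pad_is_delegable(paddings: list[int], is_channels_first: bool) -> bool:
        # Padding the channels dimension is not supported on Neutron.
        if is_channels_first:
            # Channels are dim 1 (dim -3 of a 4-D tensor) -> paddings[4:6].
            return not (len(paddings) > 4 and paddings[4:6] != [0, 0])
        # Otherwise the last dim plays the role of channels -> paddings[:2].
        return not (len(paddings) > 0 and paddings[:2] != [0, 0])

    # These mirror the four new test cases below:
    assert pad_is_delegable([0, 0, 1, 2, 3, 4], is_channels_first=False)
    assert not pad_is_delegable([0, 1], is_channels_first=False)
    assert pad_is_delegable([1, 2, 3, 4, 0, 0], is_channels_first=True)
    assert not pad_is_delegable([0, 0, 0, 0, 1, 0], is_channels_first=True)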

backends/nxp/tests/ir/converter/node_converter/test_constant_pad_nd_converter.py

Lines changed: 51 additions & 1 deletion
@@ -1,4 +1,4 @@
-# Copyright 2024 NXP
+# Copyright 2024-2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
@@ -13,13 +13,15 @@
 )
 from executorch.backends.nxp.tests.executors import (
     convert_run_compare,
+    graph_contains_any_of_ops,
     ToNCHWPreprocess,
     ToNHWCPreprocess,
 )
 from executorch.backends.nxp.tests.models import (
     ConstantPadNDConvModule,
     ConstantPadNDModule,
 )
+from executorch.exir.dialects._ops import ops as exir_ops
 
 
 @pytest.fixture(autouse=True)
 
@@ -121,3 +123,51 @@ def test_constant_pad_nd__unsupported_paddings(input_shape, paddings):
     nodes = list(exec_program.graph.nodes)
     # There is at least one non-delegated Pad node
     assert any(node.name == "aten_constant_pad_nd_default" for node in nodes)
+
+
+def test_constant_pad_nd__delegation__formatless__supported_padding():
+    input_shape = (2, 4, 6, 8)  # Formatless -> the last dim (8) will be padded.
+    paddings = [0, 0, 1, 2, 3, 4]  # The last dim is padded using the first 2 paddings.
+    model = ConstantPadNDModule(paddings)
+    exec_program = to_quantized_edge_program(model, input_shape).exported_program()
+
+    # Make sure the `pad` was delegated.
+    assert not graph_contains_any_of_ops(
+        exec_program.graph, [exir_ops.edge.aten.constant_pad_nd.default]
+    )
+
+
+def test_constant_pad_nd__delegation__formatless__unsupported_padding():
+    input_shape = (2, 4, 6, 8)  # Formatless -> the last dim (8) will be padded.
+    paddings = [0, 1]  # The last dim is padded using the first 2 paddings.
+    model = ConstantPadNDModule(paddings)
+    exec_program = to_quantized_edge_program(model, input_shape).exported_program()
+
+    # Make sure the `pad` was NOT delegated.
+    assert graph_contains_any_of_ops(
+        exec_program.graph, [exir_ops.edge.aten.constant_pad_nd.default]
+    )
+
+
+def test_constant_pad_nd__delegation__channels_first__supported_padding():
+    input_shape = (2, 4, 6, 8)  # Channels first -> the second dim (4) will be padded.
+    paddings = [1, 2, 3, 4, 0, 0]  # The second dim is padded using the paddings[4:6].
+    model = ConstantPadNDConvModule(paddings)
+    exec_program = to_quantized_edge_program(model, input_shape).exported_program()
+
+    # Make sure the `pad` was delegated.
+    assert not graph_contains_any_of_ops(
+        exec_program.graph, [exir_ops.edge.aten.constant_pad_nd.default]
+    )
+
+
+def test_constant_pad_nd__delegation__channels_first__unsupported_padding():
+    input_shape = (2, 3, 6, 8)  # Channels first -> the second dim (3) will be padded.
+    paddings = [0, 0, 0, 0, 1, 0]  # The second dim is padded using the paddings[4:6].
+    model = ConstantPadNDConvModule(paddings)
+    exec_program = to_quantized_edge_program(model, input_shape).exported_program()
+
+    # Make sure the `pad` was NOT delegated.
+    assert graph_contains_any_of_ops(
+        exec_program.graph, [exir_ops.edge.aten.constant_pad_nd.default]
+    )
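
Assuming a standard ExecuTorch development checkout, the new cases can be exercised directly with pytest (the `-k` filter simply matches the `delegation` substring in the new test names):

    pytest backends/nxp/tests/ir/converter/node_converter/test_constant_pad_nd_converter.py -k delegation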
