Prerequisites
Quick summary
For models containing a Resize node of version > 10, if the optional RoI input is not used and left empty, check_all_tensor_shapes_specified(...) (qonnx/src/qonnx/core/modelwrapper.py, line 493 at commit 279f9c3) returns False and model execution raises an exception.
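My understanding of the mechanism (an assumption based on the observed error, not a quote of the qonnx source): the empty string used for the optional input is treated like a regular tensor name, no shape can be found for it, and the check fails. Conceptually:

# Hypothetical sketch of the failing check, not the actual qonnx implementation
for node in model.graph.node:
    for tensor_name in node.input:
        if model.get_tensor_shape(tensor_name) is None:
            # True for the empty-string RoI placeholder
            raise Exception("Found unspecified tensor shapes, try infer_shapes")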
Steps to Reproduce
Check out the main branch at commit 279f9c3.
Run the following code:
import onnx
import numpy as np
from onnx import helper, TensorProto
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.infer_shapes import InferShapes
import qonnx.core.onnx_exec as oxe
# Define the input tensor (e.g., a 4D tensor with shape [N, C, H, W])
input_tensor = helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 64, 64])
# Define the output tensor shape (e.g., resizing to [1, 3, 128, 128])
output_tensor = helper.make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 128, 128])
# Define the scale tensor (for upscaling by a factor of 2 along height and width)
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
scales_initializer = helper.make_tensor("scales", TensorProto.FLOAT, [4], scales)
# Create the Resize node
resize_node = helper.make_node(
    "Resize",
    inputs=["input", "", "scales"],  # Empty string for the optional "roi" input, which is not used here
    outputs=["output"],
    mode="nearest",
)
# Create the graph with input, scales, and output
graph = helper.make_graph(
    nodes=[resize_node],
    name="ResizeGraph",
    inputs=[input_tensor],
    outputs=[output_tensor],
    initializer=[scales_initializer],
)
# Define the model
model = helper.make_model(graph, opset_imports=[helper.make_operatorsetid("", 13)])
# Save the model
onnx.save(model, "resize_model.onnx")
qonnx_model = ModelWrapper('resize_model.onnx')
# Calling InferShapes() does not solve the issue
qonnx_model = qonnx_model.transform(InferShapes())
ishape = tuple(qonnx_model.get_tensor_shape(qonnx_model.graph.input[0].name))
X = np.random.uniform(low=0, high=1, size=np.prod(ishape)).reshape(ishape).astype(np.float32)
idict = {qonnx_model.graph.input[0].name: X}
y_qonnx = oxe.execute_onnx(qonnx_model, idict)[qonnx_model.graph.output[0].name]
Expected behavior
The empty RoI input should not be treated as a regular tensor: when RoI is empty, the scales (or sizes) input should be used to compute the output shape of the node.
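For reference, this is the shape computation the ONNX Resize specification prescribes when scales is given; a minimal standalone sketch in plain numpy:

import numpy as np

in_shape = np.array([1, 3, 64, 64])
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
out_shape = np.floor(in_shape * scales).astype(np.int64)
print(out_shape)  # [  1   3 128 128]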
Actual behavior
raise Exception("Found unspecified tensor shapes, try infer_shapes")
Exception: Found unspecified tensor shapes, try infer_shapes
raised from qonnx/src/qonnx/core/modelwrapper.py, line 493 at commit 279f9c3.
Possible fix
I fixed the issue by creating the following transformation (note: this is just to show how I fixed it; I don't think this exact code should be added to the repository):
import onnx
import numpy as np
from onnx import helper
from qonnx.transformation.base import Transformation

class FillEmptyRoI(Transformation):
    "Fill the empty RoI input tensor of a Resize node, to avoid issues during shape inference"
    def apply(self, model):
        graph_modified = False
        for i, node in enumerate(model.graph.node):
            if node.op_type == 'Resize':
                # The optional 'roi' input is the second input
                if len(node.input) > 2 and node.input[1] == '':
                    # Register a zero-length RoI initializer so the input is no longer nameless
                    roi = onnx.numpy_helper.from_array(np.empty([0], dtype=np.float32), node.name + "_roi")
                    model.graph.initializer.append(roi)
                    roi_value_info = helper.make_tensor_value_info(node.name + "_roi", onnx.TensorProto.FLOAT, [0])
                    model.graph.value_info.append(roi_value_info)
                    inputs = [node.input[0], node.name + "_roi", node.input[2]]
                    # Preserve the original interpolation mode of the node
                    mode_string = ''
                    for attr in model.graph.node[i].attribute:
                        if attr.name == 'mode':
                            mode_string = attr.s
                    # Rebuild the Resize node with the RoI input filled in
                    new_node = onnx.helper.make_node(
                        "Resize",
                        coordinate_transformation_mode="asymmetric",
                        cubic_coeff_a=-0.75,
                        mode=mode_string,
                        nearest_mode="floor",
                        inputs=inputs,
                        outputs=node.output,
                    )
                    model.graph.node.remove(node)
                    model.graph.node.insert(i, new_node)
                    graph_modified = True
        return (model, graph_modified)
The following complete script works as expected:
import onnx
import numpy as np
from onnx import helper, TensorProto
from qonnx.core.modelwrapper import ModelWrapper
import qonnx.core.onnx_exec as oxe
from qonnx.transformation.base import Transformation
from qonnx.transformation.infer_shapes import InferShapes
class FillEmptyRoI(Transformation):
    "Fill the empty RoI input tensor of a Resize node, to avoid issues during shape inference"
    def apply(self, model):
        graph_modified = False
        for i, node in enumerate(model.graph.node):
            if node.op_type == 'Resize':
                # The optional 'roi' input is the second input
                if len(node.input) > 2 and node.input[1] == '':
                    # Register a zero-length RoI initializer so the input is no longer nameless
                    roi = onnx.numpy_helper.from_array(np.empty([0], dtype=np.float32), node.name + "_roi")
                    model.graph.initializer.append(roi)
                    roi_value_info = helper.make_tensor_value_info(node.name + "_roi", onnx.TensorProto.FLOAT, [0])
                    model.graph.value_info.append(roi_value_info)
                    inputs = [node.input[0], node.name + "_roi", node.input[2]]
                    # Preserve the original interpolation mode of the node
                    mode_string = ''
                    for attr in model.graph.node[i].attribute:
                        if attr.name == 'mode':
                            mode_string = attr.s
                    # Rebuild the Resize node with the RoI input filled in
                    new_node = onnx.helper.make_node(
                        "Resize",
                        coordinate_transformation_mode="asymmetric",
                        cubic_coeff_a=-0.75,
                        mode=mode_string,
                        nearest_mode="floor",
                        inputs=inputs,
                        outputs=node.output,
                    )
                    model.graph.node.remove(node)
                    model.graph.node.insert(i, new_node)
                    graph_modified = True
        return (model, graph_modified)
# Define the input tensor (e.g., a 4D tensor with shape [N, C, H, W])
input_tensor = helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 64, 64])
# Define the output tensor shape (e.g., resizing to [1, 3, 128, 128])
output_tensor = helper.make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 128, 128])
# Define the scale tensor (for upscaling by a factor of 2 along height and width)
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
scales_initializer = helper.make_tensor("scales", TensorProto.FLOAT, [4], scales)
# Create the Resize node
resize_node = helper.make_node(
    "Resize",
    inputs=["input", "", "scales"],  # Empty string for the optional "roi" input, which is not used here
    outputs=["output"],
    mode="nearest",
)
# Create the graph with input, scales, and output
graph = helper.make_graph(
    nodes=[resize_node],
    name="ResizeGraph",
    inputs=[input_tensor],
    outputs=[output_tensor],
    initializer=[scales_initializer],
)
# Define the model
model = helper.make_model(graph, opset_imports=[helper.make_operatorsetid("", 13)])
# Save the model
onnx.save(model, "resize_model.onnx")
qonnx_model = ModelWrapper('resize_model.onnx')
# Calling InferShapes() does not solve the issue
# qonnx_model = qonnx_model.transform(InferShapes())
qonnx_model = qonnx_model.transform(FillEmptyRoI())
ishape = tuple(qonnx_model.get_tensor_shape(qonnx_model.graph.input[0].name))
X = np.random.uniform(low=0, high=1, size=np.prod(ishape)).reshape(ishape).astype(np.float32)
idict = {qonnx_model.graph.input[0].name: X}
y_qonnx = oxe.execute_onnx(qonnx_model, idict)[qonnx_model.graph.output[0].name]
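As a quick sanity check after running the script above (a minimal addition, assuming y_qonnx is in scope):

assert y_qonnx.shape == (1, 3, 128, 128), y_qonnx.shape
print("Resize executed, output shape:", y_qonnx.shape)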
Note: if you want to ingest the model in hls4ml, I found it useful to revert the model to its previous state, i.e. with the RoI input set to ''.
I used this optimiser to implement this behaviour:
class EmptyFilledRoI(Transformation):
    "Remove the RoI tensor of a Resize node that was added for shape inference"
    def apply(self, model):
        graph_modified = False
        for node in model.graph.node:
            if node.op_type == 'Resize':
                # The optional 'roi' input is the second input
                if len(node.input) > 2 and node.input[1] != '':
                    # Drop the RoI initializer and reset the input name to the empty string
                    init_names = [x.name for x in model.graph.initializer]
                    i = init_names.index(node.input[1])
                    init_to_remove = model.graph.initializer[i]
                    model.graph.initializer.remove(init_to_remove)
                    node.input[1] = ''
                    graph_modified = True
        return (model, graph_modified)
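For completeness, a sketch of the full round trip as I use it (the model file and transformation classes are the ones defined above; the hls4ml ingestion step itself is omitted):

qonnx_model = ModelWrapper('resize_model.onnx')
qonnx_model = qonnx_model.transform(FillEmptyRoI())    # make the model shape-inferable and executable
# ... shape inference, execution, other QONNX transformations ...
qonnx_model = qonnx_model.transform(EmptyFilledRoI())  # restore the empty RoI input before hls4ml ingestion
qonnx_model.save('resize_model_clean.onnx')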