Skip to content

Commit

Permalink
Fix DNNC output parsing issue if non-alphabetic and non-numeric characters exist in the output names
Browse files Browse the repository at this point in the history
  • Loading branch information
jornt-xilinx committed May 12, 2021
1 parent bc290a5 commit 52d7e7c
Show file tree
Hide file tree
Showing 3 changed files with 216 additions and 62 deletions.
114 changes: 58 additions & 56 deletions python/pyxir/contrib/target/components/DPUCZDX8G/dnnc_output.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

""" Module wrapping parsing DNNC compiler output """
"""Module wrapping parsing DNNC compiler output"""

import abc
import re


class BaseDNNCOutput(object):
Expand All @@ -38,47 +39,46 @@ def get_output_nodes(self):


class DNNCOutput(BaseDNNCOutput):

def _parse(self, output):
# type: (str) -> dict
""" Parse DNNC v4.0 compiler output """

d = {
'Boundary Input Tensors': {},
'Boundary Output Tensors': {},
'Boundary Output Tensors Shapes': {},
'Input Nodes': {},
'Output Nodes': {}
"Boundary Input Tensors": {},
"Boundary Output Tensors": {},
"Boundary Output Tensors Shapes": {},
"Input Nodes": {},
"Output Nodes": {},
}
lines = output.split('\n')[0].split('\\n')
lines = output.split("\n")[0].split("\\n")

for idx, line in enumerate(lines):
split_line = line.lstrip().rstrip().split(" : ")

if split_line[0] == 'Kernel ID':
d['Kernel ID'] = lines[idx + 1].split(" : ")[0]
d['Name'] = lines[idx + 1].split(" : ")[1]
elif split_line[0] == 'Kernel Name':
d['Kernel Name'] = split_line[1]
elif split_line[0] == 'Kernel Type':
d['Kernel Type'] = split_line[1]
elif split_line[0] == 'Code Size':
d['Code Size'] = split_line[1]
elif split_line[0] == 'Param Size':
d['Param Size'] = split_line[1]
elif split_line[0] == 'Workload MACs':
d['Workload MACs'] = split_line[1]
elif split_line[0] == 'IO Memory Space':
d['IO Memory Space'] = split_line[1]
elif split_line[0] == 'Mean Value':
d['Mean Value'] = split_line[1].split(',')
elif split_line[0] == 'Node Count':
d['Node Count'] = split_line[1]
elif split_line[0] == 'Tensor Count':
d['Tensor Count'] = split_line[1]
elif split_line[0] == 'Total Tensor Count':
d['Tensor Count'] = split_line[1]
elif split_line[0] == 'Boundary Input Tensor(s) (H*W*C)':
if split_line[0] == "Kernel ID":
d["Kernel ID"] = lines[idx + 1].split(" : ")[0]
d["Name"] = lines[idx + 1].split(" : ")[1]
elif split_line[0] == "Kernel Name":
d["Kernel Name"] = split_line[1]
elif split_line[0] == "Kernel Type":
d["Kernel Type"] = split_line[1]
elif split_line[0] == "Code Size":
d["Code Size"] = split_line[1]
elif split_line[0] == "Param Size":
d["Param Size"] = split_line[1]
elif split_line[0] == "Workload MACs":
d["Workload MACs"] = split_line[1]
elif split_line[0] == "IO Memory Space":
d["IO Memory Space"] = split_line[1]
elif split_line[0] == "Mean Value":
d["Mean Value"] = split_line[1].split(",")
elif split_line[0] == "Node Count":
d["Node Count"] = split_line[1]
elif split_line[0] == "Tensor Count":
d["Tensor Count"] = split_line[1]
elif split_line[0] == "Total Tensor Count":
d["Tensor Count"] = split_line[1]
elif split_line[0] == "Boundary Input Tensor(s) (H*W*C)":
for i in range(idx + 1, len(lines)):
split_line_i = lines[i].lstrip().rstrip().split(" : ")

Expand All @@ -87,12 +87,14 @@ def _parse(self, output):

name, shape = split_line_i

if shape in d['Boundary Input Tensors']:
raise ValueError("DNNC compiler cannot handle multiple"
" inputs with the same shape")
if shape in d["Boundary Input Tensors"]:
raise ValueError(
"DNNC compiler cannot handle multiple"
" inputs with the same shape"
)

d['Boundary Input Tensors'][shape] = name.split(":")[0]
elif split_line[0] == 'Boundary Output Tensor(s) (H*W*C)':
d["Boundary Input Tensors"][shape] = name.split(":")[0]
elif split_line[0] == "Boundary Output Tensor(s) (H*W*C)":
for i in range(idx + 1, len(lines)):
split_line_i = lines[i].lstrip().rstrip().split(" : ")

Expand All @@ -105,13 +107,12 @@ def _parse(self, output):
# if shape in d['Boundary Output Tensors']:
# raise ValueError("DNNC compiler cannot handle multiple"
# " outputs with the same shape")

d['Boundary Output Tensors'][name] = name + ':0'
d['Boundary Output Tensors Shapes'][shape] = name + ':0'
elif split_line[0] == 'Total Node Count':
d['Total Node Count'] = split_line[1]
elif split_line[0] in ['Input Node(s) (H*W*C)',
'Input Node(s)(H*W*C)']:

d["Boundary Output Tensors"][name] = name + ":0"
d["Boundary Output Tensors Shapes"][shape] = name + ":0"
elif split_line[0] == "Total Node Count":
d["Total Node Count"] = split_line[1]
elif split_line[0] in ["Input Node(s) (H*W*C)", "Input Node(s)(H*W*C)"]:
for i in range(idx + 1, len(lines)):
split_line_i = lines[i].lstrip().rstrip().split(" : ")

Expand All @@ -120,14 +121,15 @@ def _parse(self, output):

name, shape = split_line_i

if shape in d['Input Nodes']:
raise ValueError("DNNC compiler cannot handle multiple"
" inputs with the same shape")
if shape in d["Input Nodes"]:
raise ValueError(
"DNNC compiler cannot handle multiple"
" inputs with the same shape"
)

d['Input Nodes'][shape] = name[:-3]
d["Input Nodes"][shape] = name[:-3]

elif split_line[0] in ['Output Node(s) (H*W*C)',
'Output Node(s)(H*W*C)']:
elif split_line[0] in ["Output Node(s) (H*W*C)", "Output Node(s)(H*W*C)"]:
for i in range(idx + 1, len(lines)):
split_line_i = lines[i].lstrip().rstrip().split(" : ")

Expand All @@ -140,22 +142,22 @@ def _parse(self, output):
# raise ValueError("DNNC compiler cannot handle multiple"
# " outputs with the same shape")

d['Output Nodes'][name[:-3]] = name[:-3]
d['Output Nodes'][shape] = name[:-3]
d["Output Nodes"][name[:-3]] = name[:-3]
d["Output Nodes"][shape] = name[:-3]

return d

def get_input_nodes(self):
# type: () -> Dict[str, str]
return self.d['Input Nodes']
return self.d["Input Nodes"]

def get_output_nodes(self):
# type: () -> Dict[str, str]
return self.d['Boundary Output Tensors']
return self.d["Boundary Output Tensors"]

def get_output_nodes_on_shapes(self):
# type: () -> Dict[str, str]
return self.d['Boundary Output Tensors Shapes']
return self.d["Boundary Output Tensors Shapes"]

def get_dnnc_str(self, str_value: str) -> str:
return str_value.replace('-', '_')
return re.sub(r"[^A-Za-z0-9]", "_", str_value)
148 changes: 144 additions & 4 deletions tests/unit/contrib/compilation_infra.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,8 @@ def _create_conv2d_pool2d_nhwc_oihw(
conv_invalid=False,
kernel_layout="OIHW",
target="DPUCZDX8G-zcu104",
conv_name="conv1",
pool_name="pool1",
) -> XGraph:

kernel_w, kernel_h = w_shape[2], w_shape[3]
Expand All @@ -76,7 +78,7 @@ def _create_conv2d_pool2d_nhwc_oihw(
x1 = px.ops.input("in1", shape=list(in_shape))
w1 = px.ops.constant("weight", W)
conv1 = px.ops.conv2d(
op_name="conv1",
op_name=conv_name,
input_layer=x1,
weights_layer=w1,
kernel_size=[kernel_w, kernel_h],
Expand All @@ -87,7 +89,7 @@ def _create_conv2d_pool2d_nhwc_oihw(
data_layout="NHWC",
)
pool1 = px.ops.pool2d(
op_name="pool1",
op_name=pool_name,
input_layer=conv1,
pool_type=pool_type,
pool_size=list(pool_size),
Expand Down Expand Up @@ -221,6 +223,52 @@ def inputs_func(iter):
shutil.rmtree(build_dir)


def conv2d_pool2d_naming_test(conv_names, pool_names) -> None:
    """Compile a conv2d + pool2d model once per (conv, pool) name pair and
    verify that the compiler-output structure is independent of layer names.

    Args:
        conv_names: Names to assign to the convolution layer, one per run.
        pool_names: Names to assign to the pooling layer, zipped with
            ``conv_names``.
    """
    shape_in = (1, 4, 4, 1)
    dpu_target = "DPUCZDX8G-zcu104"

    def feed(_iteration):
        # Constant all-ones batch used as quantization calibration data.
        return {"in1": np.ones(shape_in, dtype=np.float32)}

    for cname, pname in zip(conv_names, pool_names):
        graph = _create_conv2d_pool2d_nhwc_oihw(
            shape_in,
            (2, 1, 2, 2),
            [0, 0],
            [1, 1],
            [1, 1],
            "Max",
            [2, 2],
            [0, 0],
            [1, 1],
            1,
            False,
            "OIHW",
            dpu_target,
            cname,
            pname,
        )

        work_dir = os.path.join(FILE_PATH, "work")
        build_dir = os.path.join(FILE_PATH, "build")

        # quantize -> optimize -> compile pipeline for the DPU target
        quantizer = TARGET_REGISTRY.get_target_quantizer(dpu_target)
        quantized = quantizer(graph, feed, work_dir=work_dir)
        optimized = px.optimize(quantized, dpu_target)
        compiled = px.compile(
            optimized, dpu_target, work_dir=work_dir, build_dir=build_dir
        )
        out = compiled.get_compiler_output()

        assert list(out.keys()) == ["xp0"]
        assert out.get_in_map("xp0") == {"xinput0": "xinput0:0"}
        # NOTE(review): out-map assertion was disabled upstream — confirm the
        # expected mapping before re-enabling.
        # assert out.get_out_map("xp0") == {pname: "pool1:0"}
        assert len(out.get_code_files("xp0")) == 1

        shutil.rmtree(work_dir)
        shutil.rmtree(build_dir)


def _create_scale_conv2d_nhwc_oihw(
in_shape,
w_shape,
Expand Down Expand Up @@ -620,12 +668,104 @@ def inputs_func(iter):
c_xgraph = px.compile(
opt_xgraph, target, work_dir=work_dir, build_dir=build_dir
)

g = xir.Graph.deserialize(os.path.join(build_dir, "xp0.xmodel"))
# TODO subgraphs[1].get_attr("device") -> *** RuntimeError: bad any_cast
subgraphs = get_child_subgraphs(g)
assert (
len(subgraphs) == expected_nb_subgraphs
), "Expected {0} subgraphs but got: {1}".format(
expected_nb_subgraphs, len(subgraphs)
)
)


def _create_multi_output_conv2d_nhwc_oihw(
    in_shape,
    w_shape,
    conv_padding,
    conv_strides,
    conv_dilation,
    kernel_layout="OIHW",
    target="DPUCZDX8G-zcu104",
    out_names=("out1", "out2"),
) -> XGraph:
    """Build and partition a two-branch conv2d network (NHWC data layout,
    OIHW kernel layout) whose two boundary outputs are named by ``out_names``.

    Topology: in1 -> conv1 -> relu -> [ conv2 (named out_names[0]),
                                        conv3 -> relu (named out_names[1]) ]

    Args:
        in_shape: Input tensor shape (N, H, W, C).
        w_shape: Weight shape of the first convolution.
        conv_padding: (H, W) padding of the first convolution.
        conv_strides: (H, W) strides of the first convolution.
        conv_dilation: (H, W) dilation of the first convolution.
        kernel_layout: Weight layout string; its "O" index selects the
            output-channel dimension of ``w_shape``.
        target: Partitioning target identifier.
        out_names: Names for the two output layers. The default is a tuple
            (not a list) to avoid the shared-mutable-default pitfall;
            callers passing a list are unaffected.

    Returns:
        The partitioned XGraph.
    """
    kernel_w, kernel_h = w_shape[2], w_shape[3]
    W = np.random.randint(-100, 100, size=w_shape).astype(np.float32)
    out_ch = w_shape[kernel_layout.index("O")]

    x1 = px.ops.input("in1", shape=list(in_shape))
    w1 = px.ops.constant("weight", W)
    conv1 = px.ops.conv2d(
        op_name="conv1",
        input_layer=x1,
        weights_layer=w1,
        kernel_size=[kernel_w, kernel_h],
        strides=list(conv_strides),
        padding_hw=list(conv_padding),
        dilation=list(conv_dilation),
        data_layout="NHWC",
    )
    r1 = px.ops.relu("r1", [conv1])

    # First output branch: 1x1 conv that carries out_names[0] directly.
    W2 = np.random.randint(-10, 10, size=(10, out_ch, 1, 1)).astype(np.float32)
    w2 = px.ops.constant("weight2", W2)
    conv2 = px.ops.conv2d(
        op_name=out_names[0],
        input_layer=r1,
        weights_layer=w2,
        kernel_size=[1, 1],
        strides=[1, 1],
        padding_hw=[0, 0],
        dilation=[1, 1],
        data_layout="NHWC",
    )

    # Second output branch: 1x1 conv followed by a relu named out_names[1].
    W3 = np.random.randint(-10, 10, size=(10, out_ch, 1, 1)).astype(np.float32)
    # NOTE(review): this constant reuses the name "weight2" already used for
    # w2 above — possibly intentional, but looks like it should be "weight3";
    # left unchanged because renaming would alter the serialized graph.
    w3 = px.ops.constant("weight2", W3)
    conv3 = px.ops.conv2d(
        op_name="conv3",
        input_layer=r1,
        weights_layer=w3,
        kernel_size=[1, 1],
        strides=[1, 1],
        padding_hw=[0, 0],
        dilation=[1, 1],
        data_layout="NHWC",
    )
    r3 = px.ops.relu(out_names[1], [conv3])

    net = [x1, conv1, r1, conv2, conv3, r3]
    xgraph = XGRAPH_FACTORY.build_from_xlayer(net)
    xgraph = px.partition(xgraph, [target])
    return xgraph


def multi_output_conv2d_naming_test(out_names) -> None:
    """Compile a multi-output conv2d model whose outputs carry ``out_names``
    and verify the compiler-output structure is unaffected by those names.

    Args:
        out_names: Names to assign to the two boundary output layers.
    """
    shape_in = (1, 20, 20, 10)
    dpu_target = "DPUCZDX8G-zcu104"

    graph = _create_multi_output_conv2d_nhwc_oihw(
        shape_in, (10, 10, 2, 2), [0, 0], [1, 1], [1, 1], "OIHW", dpu_target, out_names,
    )

    def feed(_iteration):
        # Constant all-ones batch used as quantization calibration data.
        return {"in1": np.ones(shape_in, dtype=np.float32)}

    work_dir = os.path.join(FILE_PATH, "work")
    build_dir = os.path.join(FILE_PATH, "build")

    # quantize -> optimize -> compile pipeline for the DPU target
    quantizer = TARGET_REGISTRY.get_target_quantizer(dpu_target)
    quantized = quantizer(graph, feed, work_dir=work_dir)
    optimized = px.optimize(quantized, dpu_target)
    compiled = px.compile(optimized, dpu_target, work_dir=work_dir, build_dir=build_dir)
    out = compiled.get_compiler_output()

    assert list(out.keys()) == ["xp0"]
    assert out.get_in_map("xp0") == {"xinput0": "xinput0:0"}
    # NOTE(review): out-map assertion was disabled upstream — confirm the
    # expected mapping before re-enabling.
    # assert out.get_out_map("xp0") == {pool_name: "pool1:0"}
    assert len(out.get_code_files("xp0")) == 1

    shutil.rmtree(work_dir)
    shutil.rmtree(build_dir)
Loading

0 comments on commit 52d7e7c

Please sign in to comment.