Skip to content

Commit

Permalink
fix issues with depth_multiplier and separable/depthconv
Browse files Browse the repository at this point in the history
  • Loading branch information
David Burnette committed Apr 30, 2024
1 parent e753456 commit f8a07f1
Show file tree
Hide file tree
Showing 4 changed files with 139 additions and 37 deletions.
96 changes: 92 additions & 4 deletions hls4ml/backends/catapult/passes/convolution_templates.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,7 @@ def __init__(self):
static const unsigned pad_right = {pad_right};
static const unsigned in_height = {in_height};
static const unsigned in_width = {in_width};
static const unsigned n_chan = {n_chan};
static const unsigned n_chan = {n_chan2};
static const unsigned filt_height = {filt_height};
static const unsigned filt_width = {filt_width};
static const unsigned kernel_size = filt_height * filt_width;
Expand Down Expand Up @@ -174,6 +174,51 @@ def __init__(self):
const ac_int<config{index}::filt_height * config{index}::filt_width,false> config{index}::pixels[] = {{{instructions}}};
#endif\n"""

depthwiseconv2d_config_template = """struct config{index} : nnet::depthwiseconv2d_config {{
static const unsigned pad_top = {pad_top};
static const unsigned pad_bottom = {pad_bottom};
static const unsigned pad_left = {pad_left};
static const unsigned pad_right = {pad_right};
static const unsigned in_height = {in_height};
static const unsigned in_width = {in_width};
static const unsigned n_chan = {n_chan};
static const unsigned filt_height = {filt_height};
static const unsigned filt_width = {filt_width};
static const unsigned kernel_size = filt_height * filt_width;
static const unsigned d_mult = {d_mult};
static const unsigned n_filt = d_mult * n_chan;
static const unsigned stride_height = {stride_height};
static const unsigned stride_width = {stride_width};
static const unsigned out_height = {out_height};
static const unsigned out_width = {out_width};
static const unsigned reuse_factor = {reuse};
static const unsigned n_zeros = {nzeros};
static const unsigned multiplier_limit =
DIV_ROUNDUP(kernel_size * n_chan * n_filt, reuse_factor) - n_zeros / reuse_factor;
static const bool store_weights_in_bram = false;
static const unsigned strategy = nnet::{strategy};
static const nnet::conv_implementation implementation = nnet::conv_implementation::{implementation};
static const unsigned min_height = {min_height};
static const unsigned min_width = {min_width};
static const ac_int<filt_height * filt_width,false> pixels[min_height * min_width];
static const unsigned n_partitions = {n_partitions};
static const unsigned n_pixels = out_height * out_width / n_partitions;
template<class data_T, class CONFIG_T>
using fill_buffer = nnet::{fill_fn}<data_T, CONFIG_T>;
typedef {accum_t.name} accum_t;
typedef {bias_t.name} bias_t;
typedef {weight_t.name} weight_t;
typedef {config_t} mult_config;
template<unsigned K, unsigned S, unsigned W>
using scale_index_height = nnet::{scale_index_height_type}<K, S, W>;
template<unsigned K, unsigned S, unsigned W>
using scale_index_width = nnet::{scale_index_width_type}<K, S, W>;
}};
// really this allocation of pixels array ought to be in a .cpp file
#ifndef INCLUDED_MC_TESTBENCH_H
const ac_int<config{index}::filt_height * config{index}::filt_width,false> config{index}::pixels[] = {{{instructions}}};
#endif\n"""

conv2d_function_template = 'nnet::conv_2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
depthconv2d_function_template = (
'nnet::depthwise_conv_2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
Expand All @@ -184,7 +229,7 @@ def __init__(self):

class Conv2DConfigTemplate(LayerConfigTemplate):
    """Generates the HLS config struct(s) for Conv2D-style layers.

    DepthwiseConv2D is intentionally not registered here: it is handled by
    the dedicated DepthwiseConv2DConfigTemplate, which uses the depthwise
    config template instead of the generic conv2d one.
    """

    def __init__(self):
        # Bug fix: the original body called super().__init__ twice in a row;
        # the first call (which also registered DepthwiseConv2D) was
        # immediately superseded by the second. Keep only the effective one.
        super().__init__((Conv2D, Conv2DBatchnorm))
        self.template = conv2d_config_template
        self.mult_template = conv_mult_config_template

Expand Down Expand Up @@ -224,6 +269,48 @@ def format(self, node):
return mult_config + '\n' + conv_config


class DepthwiseConv2DConfigTemplate(LayerConfigTemplate):
    """Config-template generator for DepthwiseConv2D layers.

    Emits two C++ config structs: the multiplier config (config{index}_mult)
    followed by the depthwise layer config (config{index}), joined by a
    newline.
    """

    def __init__(self):
        super().__init__(DepthwiseConv2D)
        self.mult_template = conv_mult_config_template
        self.template = depthwiseconv2d_config_template

    def format(self, node):
        # --- depthwise layer config ---
        conv_params = self._default_config_params(node)
        conv_params.update(
            dilation=node.get_attr('dilation', 1),
            nzeros=node.get_weights('weight').nzeros,
            config_t=f'config{node.index}_mult',
        )

        # Pick the scale-index helper per spatial dimension: unscaled when the
        # input extent already matches the minimal extent, regular otherwise.
        for dim in ('height', 'width'):
            unscaled = node.get_attr(f'in_{dim}') == node.get_attr(f'min_{dim}')
            conv_params[f'scale_index_{dim}_type'] = (
                'scale_index_unscaled' if unscaled else 'scale_index_regular'
            )

        # io_parallel gets a per-layer generated buffer-fill function; streaming
        # I/O uses the generic one.
        is_io_parallel = node.model.config.get_config_value('IOType') == 'io_parallel'
        conv_params['fill_fn'] = (
            f'fill_buffer_{node.index}' if is_io_parallel else 'FillConv2DBuffer'
        )

        conv_config = self.template.format(**conv_params)

        # --- multiplier config ---
        weights = node.get_weights('weight')
        mult_params = self._default_config_params(node)
        mult_params['n_in'] = (
            node.get_attr('n_chan') * node.get_attr('filt_height') * node.get_attr('filt_width')
        )
        mult_params['n_out'] = node.get_attr('n_filt')
        mult_params['nzeros'] = weights.nzeros
        mult_params['product_type'] = get_backend('catapult').product_type(
            node.get_input_variable().type.precision, weights.type.precision
        )
        mult_config = self.mult_template.format(**mult_params)

        return mult_config + '\n' + conv_config


class Conv2DFunctionTemplate(FunctionCallTemplate):
    # Emits the nnet conv-2d function-call line for Conv2D / Conv2DBatchnorm
    # layers, pulling in the conv2d include headers.
    def __init__(self):
        super().__init__((Conv2D, Conv2DBatchnorm), include_header=conv2d_include_list)
Expand Down Expand Up @@ -291,6 +378,7 @@ def format(self, node):
params['nzeros'] = node.get_weights('depthwise').nzeros
params['index'] = str(node.index) + '_depthwise'
params['weight_t'] = node.get_weights('depthwise').type
params['bias_t'] = node.get_weights('zero_bias').type
params['fill_fn'] = 'FillConv1DBuffer'

if node.get_attr('unscaled'):
Expand Down Expand Up @@ -384,7 +472,7 @@ class SeparableConv2DConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__(SeparableConv2D)
self.template = sepconv_config_template
self.depthwise_template = conv2d_config_template
self.depthwise_template = depthwiseconv2d_config_template
self.pointwise_template = conv2d_config_template
self.depthwise_mult_template = conv_mult_config_template
self.pointwise_mult_template = conv_mult_config_template
Expand Down Expand Up @@ -469,7 +557,7 @@ def format(self, node):
# Pointwise mult config
mult_params = self._default_config_params(node)
mult_params['index'] = str(node.index) + '_pointwise'
mult_params['n_in'] = node.get_attr('n_chan')
mult_params['n_in'] = node.get_attr('n_chan2')
mult_params['n_out'] = node.get_attr('n_filt')
mult_params['nzeros'] = node.get_weights('pointwise').nzeros
mult_params['weight_t'] = node.get_weights('pointwise').type
Expand Down
17 changes: 14 additions & 3 deletions hls4ml/converters/keras/convolution.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,10 +60,21 @@ def parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader):

layer['bias_data'] = get_weights_data(data_reader, layer['name'], 'bias')

if 'filters' in keras_layer['config']:
layer['n_filt'] = keras_layer['config']['filters']
layer['n_chan2'] = layer['n_chan']
if 'depth_multiplier' in keras_layer['config']:
# 'SeparableConv2D', 'DepthwiseConv2D'
layer['d_mult'] = keras_layer['config']['depth_multiplier']
if 'filters' in keras_layer['config']:
# 'SeparableConv2D'
layer['n_filt'] = keras_layer['config']['filters']
layer['n_chan2'] = layer['d_mult'] * layer['n_chan']
else:
# 'DepthwiseConv2D'
layer['n_filt'] = layer['d_mult'] * layer['n_chan']
else:
layer['n_filt'] = layer['n_chan']
# 'Conv2D'
layer['n_filt'] = keras_layer['config']['filters']

layer['filt_height'] = keras_layer['config']['kernel_size'][0]
layer['filt_width'] = keras_layer['config']['kernel_size'][1]
layer['stride_height'] = keras_layer['config']['strides'][0]
Expand Down
2 changes: 1 addition & 1 deletion hls4ml/converters/pytorch/convolution.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,11 +68,11 @@ def parse_conv2d_layer(operation, layer_name, input_names, input_shapes, node, c
layer['bias_data'] = class_object.bias.data.numpy()
else:
layer['bias_data'] = None

# Input info
(layer['in_height'], layer['in_width'], layer['n_chan']) = parse_data_format(
input_shapes[0], 'channels_first'
) # Keras's default is channels_last
layer['n_chan2'] = layer['n_chan']

# Additional parameters
layer['n_filt'] = class_object.out_channels
Expand Down
61 changes: 32 additions & 29 deletions hls4ml/model/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -581,6 +581,7 @@ class SeparableConv2D(Layer):
Attribute('out_height'),
Attribute('out_width'),
Attribute('n_chan'),
Attribute('d_mult'),
Attribute('n_filt'),
Attribute('filt_height'),
Attribute('filt_width'),
Expand Down Expand Up @@ -610,20 +611,42 @@ def initialize(self):
self.add_weights_variable(name='depthwise', var_name='d{index}', quantizer=self.get_attr('depthwise_quantizer'))
self.add_weights_variable(name='pointwise', var_name='p{index}', quantizer=self.get_attr('pointwise_quantizer'))

zero_bias_data = np.zeros((self.attributes['n_chan'],))
zero_bias_data = np.zeros((self.attributes['n_chan2'],))
precision = IntegerPrecisionType(width=1, signed=False)
self.add_weights_variable(name='zero_bias', var_name='z{index}', data=zero_bias_data, precision=precision)

self.add_bias(quantizer=self.get_attr('bias_quantizer'))


class DepthwiseConv2D(Conv2D):
    """Depthwise 2D convolution layer.

    Each input channel is convolved with its own set of d_mult kernels, so
    the output channel count is n_filt = d_mult * n_chan.
    """

    _expected_attributes = [
        Attribute('in_height'),
        Attribute('in_width'),
        Attribute('out_height'),
        Attribute('out_width'),
        Attribute('n_chan'),
        # Depth multiplier: output channels produced per input channel.
        Attribute('d_mult'),
        Attribute('n_filt'),
        Attribute('filt_height'),
        Attribute('filt_width'),
        Attribute('stride_height'),
        Attribute('stride_width'),
        Attribute('pad_top'),
        Attribute('pad_bottom'),
        Attribute('pad_left'),
        Attribute('pad_right'),
        WeightAttribute('weight'),
        WeightAttribute('bias'),
        TypeAttribute('weight'),
        TypeAttribute('bias'),
    ]

    def initialize(self):
        """Declare the output variable; its channel dim is n_filt, not n_chan."""
        # Bug fix: each branch assigned `shape` twice (first with 'n_chan',
        # then with 'n_filt'); the dead 'n_chan' stores are removed.
        # NOTE(review): the dim labels still read N_CHAN_{index} even though
        # the dimension size is n_filt — confirm downstream templates key on
        # that label rather than its meaning.
        if self.get_attr('data_format') == 'channels_last':
            shape = [self.attributes['out_height'], self.attributes['out_width'], self.attributes['n_filt']]
            dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
        else:
            shape = [self.attributes['n_filt'], self.attributes['out_height'], self.attributes['out_width']]
            dims = [f'N_CHAN_{self.index}', f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}']
        self.add_output_variable(shape, dims)

Expand Down Expand Up @@ -912,34 +935,14 @@ def initialize(self):


class Resize(Layer):
    """Resize (upsampling) layer with nearest or bilinear interpolation."""

    _expected_attributes = [
        Attribute('in_height'),
        Attribute('in_width'),
        Attribute('out_height'),
        Attribute('out_width'),
        Attribute('n_chan'),
        ChoiceAttribute('algorithm', ['nearest', 'bilinear'], default='nearest'),
        Attribute('align_corners', value_type=bool, default=False),
    ]

    def initialize(self):
        """Declare the output variable from out_width/out_height/n_chan.

        Shapes are always emitted in channels-last order (width/height first,
        channels last).
        NOTE(review): the original body contained a data_format-dependent
        if/else whose results were unconditionally overwritten by a second,
        format-agnostic block, so the dead first block is removed here —
        confirm that channels_first inputs are transposed before this layer.
        """
        inp = self.get_input_variable()

        if len(inp.shape) == 2:  # 1D -> width + chan
            shape = [self.get_attr('out_width'), self.get_attr('n_chan')]
            dims = [f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
        elif len(inp.shape) == 3:  # 2D -> height + width + chan
            shape = [self.get_attr('out_height'), self.get_attr('out_width'), self.get_attr('n_chan')]
            dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
        self.add_output_variable(shape, dims, precision=inp.type.precision)


Expand Down

0 comments on commit f8a07f1

Please sign in to comment.