Merge branch 'fastmachinelearning:main' into main
dgburnette committed Apr 19, 2024
2 parents c2ed5fb + 1616caf commit b02d224
Showing 12 changed files with 756 additions and 29 deletions.
2 changes: 2 additions & 0 deletions .gitlab-ci.yml
@@ -6,6 +6,8 @@ stages:
 generator:
   stage: generate
   image: python:3.8-alpine
+  variables:
+    N_TESTS_PER_YAML: 5
   tags:
     - k8s-default
   before_script:
14 changes: 13 additions & 1 deletion hls4ml/backends/fpga/fpga_backend.py
@@ -34,8 +34,10 @@
     ExponentPrecisionType,
     FixedPrecisionType,
     IntegerPrecisionType,
+    PrecisionType,
     RoundingMode,
     SaturationMode,
+    UnspecifiedPrecisionType,
     XnorPrecisionType,
 )
 from hls4ml.writer import get_writer
@@ -290,9 +292,12 @@ def get_valid_conv_partition_splits(self, out_height, out_width):

     @classmethod
     def convert_precision_string(cls, precision):
-        if isinstance(precision, IntegerPrecisionType) or isinstance(precision, FixedPrecisionType):
+        if isinstance(precision, PrecisionType):
             return precision

+        if precision.lower() == 'auto':
+            return cls._convert_auto_type(precision)
+
         if precision.startswith('ac_'):
             return cls._convert_ac_type(precision)
         else:
@@ -366,6 +371,13 @@ def _convert_ac_type(cls, precision):
         elif 'int' in precision:
             return IntegerPrecisionType(width, signed)

+    @classmethod
+    def _convert_auto_type(cls, precision):
+        '''
+        Convert a "auto" precision string into the UnspecifiedPrecisionType
+        '''
+        return UnspecifiedPrecisionType()
+
     def product_type(self, data_T, weight_T):
         '''
         Helper function to determine which product implementation to use during inference
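For reference, a minimal usage sketch (not part of this commit) of the updated convert_precision_string; the import paths and constructor arguments below are assumed from the hls4ml package layout.

# Usage sketch only; import paths and arguments are assumptions, not from the commit.
from hls4ml.backends.fpga.fpga_backend import FPGABackend
from hls4ml.model.types import FixedPrecisionType, UnspecifiedPrecisionType

# PrecisionType instances now pass straight through the broadened isinstance check.
fixed = FixedPrecisionType(width=16, integer=6)
assert FPGABackend.convert_precision_string(fixed) is fixed

# The new 'auto' keyword maps to UnspecifiedPrecisionType, to be resolved
# later by the infer_precision_types optimizer pass.
assert isinstance(FPGABackend.convert_precision_string('auto'), UnspecifiedPrecisionType)

# Existing string parsing (e.g. ac_/ap_ types) is unchanged.
print(FPGABackend.convert_precision_string('ac_fixed<16,6>'))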
1 change: 1 addition & 0 deletions hls4ml/backends/quartus/quartus_backend.py
@@ -73,6 +73,7 @@ def _register_flows(self):
             'quartus:inplace_stream_flatten',
             'quartus:skip_softmax',
             'quartus:fix_softmax_table_size',
+            'infer_precision_types',
         ]
         optimization_flow = register_flow('optimize', optimization_passes, requires=[init_flow], backend=self.name)
1 change: 1 addition & 0 deletions hls4ml/backends/vivado/passes/convolution_templates.py
@@ -285,6 +285,7 @@ def format(self, node):
         params['nzeros'] = node.get_weights('depthwise').nzeros
         params['index'] = str(node.index) + '_depthwise'
         params['weight_t'] = node.get_weights('depthwise').type
+        params['bias_t'] = node.get_weights('zero_bias').type
         params['fill_fn'] = 'FillConv1DBuffer'

         if node.get_attr('unscaled'):
1 change: 1 addition & 0 deletions hls4ml/backends/vivado/vivado_backend.py
@@ -110,6 +110,7 @@ def _register_flows(self):
             'vivado:inplace_stream_flatten',
             'vivado:skip_softmax',
             'vivado:fix_softmax_table_size',
+            'infer_precision_types',
         ]
         optimization_flow = register_flow('optimize', optimization_passes, requires=[init_flow], backend=self.name)
2 changes: 2 additions & 0 deletions hls4ml/model/optimizer/__init__.py
@@ -33,6 +33,7 @@
 register_flow(
     'convert',
     [
+        'infer_precision_types',
         'channels_last_converter',
         'fuse_bias_add',
         'remove_useless_transpose',
@@ -51,6 +52,7 @@
         'fuse_consecutive_batch_normalization',
         'fuse_batch_normalization',
         'replace_multidimensional_dense_with_conv',
+        'infer_precision_types',
         'set_precision_concat',
     ],
     requires=['convert'],
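With 'infer_precision_types' now registered in both the convert and optimize flows, a precision left as 'auto' in a user config can be resolved automatically during conversion. A hedged end-to-end sketch follows; the model file, layer name, and exact config keys are illustrative assumptions rather than part of this commit.

# Illustrative sketch only; model path, layer name, and config keys are assumed.
import hls4ml
from tensorflow.keras.models import load_model

model = load_model('model.h5')  # placeholder Keras model

config = hls4ml.utils.config_from_keras_model(model, granularity='name')
# Leave a precision unspecified; the infer_precision_types pass fills it in.
config['LayerName']['dense_1']['Precision']['weight'] = 'auto'

hls_model = hls4ml.converters.convert_from_keras_model(
    model, hls_config=config, backend='Vivado', output_dir='my_prj'
)
hls_model.compile()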
