From 59f7df6a544f77d7cfa40f41c556a09180ebc475 Mon Sep 17 00:00:00 2001
From: Vladimir Loncar
Date: Tue, 27 Aug 2019 18:11:33 +0200
Subject: [PATCH] Packaging changes

---
 MANIFEST.in                                   |  2 +-
 example-models/keras-config.yml               |  8 +--
 example-models/onnx-config.yml                | 15 ++++-
 example-models/pytorch-config.yml             | 20 +++++--
 hls4ml/converters/keras_to_hls.py             | 57 ++-----------------
 hls4ml/converters/onnx_to_hls.py              | 49 +++-------------
 hls4ml/model/hls_model.py                     |  2 +-
 hls4ml/model/optimizer/__init__.py            | 11 ++++
 hls4ml/model/optimizer/optimizer.py           |  7 ---
 hls4ml/model/optimizer/passes/bn_quant.py     |  9 +--
 hls4ml/model/optimizer/passes/nop.py          |  4 +-
 hls4ml/templates/vivado/firmware/parameters.h | 20 +++----
 hls4ml/templates/vivado/myproject_test.cpp    |  1 -
 hls4ml/writer/vivado_writer.py                | 57 +++++++++++--------
 scripts/hls4ml                                | 18 +++---
 15 files changed, 116 insertions(+), 164 deletions(-)

diff --git a/MANIFEST.in b/MANIFEST.in
index 598201f3ad..47a636c11a 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,4 +3,4 @@ include README.md
 graft example-prjs
 graft example-models
 graft test
-recursive-include hls4ml/hls-templates *
+recursive-include hls4ml/templates *
diff --git a/example-models/keras-config.yml b/example-models/keras-config.yml
index fb8b4da31f..09021c0b88 100644
--- a/example-models/keras-config.yml
+++ b/example-models/keras-config.yml
@@ -1,7 +1,7 @@
-KerasJson: example-keras-model-files/KERAS_3layer.json
-KerasH5: example-keras-model-files/KERAS_3layer_weights.h5
-#InputData: example-keras-model-files/KERAS_3layer_input_features.dat
-#OutputPredictions: example-keras-model-files/KERAS_3layer_predictions.dat
+KerasJson: keras/KERAS_3layer.json
+KerasH5: keras/KERAS_3layer_weights.h5
+#InputData: keras/KERAS_3layer_input_features.dat
+#OutputPredictions: keras/KERAS_3layer_predictions.dat
 OutputDir: my-hls-test
 ProjectName: myproject
 XilinxPart: xcku115-flvb2104-2-i
diff --git a/example-models/onnx-config.yml b/example-models/onnx-config.yml
index 793fbe5d74..c6f58726e3 100644
--- a/example-models/onnx-config.yml
+++ b/example-models/onnx-config.yml
@@ -1,9 +1,18 @@
-OnnxModel: example-onnx-model-files/three_layer_keras.onnx
+OnnxModel: onnx/three_layer_keras.onnx
+#InputData: keras/KERAS_3layer_input_features.dat
+#OutputPredictions: keras/KERAS_3layer_predictions.dat
 OutputDir: my-hls-test
 ProjectName: myproject
 XilinxPart: xcku115-flvb2104-2-i
 ClockPeriod: 5
 
 IOType: io_parallel # options: io_serial/io_parallel
-ReuseFactor: 1
-DefaultPrecision: ap_fixed<16,6>
+HLSConfig:
+  Model:
+    Precision: ap_fixed<16,6>
+    ReuseFactor: 1
+#  LayerType:
+#    Dense:
+#      ReuseFactor: 2
+#      Strategy: Resource
+#      Compression: True
diff --git a/example-models/pytorch-config.yml b/example-models/pytorch-config.yml
index c525af72cf..42caad6d9f 100644
--- a/example-models/pytorch-config.yml
+++ b/example-models/pytorch-config.yml
@@ -1,10 +1,18 @@
-#PytorchModel: example-models/two_layer_model.pt
-PytorchModel: example-models/three_layer_model.pt
-OutputDir: my-hls-dir-3L
+PytorchModel: pytorch/three_layer_model.pt
+#InputData: keras/KERAS_3layer_input_features.dat
+#OutputPredictions: keras/KERAS_3layer_predictions.dat
+OutputDir: my-hls-test
 ProjectName: myproject
-XilinxPart: xc7vx690tffg1927-2
+XilinxPart: xcku115-flvb2104-2-i
 ClockPeriod: 5
 
 IOType: io_parallel # options: io_serial/io_parallel
-ReuseFactor: 1
-DefaultPrecision: ap_fixed<18,8>
+HLSConfig:
+  Model:
+    Precision: ap_fixed<16,6>
+    ReuseFactor: 1
+#  LayerType:
+#    Dense:
+#      ReuseFactor: 2
+#      Strategy: Resource
+#      Compression: True
diff --git a/hls4ml/converters/keras_to_hls.py b/hls4ml/converters/keras_to_hls.py
index e723ed2fdb..d71ff4d0fa 100644
--- a/hls4ml/converters/keras_to_hls.py
+++ b/hls4ml/converters/keras_to_hls.py
@@ -1,23 +1,13 @@
 from __future__ import print_function
 import numpy as np
 import h5py
-import os
-import tarfile
 import json
-import argparse
-import yaml
-import sys
-from shutil import copyfile
 import math
 
-MAXMULT = 4096
+from hls4ml.model import HLSModel
+from hls4ml.model.optimizer import optimize_model
 
-filedir = os.path.dirname(os.path.abspath(__file__))
-sys.path.insert(0,os.path.join(filedir, "..", "hls-writer"))
-from hls_writer import parse_config, write_hls
-sys.path.insert(0,os.path.join(filedir, "..", "hls-writer/optimizer"))
-from optimizer import optimize_model
-from hls_model import HLSModel
+MAXMULT = 4096
 
 class KerasDataReader:
     def __init__(self, config):
@@ -49,42 +39,11 @@ def h5_visitor_func(name):
     return shape
 
 
-############################################################################################
-## M A I N
-############################################################################################
-def main():
-
-    # Parse command line arguments
-    parser = argparse.ArgumentParser(description='')
-    parser.add_argument("-c", action='store', dest='config',
-                        help="Configuration file.")
-    args = parser.parse_args()
-    if not args.config: parser.error('A configuration file needs to be specified.')
-
-    configDir = os.path.abspath(os.path.dirname(args.config))
-    yamlConfig = parse_config(args.config)
-    if not os.path.isabs(yamlConfig['OutputDir']):
-        yamlConfig['OutputDir'] = os.path.join(configDir, yamlConfig['OutputDir'])
-    if not os.path.isabs(yamlConfig['KerasH5']):
-        yamlConfig['KerasH5'] = os.path.join(configDir, yamlConfig['KerasH5'])
-    if not os.path.isabs(yamlConfig['KerasJson']):
-        yamlConfig['KerasJson'] = os.path.join(configDir, yamlConfig['KerasJson'])
-    if 'InputData' in yamlConfig and not os.path.isabs(yamlConfig['InputData']):
-        yamlConfig['InputData'] = os.path.join(configDir, yamlConfig['InputData'])
-    if 'OutputPredictions' in yamlConfig and not os.path.isabs(yamlConfig['OutputPredictions']):
-        yamlConfig['OutputPredictions'] = os.path.join(configDir, yamlConfig['OutputPredictions'])
-
-    if not (yamlConfig["IOType"] == "io_parallel" or yamlConfig["IOType"] == "io_serial"):
-        raise Exception('ERROR: Invalid IO type')
-    return yamlConfig
-
-def keras_to_hls_model(yamlConfig):
+def keras_to_hls(yamlConfig):
 
     ######################
     ## Do translation
     ######################
-    if not os.path.isdir("{}/firmware/weights".format(yamlConfig['OutputDir'])):
-        os.makedirs("{}/firmware/weights".format(yamlConfig['OutputDir']))
 
     #This is a list of dictionaries to hold all the layer info we need to generate HLS
     layer_list = []
@@ -376,14 +335,8 @@ def keras_to_hls_model(yamlConfig):
     #################
 
     reader = KerasDataReader(yamlConfig)
+    print('Creating HLS model')
     hls_model = HLSModel(yamlConfig, reader, layer_list, input_layers, output_layers)
     optimizers = ['eliminate_linear_activation', 'merge_batch_norm_quantized_tanh', 'quantize_dense_output']
     optimize_model(hls_model, optimizers)
-    #write_hls(hls_model)
     return hls_model
-
-
-if __name__ == "__main__":
-    yamlConfig = main()
-    hls_model = keras_to_hls_model(yamlConfig)
-    write_hls(hls_model)
diff --git a/hls4ml/converters/onnx_to_hls.py b/hls4ml/converters/onnx_to_hls.py
index f97f443880..df132bec17 100644
--- a/hls4ml/converters/onnx_to_hls.py
+++ b/hls4ml/converters/onnx_to_hls.py
@@ -1,23 +1,14 @@
 from __future__ import print_function
 import numpy as np
-import h5py
-import os
-import tarfile
-import json
-import argparse
-import yaml
-import sys
-from shutil import copyfile
 import math
 
 from onnx import ModelProto, GraphProto, NodeProto, TensorProto
 from onnx import optimizer, helper, numpy_helper, shape_inference
-MAXMULT = 4096
+from hls4ml.writer.vivado_writer import write_hls
+from hls4ml.model import HLSModel
+from hls4ml.model.optimizer import optimize_model
 
-filedir = os.path.dirname(os.path.abspath(__file__))
-sys.path.insert(0,os.path.join(filedir, "..", "hls-writer"))
-from hls_writer import parse_config, write_hls
-from hls_model import HLSModel
+MAXMULT = 4096
 
 class ONNXDataReader:
     def __init__(self, model):
@@ -126,33 +117,11 @@ def compute_pads_2d(operation, layer):
     return pads
 
 
-############################################################################################
-## M A I N
-############################################################################################
-def main():
-
-    # Parse command line arguments
-    parser = argparse.ArgumentParser(description='')
-    parser.add_argument("-c", action='store', dest='config',
-                        help="Configuration file.")
-    args = parser.parse_args()
-    if not args.config: parser.error('A configuration file needs to be specified.')
-
-    configDir = os.path.abspath(os.path.dirname(args.config))
-    yamlConfig = parse_config(args.config)
-    if not os.path.isabs(yamlConfig['OutputDir']):
-        yamlConfig['OutputDir'] = os.path.join(configDir, yamlConfig['OutputDir'])
-    if not os.path.isabs(yamlConfig['OnnxModel']):
-        yamlConfig['OnnxModel'] = os.path.join(configDir, yamlConfig['OnnxModel'])
-
-    if not (yamlConfig["IOType"] == "io_parallel" or yamlConfig["IOType"] == "io_serial"):
-        raise Exception('ERROR: Invalid IO type')
+def onnx_to_hls(yamlConfig):
 
     ######################
     ## Do translation
     ######################
-    if not os.path.isdir("{}/firmware/weights".format(yamlConfig['OutputDir'])):
-        os.makedirs("{}/firmware/weights".format(yamlConfig['OutputDir']))
 
     #This is a list of dictionaries to hold all the layer info we need to generate HLS
     layer_list = []
@@ -402,8 +371,8 @@ def main():
     ## Generate HLS
     #################
 
+    print('Creating HLS model')
     hls_model = HLSModel(yamlConfig, reader, layer_list, input_layers, output_layers)
-    write_hls(hls_model)
-
-if __name__ == "__main__":
-    main()
+    optimizers = ['eliminate_linear_activation', 'merge_batch_norm_quantized_tanh', 'quantize_dense_output']
+    optimize_model(hls_model, optimizers)
+    return hls_model
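With this patch the converters only build and optimize an HLSModel; emitting the Vivado project is a separate step handled by hls4ml.writer.vivado_writer.write_hls(). A minimal sketch of the programmatic flow, mirroring what convert() in scripts/hls4ml does (the config path below is illustrative):

    import yaml

    import hls4ml
    from hls4ml.writer.vivado_writer import write_hls

    # Parse a conversion config such as example-models/keras-config.yml
    with open('example-models/keras-config.yml') as f:
        yamlConfig = yaml.load(f, Loader=yaml.SafeLoader)

    # Build the HLSModel and run the registered optimizer passes;
    # no project files are written at this point
    hls_model = hls4ml.converters.keras_to_hls(yamlConfig)

    # Emit the HLS project (firmware/, nnet_utils/, test bench, build_prj.tcl)
    write_hls(hls_model)

Note that the converter no longer rewrites relative model paths against the config file's directory (that logic lived in the removed main()), so the paths in the YAML are taken as given.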
diff --git a/hls4ml/model/hls_model.py b/hls4ml/model/hls_model.py
index 553311ed12..342ce087ba 100644
--- a/hls4ml/model/hls_model.py
+++ b/hls4ml/model/hls_model.py
@@ -5,7 +5,7 @@
 from enum import Enum
 from collections import OrderedDict
 
-from templates import get_config_template, get_function_template
+from .templates import get_config_template, get_function_template
 
 class HLSConfig(object):
     def __init__(self, config):
diff --git a/hls4ml/model/optimizer/__init__.py b/hls4ml/model/optimizer/__init__.py
index e69de29bb2..abcf4ebdbc 100644
--- a/hls4ml/model/optimizer/__init__.py
+++ b/hls4ml/model/optimizer/__init__.py
@@ -0,0 +1,11 @@
+from __future__ import absolute_import
+
+from .optimizer import OptimizerPass, register_pass, get_optimizer, optimize_model
+
+
+from .passes.nop import EliminateLinearActivation
+from .passes.bn_quant import MergeBatchNormAndQuantizedTanh, QuantizeDenseOutput
+
+register_pass('eliminate_linear_activation', EliminateLinearActivation)
+register_pass('merge_batch_norm_quantized_tanh', MergeBatchNormAndQuantizedTanh)
+register_pass('quantize_dense_output', QuantizeDenseOutput)
\ No newline at end of file
diff --git a/hls4ml/model/optimizer/optimizer.py b/hls4ml/model/optimizer/optimizer.py
index 356b15041c..1a21d5d1b1 100644
--- a/hls4ml/model/optimizer/optimizer.py
+++ b/hls4ml/model/optimizer/optimizer.py
@@ -41,10 +41,3 @@ def optimize_model(model, passes=None):
             break
         else:
             optimization_done = True
-
-from passes.nop import EliminateLinearActivation
-from passes.bn_quant import MergeBatchNormAndQuantizedTanh, QuantizeDenseOutput
-
-register_pass('eliminate_linear_activation', EliminateLinearActivation)
-register_pass('merge_batch_norm_quantized_tanh', MergeBatchNormAndQuantizedTanh)
-register_pass('quantize_dense_output', QuantizeDenseOutput)
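The built-in passes are now registered in hls4ml/model/optimizer/__init__.py instead of at the bottom of optimizer.py, so importing the package is enough to make them available to optimize_model(). A rough sketch of how an additional pass could hook into the same registry; the pass itself is hypothetical, and the transform(model, node) signature plus its changed/unchanged return convention are assumed from how optimize_model() drives the bundled passes:

    from hls4ml.model.optimizer import OptimizerPass, register_pass, optimize_model

    class ReportDenseLayers(OptimizerPass):
        # Hypothetical diagnostic pass: inspects the graph, changes nothing
        def match(self, node):
            return node.__class__.__name__ == 'Dense'

        def transform(self, model, node):
            # Assumed convention: return False when the model graph was not modified
            print('Dense layer found:', node.name)  # node.name assumed to exist on layers
            return False

    register_pass('report_dense_layers', ReportDenseLayers)

    # hls_model comes from one of the converters, e.g. keras_to_hls(yamlConfig):
    # optimize_model(hls_model, ['report_dense_layers'])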
diff --git a/hls4ml/model/optimizer/passes/bn_quant.py b/hls4ml/model/optimizer/passes/bn_quant.py
index 4d5dafd62e..32467f0bd2 100644
--- a/hls4ml/model/optimizer/passes/bn_quant.py
+++ b/hls4ml/model/optimizer/passes/bn_quant.py
@@ -1,12 +1,9 @@
 import numpy as np
-import sys
 import re
 
-sys.path.insert(0, '../')
-from optimizer import OptimizerPass
-sys.path.insert(0, '../..')
-import hls_model
-import templates
+from ..optimizer import OptimizerPass
+import hls4ml.model.hls_model as hls_model
+import hls4ml.model.templates as templates
 
 class BatchNormalizationQuantizedTanh(hls_model.Layer):
     ''' Merged Batch Normalization and quantized (binary or ternary) Tanh layer.
diff --git a/hls4ml/model/optimizer/passes/nop.py b/hls4ml/model/optimizer/passes/nop.py
index 3721005dcf..480702b9a4 100644
--- a/hls4ml/model/optimizer/passes/nop.py
+++ b/hls4ml/model/optimizer/passes/nop.py
@@ -1,6 +1,4 @@
-import sys
-sys.path.insert(0, '../')
-from optimizer import OptimizerPass
+from ..optimizer import OptimizerPass
 
 class EliminateLinearActivation(OptimizerPass):
     def match(self, node):
diff --git a/hls4ml/templates/vivado/firmware/parameters.h b/hls4ml/templates/vivado/firmware/parameters.h
index 8aa80a68b6..19e2ef8511 100644
--- a/hls4ml/templates/vivado/firmware/parameters.h
+++ b/hls4ml/templates/vivado/firmware/parameters.h
@@ -4,16 +4,16 @@
 #include <complex>
 #include "ap_int.h"
 #include "ap_fixed.h"
-#include "nnet_dense.h"
-#include "nnet_dense_large.h"
-#include "nnet_dense_compressed.h"
-#include "nnet_conv.h"
-#include "nnet_conv2d.h"
-#include "nnet_activation.h"
-#include "nnet_common.h"
-#include "nnet_batchnorm.h"
-#include "nnet_pooling.h"
-#include "nnet_merge.h"
+#include "nnet_utils/nnet_dense.h"
+#include "nnet_utils/nnet_dense_large.h"
+#include "nnet_utils/nnet_dense_compressed.h"
+#include "nnet_utils/nnet_conv.h"
+#include "nnet_utils/nnet_conv2d.h"
+#include "nnet_utils/nnet_activation.h"
+#include "nnet_utils/nnet_common.h"
+#include "nnet_utils/nnet_batchnorm.h"
+#include "nnet_utils/nnet_pooling.h"
+#include "nnet_utils/nnet_merge.h"
 
 //hls-fpga-machine-learning insert numbers
 
diff --git a/hls4ml/templates/vivado/myproject_test.cpp b/hls4ml/templates/vivado/myproject_test.cpp
index d00e010615..82ae6352d3 100644
--- a/hls4ml/templates/vivado/myproject_test.cpp
+++ b/hls4ml/templates/vivado/myproject_test.cpp
@@ -24,7 +24,6 @@
 
 #include "firmware/parameters.h"
 #include "firmware/myproject.h"
-#include "nnet_helpers.h"
 
 #define CHECKPOINT 5000
 
diff --git a/hls4ml/writer/vivado_writer.py b/hls4ml/writer/vivado_writer.py
index 758abaca94..8b6c1aef9f 100644
--- a/hls4ml/writer/vivado_writer.py
+++ b/hls4ml/writer/vivado_writer.py
@@ -5,17 +5,9 @@
 import numpy as np
 import os
 import re
+import glob
 from collections import OrderedDict
 
-#######################################
-## Config module
-#######################################
-def parse_config(config_file) :
-
-    print("Loading configuration from", config_file)
-    config = open(config_file, 'r')
-    return yaml.load(config)
-
 #######################################
 ## Print weight array to C++
 #######################################
@@ -34,14 +26,6 @@ def print_array_to_cpp(var, odir):
     f.write(var.definition_cpp())
     f.write(" = {")
 
-    if 'int' in var.type.precision:
-        precision_fmt = '%d'
-    else:
-        precision_bits = re.search('.+<(.+?)>', var.type.precision).group(1).split(',')
-        decimal_bits = int(precision_bits[0]) - int(precision_bits[1])
-        decimal_spaces = int(np.floor(np.log10(2 ** decimal_bits - 1))) + 1
-        precision_fmt = '%.{}f'.format(decimal_spaces)
-
     #fill c++ array.
     #not including internal brackets for multidimensional case
     sep = ''
@@ -51,13 +35,17 @@ def print_array_to_cpp(var, odir):
     f.write("};\n")
     f.close()
 
+def write_project_dir(model):
+    if not os.path.isdir("{}/firmware/weights".format(model.config.get_output_dir())):
+        os.makedirs("{}/firmware/weights".format(model.config.get_output_dir()))
+
 def write_project_cpp(model):
     ###################
     ## myproject.cpp
     ###################
 
     filedir = os.path.dirname(os.path.abspath(__file__))
-    f = open(os.path.join(filedir,'../hls-template/firmware/myproject.cpp'),'r')
+    f = open(os.path.join(filedir,'../templates/vivado/firmware/myproject.cpp'),'r')
     fout = open('{}/firmware/{}.cpp'.format(model.config.get_output_dir(), model.config.get_project_name()),'w')
 
     model_inputs = model.get_input_variables()
@@ -141,7 +129,7 @@ def write_project_header(model):
     #######################
 
     filedir = os.path.dirname(os.path.abspath(__file__))
-    f = open(os.path.join(filedir,'../hls-template/firmware/myproject.h'),'r')
+    f = open(os.path.join(filedir,'../templates/vivado/firmware/myproject.h'),'r')
     fout = open('{}/firmware/{}.h'.format(model.config.get_output_dir(), model.config.get_project_name()),'w')
 
     model_inputs = model.get_input_variables()
@@ -175,7 +163,7 @@ def write_project_header(model):
 def write_parameters(model):
 
     filedir = os.path.dirname(os.path.abspath(__file__))
-    f = open(os.path.join(filedir,'../hls-template/firmware/parameters.h'),'r')
+    f = open(os.path.join(filedir,'../templates/vivado/firmware/parameters.h'),'r')
     fout = open('{}/firmware/parameters.h'.format(model.config.get_output_dir()),'w')
 
     for line in f.readlines():
@@ -228,7 +216,7 @@ def write_test_bench(model):
     if output_predictions is not None:
         copyfile(output_predictions, '{}/tb_data/tb_output_predictions.dat'.format(model.config.get_output_dir()))
 
-    f = open(os.path.join(filedir,'../hls-template/myproject_test.cpp'),'r')
+    f = open(os.path.join(filedir,'../templates/vivado/myproject_test.cpp'),'r')
     fout = open('{}/{}_test.cpp'.format(model.config.get_output_dir(), model.config.get_project_name()),'w')
 
     for line in f.readlines():
@@ -302,10 +290,10 @@ def write_build_script(model):
     ###################
 
     filedir = os.path.dirname(os.path.abspath(__file__))
-    nnetdir = os.path.abspath(os.path.join(filedir, "../nnet_utils"))
+    nnetdir = os.path.abspath(os.path.join(filedir, "../templates/vivado/nnet_utils"))
    relpath = os.path.relpath(nnetdir, start=model.config.get_output_dir())
 
-    f = open(os.path.join(filedir,'../hls-template/build_prj.tcl'),'r')
+    f = open(os.path.join(filedir,'../templates/vivado/build_prj.tcl'),'r')
     fout = open('{}/build_prj.tcl'.format(model.config.get_output_dir()),'w')
 
     for line in f.readlines():
@@ -322,6 +310,25 @@
     f.close()
     fout.close()
 
+def write_nnet_utils(model):
+    ###################
+    ## nnet_utils
+    ###################
+
+    filedir = os.path.dirname(os.path.abspath(__file__))
+
+    srcpath = os.path.join(filedir,'../templates/vivado/nnet_utils/')
+    dstpath = '{}/firmware/nnet_utils/'.format(model.config.get_output_dir())
+
+    if not os.path.exists(dstpath):
+        os.mkdir(dstpath)
+
+    headers = [os.path.basename(h) for h in glob.glob(srcpath + '*.h')]
+
+    for h in headers:
+        copyfile(srcpath + h, dstpath + h)
+
+
 def write_tar(model):
     ###################
     # Tarball output
@@ -331,10 +338,14 @@
     archive.add(model.config.get_output_dir(), recursive=True)
 
 def write_hls(model):
+    print('Writing HLS project')
+    write_project_dir(model)
     write_project_cpp(model)
     write_project_header(model)
     write_weights(model)
     write_parameters(model)
     write_test_bench(model)
     write_build_script(model)
+    write_nnet_utils(model)
     write_tar(model)
+    print('Done')
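For orientation, write_hls() now stages the whole project under OutputDir before write_tar() archives it. Roughly the following layout, with names taken from the writer functions above (tb_data contents depend on whether InputData/OutputPredictions are set in the config):

    <OutputDir>/
        build_prj.tcl
        <ProjectName>_test.cpp
        tb_data/                  # test bench vectors copied from the config, when given
        firmware/
            <ProjectName>.cpp
            <ProjectName>.h
            parameters.h
            weights/              # generated weight headers
            nnet_utils/           # headers copied from hls4ml/templates/vivado/nnet_utils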
diff --git a/scripts/hls4ml b/scripts/hls4ml
index 4da2f3805e..1a0638631d 100755
--- a/scripts/hls4ml
+++ b/scripts/hls4ml
@@ -10,7 +10,7 @@ import yaml
 import hls4ml
 
 def parse_config(config_file):
-    print("Loading configuration from", config_file)
+    print('Loading configuration from', config_file)
     config = open(config_file, 'r')
     return yaml.load(config, Loader=yaml.SafeLoader)
 
@@ -18,9 +18,9 @@ def main():
     parser = argparse.ArgumentParser(description='HLS4ML - Machine learning inference in FPGAs')
     subparsers = parser.add_subparsers()
 
-    config_parser = subparsers.add_parser("config", help='Create a conversion configuration file')
-    convert_parser = subparsers.add_parser("convert", help='Convert Keras or ONNX model to HLS')
-    build_parser = subparsers.add_parser("build")
+    config_parser = subparsers.add_parser('config', help='Create a conversion configuration file')
+    convert_parser = subparsers.add_parser('convert', help='Convert Keras or ONNX model to HLS')
+    build_parser = subparsers.add_parser('build', help='Build generated HLS project')
 
     config_parser.add_argument('-m', '--model', help='Model file to convert (Keras .h5 or .json, or ONNX .onnx file)', default=None)
     config_parser.add_argument('-w', '--weights', help='Optional weights file (if Keras .json file is provided))', default=None)
@@ -51,12 +51,16 @@ def config(args):
 
 def convert(args):
     yamlConfig = parse_config(args.config)
+    model = None
     if 'OnnxModel' in yamlConfig:
-        hls4ml.converters.onnx_to_hls(yamlConfig)
+        model = hls4ml.converters.onnx_to_hls(yamlConfig)
     elif 'PytorchModel' in yamlConfig:
-        hls4ml.converters.pytorch_to_hls(yamlConfig)
+        model = hls4ml.converters.pytorch_to_hls(yamlConfig)
     else:
-        hls4ml.converters.keras_to_hls(yamlConfig)
+        model = hls4ml.converters.keras_to_hls(yamlConfig)
+
+    if model is not None:
+        hls4ml.writer.vivado_writer.write_hls(model)
 
 def build(args):
     if args.project is None: