Working Leaky Relu example
BLMartin99 committed Jun 27, 2023
1 parent b3f4781 commit c536152
Showing 11 changed files with 240 additions and 0 deletions.
43 changes: 43 additions & 0 deletions example-prjs/leaky_relu/catapult.py
@@ -0,0 +1,43 @@

import hls4ml
# import pprint
import yaml
import numpy as np

print(hls4ml.__version__)

with open('config.yml', 'r') as ymlfile:
config = yaml.safe_load(ymlfile)

# try tweaking the reuse_factor on one layer to get different pipelining
# config['HLSConfig']['LayerName']['fc1']['ReuseFactor'] = 4

print('NETWORK')
print(config)

config['OutputDir'] = 'my-Catapult-test'
config['Backend'] = 'Catapult'
config['IOType'] = 'io_stream'

config['HLSConfig']['Model']['Strategy'] = 'Latency'
#config['HLSConfig']['Model']['Strategy'] = 'Resource'

# default threshold is infinity
config['HLSConfig']['Model']['BramFactor'] = np.inf
# set to zero to force all weights onto (external function) interface
config['HLSConfig']['Model']['BramFactor'] = 0

print('CURRENT CONFIGURATION')
print('Backend='+config['Backend'])
print('IOType='+config['IOType'])
print('BramFactor={bf}'.format(bf=config['HLSConfig']['Model']['BramFactor']))

# pprint.pprint(config)

# Convert it to an HLS project
hls_model = hls4ml.converters.keras_to_hls(config)

hls_model.build(vsynth=False)

# URL for this info: https://fastmachinelearning.org/hls4ml/setup/QUICKSTART.html
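Note (illustration only, not part of the commit): because this model has no weights, the converted model can be sanity-checked against the testbench data before the full Catapult build. A minimal sketch, assuming hls4ml's compile()/predict() C-simulation calls and reusing the hls_model object created above:

  import numpy as np
  x = np.loadtxt('tb_input_features.dat', dtype=np.float32)
  hls_model.compile()              # build the C++ emulation library for C simulation
  y_hls = hls_model.predict(x)     # run the converted model on the testbench inputs
  print(y_hls)                     # compare against tb_output_predictions.dat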

15 changes: 15 additions & 0 deletions example-prjs/leaky_relu/config.yml
@@ -0,0 +1,15 @@
Backend: Catapult
KerasJson: leaky_relu.json
KerasH5: leaky_relu_weights.h5
OutputDir: my-Catapult-test
ProjectName: leaky_relu
XilinxPart: xcku115-flvb2104-2-i
Part: xcku115-flvb2104-2-i
ClockPeriod: 5

IOType: io_parallel
HLSConfig:
Model:
Precision: ap_fixed<16, 6>
ReuseFactor: 1
Strategy: Latency
Binary file added example-prjs/leaky_relu/leaky_relu.h5
Binary file not shown.
1 change: 1 addition & 0 deletions example-prjs/leaky_relu/leaky_relu.json
@@ -0,0 +1 @@
{"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 25], "dtype": "float32", "sparse": false, "ragged": false, "name": "input_1"}}, {"class_name": "LeakyReLU", "config": {"name": "leaky_re_lu", "trainable": true, "dtype": "float32", "alpha": 0.30000001192092896}}]}, "keras_version": "2.11.0", "backend": "tensorflow"}
51 changes: 51 additions & 0 deletions example-prjs/leaky_relu/leaky_relu.py
@@ -0,0 +1,51 @@
import tensorflow as tf
import numpy as np

# Create a single-layer leaky ReLU model that takes a 25-element input array
def create_model():
# Create a model
model = tf.keras.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=(25,)))
model.add(tf.keras.layers.LeakyReLU())

return model

# Save the model in the formats hls4ml expects
def save_model(model, name=None):
# Save as model.h5, model_weights.h5, and model.json
if name is None:
name = model.name
model.save(name + '.h5')
model.save_weights(name + '_weights.h5')
with open(name + '.json', 'w') as outfile:
outfile.write(model.to_json())
return

if __name__ == '__main__':
model = create_model()
save_model(model, name='leaky_relu')

# Image Matrix
image_mat = np.array([
[ 1, -2, 3, -4, 5, -5, 1, -2, 3, -4, 4, -5, 1, -2, 3, -3, 4, -5, 1, -2, 2, -3, 4, -5, 1 ]
])

# Get prediction
prediction = model.predict(image_mat)
print("Image Matrix\n")
print(image_mat)
print("Prediction\n")
print(prediction)

image_mat2 = np.array([
[ -1, 2, -3, 4, -5, -6, 7, -8, 9, -10, -11, 12, -13, 14, -15, -16, 17, -18, 19, -20, -21, 22, -23, 24, -25 ]
])

# Get prediction
prediction = model.predict(image_mat2)
print("Image Matrix\n")
print(image_mat2)
print("Prediction\n")
print(prediction)


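Note: leaky_relu.py saves the model and prints two predictions, but it does not write the tb_input_features.dat / tb_output_predictions.dat files added in this commit. A minimal sketch of how they could be produced from the same arrays, appended to the __main__ block above (the np.savetxt formatting is an assumption, not taken from the commit):

  # stack the two example inputs used above
  inputs = np.vstack([image_mat, image_mat2]).astype(np.float32)
  outputs = model.predict(inputs)
  # one sample per line, space-separated
  np.savetxt('tb_input_features.dat', inputs, fmt='%g')
  np.savetxt('tb_output_predictions.dat', outputs, fmt='%.8g')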
Binary file added example-prjs/leaky_relu/leaky_relu_weights.h5
Binary file not shown.
44 changes: 44 additions & 0 deletions example-prjs/leaky_relu/run_catapult.sh
@@ -0,0 +1,44 @@
#! /bin/bash

# This script runs the Catapult flows to generate the HLS.

VENV=/wv/scratch-baimar9c/venv

MGC_HOME=/wv/hlsb/CATAPULT/TOT/CURRENT/aol/Mgc_home
export MGC_HOME

export PATH=/wv/hlstools/python/python37/bin:$PATH:$XILINX_VIVADO/bin:$MGC_HOME/bin
export LD_LIBRARY_PATH=/wv/hlstools/python/python37/lib:$XILINX_VIVADO/lib/lnx64.o:$MGC_HOME/lib
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python

# needed for pytest
export OSTYPE=linux-gnu

echo "Activating Virtual Environment..."
# bash
source $VENV/bin/activate

rm -rf ./my-Catapult-test*

# to run catapult+vivado_rtl
sed -e 's/Vivado/Catapult/g' vivado.py >catapult.py
# to only run catapult
# sed -e 's/Vivado/Catapult/g' vivado.py | sed -e 's/vsynth=True/vsynth=False/g' >catapult.py

# actually run HLS4ML + Catapult (+ optional vivado RTL)
python3 catapult.py

# run just the C++ execution
echo ""
echo "====================================================="
echo "====================================================="
echo "C++ EXECUTION"
pushd my-Catapult-test; rm -f a.out; $MGC_HOME/bin/g++ -std=c++17 -I. -DWEIGHTS_DIR=\"firmware/weights\" -Ifirmware -I$MGC_HOME/shared/include firmware/leaky_relu.cpp leaky_relu_test.cpp; ./a.out; popd

# Using VSCode setup generated by Catapult
echo ""
echo "====================================================="
echo "====================================================="
echo "To launch VSCode on the C++ generated by hls4ml:"
echo "setenv LD_LIBRARY_PATH $MGC_HOME/lib:$MGC_HOME/shared/lib"
echo "pushd my-Catapult-test; /wv/hlstools/vscode/LATEST/code Catapult.code-workspace"
39 changes: 39 additions & 0 deletions example-prjs/leaky_relu/run_vivado.sh
@@ -0,0 +1,39 @@
#! /bin/bash

# This script runs the Vivado flows to generate the HLS.

VENV=$HOME/venv

MGC_HOME=/wv/hlsb/CATAPULT/TOT/CURRENT/aol/Mgc_home
export MGC_HOME

export PATH=/wv/hlstools/python/python37/bin:$PATH:$XILINX_VIVADO/bin:$MGC_HOME/bin
export LD_LIBRARY_PATH=/wv/hlstools/python/python37/lib:$XILINX_VIVADO/lib/lnx64.o:$MGC_HOME/lib
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python

# needed for pytest
export OSTYPE=linux-gnu

echo "Activating Virtual Environment..."
# bash
source $VENV/bin/activate

rm -rf ./my-Vivado-test*

mkdir -p tb_data

# to run catapult+vivado_rtl
sed -e 's/Vivado/Catapult/g' vivado.py >catapult.py
# to only run catapult
# sed -e 's/Vivado/Catapult/g' vivado.py | sed -e 's/vsynth=True/vsynth=False/g' >catapult.py

# actually run HLS4ML + Vivado HLS
python3 vivado.py

# run just the C++ execution
echo ""
echo "====================================================="
echo "====================================================="
echo "C++ EXECUTION"
pushd my-Vivado-test; rm -f a.out; $MGC_HOME/bin/g++ -g -std=c++11 -I. -DWEIGHTS_DIR=\"firmware/weights\" -Ifirmware -Ifirmware/ap_types -I$MGC_HOME/shared/include firmware/leaky_relu.cpp leaky_relu_test.cpp; ./a.out; popd

2 changes: 2 additions & 0 deletions example-prjs/leaky_relu/tb_input_features.dat
@@ -0,0 +1,2 @@
1 -2 3 -4 5 -5 1 -2 3 -4 4 -5 1 -2 3 -3 4 -5 1 -2 2 -3 4 -5 1
-1 2 -3 4 -5 -6 7 -8 9 -10 -11 12 -13 14 -15 -16 17 -18 19 -20 -21 22 -23 24 -25
2 changes: 2 additions & 0 deletions example-prjs/leaky_relu/tb_output_predictions.dat
@@ -0,0 +1,2 @@
1 -0.6 3 -1.2 5 -1.5 1 -0.6 3 -1.2 4 -1.5 1 -0.6 3 -0.90000004 4 -1.5 1 -0.6 2 -0.90000004 4 -1.5 1
-0.3 2 -0.90000004 4 -1.5 -1.8000001 7 -2.4 9 -3 -3.3000002 12 -3.9 14 -4.5 -4.8 17 -5.4 19 -6 -6.3 22 -6.9 24 -7.5000005
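These expected outputs are just the testbench inputs passed through LeakyReLU with alpha = 0.3: y = x for x >= 0 and y = 0.3*x otherwise (values such as -0.90000004 and -7.5000005 are the float32 roundings of 0.3*-3 and 0.3*-25). A quick way to check the file, shown as an illustration only:

  import numpy as np
  alpha = np.float32(0.3)                                   # Keras LeakyReLU default slope
  x = np.loadtxt('tb_input_features.dat', dtype=np.float32)
  y = np.where(x >= 0, x, alpha * x)                        # elementwise leaky ReLU
  print(y)                                                  # matches tb_output_predictions.dat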
43 changes: 43 additions & 0 deletions example-prjs/leaky_relu/vivado.py
@@ -0,0 +1,43 @@

import hls4ml
# import pprint
import yaml
import numpy as np

print(hls4ml.__version__)

with open('config.yml', 'r') as ymlfile:
config = yaml.safe_load(ymlfile)

# try tweaking the reuse_factor on one layer to get different pipelining
# config['HLSConfig']['LayerName']['fc1']['ReuseFactor'] = 4

print('NETWORK')
print(config)

config['OutputDir'] = 'my-Vivado-test'
config['Backend'] = 'Vivado'
config['IOType'] = 'io_stream'

config['HLSConfig']['Model']['Strategy'] = 'Latency'
#config['HLSConfig']['Model']['Strategy'] = 'Resource'

# default threshold is infinity
config['HLSConfig']['Model']['BramFactor'] = np.inf
# set to zero to force all weights onto (external function) interface
config['HLSConfig']['Model']['BramFactor'] = 0

print('CURRENT CONFIGURATION')
print('Backend='+config['Backend'])
print('IOType='+config['IOType'])
print('BramFactor={bf}'.format(bf=config['HLSConfig']['Model']['BramFactor']))

# pprint.pprint(config)

# Convert it to an HLS project
hls_model = hls4ml.converters.keras_to_hls(config)

hls_model.build(vsynth=False)

# URL for this info: https://fastmachinelearning.org/hls4ml/setup/QUICKSTART.html
