Edge Impulse SDK v1.46.1
Signed-off-by: francovaro <[email protected]>
francovaro committed Feb 27, 2024
1 parent 4aa0053 commit 446e155
Showing 9 changed files with 93 additions and 68 deletions.
13 changes: 8 additions & 5 deletions EdgeImpulse.EI-SDK.pdsc
@@ -5,13 +5,16 @@
<name>EI-SDK</name>
<license>LICENSE-apache-2.0.txt</license>
<description>Edge Impulse SDK</description>
<url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/tag/v1.45.6</url>
<url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/tag/v1.46.1</url>
<supportContact>[email protected]</supportContact>
<repository type="git">https://github.com/edgeimpulse/edge-impulse-sdk-pack.git</repository>
<releases>
<release version="1.45.6" tag="v1.45.6" date="2024-02-26" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.45.6/EdgeImpulse.EI-SDK.1.45.6.pack">
<release version="1.46.1" tag="v1.46.1" date="2024-02-27" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.46.1/EdgeImpulse.EI-SDK.1.46.1.pack">
EI-SDK
</release>
<release version="1.45.6" tag="v1.45.6" date="2024-02-26" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.45.6/EdgeImpulse.EI-SDK.1.45.6.pack">
EI-SDK
</release>
<release version="1.45.5" tag="v1.45.5" date="2024-02-20" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.45.5/EdgeImpulse.EI-SDK.1.45.5.pack">
EI-SDK
</release>
@@ -68,7 +71,7 @@
</packages>
</requirements>
<components>
<component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.45.6">
<component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.46.1">
<description>Edge Impulse SDK</description>
<!-- short component description -->
<files>
@@ -233,11 +236,11 @@
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util_lite.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/tree_ensemble_classifier.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_utils.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/common.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.cpp"/>
<file category="source" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.cpp"/>
@@ -429,7 +432,6 @@
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/micro_memory_planner.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_plan_struct.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/tree_ensemble_classifier.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/padding.h"/>
@@ -513,6 +515,7 @@
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.h"/>
<file category="header" name="edgeimpulse/edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h"/>
4 changes: 2 additions & 2 deletions EdgeImpulse.pidx
@@ -2,8 +2,8 @@
<index schemaVersion="1.0.0" xs:noNamespaceSchemaLocation="PackIndex.xsd" xmlns:xs="http://www.w3.org/2001/XMLSchema-instance">
<vendor>EdgeImpulse</vendor>
<url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/</url>
<timestamp>2024-02-26 13:59:56</timestamp>
<timestamp>2024-02-27 11:41:30</timestamp>
<pindex>
<pdsc url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/tag/v1.45.6" vendor="EdgeImpulse" name="EI-SDK" version="1.45.6"/>
<pdsc url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/download/tag/v1.46.1" vendor="EdgeImpulse" name="EI-SDK" version="1.46.1"/>
</pindex>
</index>
56 changes: 33 additions & 23 deletions edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h
@@ -324,9 +324,6 @@ extern "C" EI_IMPULSE_ERROR process_impulse_continuous(const ei_impulse_t *impul
uint64_t dsp_start_us = ei_read_timer_us();

size_t out_features_index = 0;
bool is_mfcc = false;
bool is_mfe = false;
bool is_spectrogram = false;

for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) {
ei_model_dsp_t block = impulse->dsp_blocks[ix];
@@ -344,15 +341,12 @@ extern "C" EI_IMPULSE_ERROR process_impulse_continuous(const ei_impulse_t *impul
/* Switch to the slice version of the mfcc feature extract function */
if (block.extract_fn == extract_mfcc_features) {
extract_fn_slice = &extract_mfcc_per_slice_features;
is_mfcc = true;
}
else if (block.extract_fn == extract_spectrogram_features) {
extract_fn_slice = &extract_spectrogram_per_slice_features;
is_spectrogram = true;
}
else if (block.extract_fn == extract_mfe_features) {
extract_fn_slice = &extract_mfe_per_slice_features;
is_mfe = true;
}
else {
ei_printf("ERR: Unknown extract function, only MFCC, MFE and spectrogram supported\n");
@@ -401,33 +395,48 @@ extern "C" EI_IMPULSE_ERROR process_impulse_continuous(const ei_impulse_t *impul
if (classifier_continuous_features_written >= impulse->nn_input_frame_size) {
dsp_start_us = ei_read_timer_us();

ei_feature_t feature;
std::unique_ptr<ei::matrix_t> matrix_ptr(new ei::matrix_t(1, impulse->nn_input_frame_size));
feature.matrix = matrix_ptr.get();
feature.blockId = 0;
uint32_t block_num = impulse->dsp_blocks_size + impulse->learning_blocks_size;

/* Create a copy of the matrix for normalization */
for (size_t m_ix = 0; m_ix < impulse->nn_input_frame_size; m_ix++) {
feature.matrix->buffer[m_ix] = static_features_matrix.buffer[m_ix];
}
// smart pointer to features array
std::unique_ptr<ei_feature_t[]> features_ptr(new ei_feature_t[block_num]);
ei_feature_t* features = features_ptr.get();

if (is_mfcc) {
calc_cepstral_mean_and_var_normalization_mfcc(feature.matrix, impulse->dsp_blocks[0].config);
}
else if (is_spectrogram) {
calc_cepstral_mean_and_var_normalization_spectrogram(feature.matrix, impulse->dsp_blocks[0].config);
}
else if (is_mfe) {
calc_cepstral_mean_and_var_normalization_mfe(feature.matrix, impulse->dsp_blocks[0].config);
// have it outside of the loop to avoid going out of scope
std::unique_ptr<ei::matrix_t> *matrix_ptrs = new std::unique_ptr<ei::matrix_t>[block_num];

out_features_index = 0;
// iterate over every dsp block and run normalization
for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) {
ei_model_dsp_t block = impulse->dsp_blocks[ix];
matrix_ptrs[ix] = std::unique_ptr<ei::matrix_t>(new ei::matrix_t(1, block.n_output_features));
features[ix].matrix = matrix_ptrs[ix].get();
features[ix].blockId = block.blockId;

/* Create a copy of the matrix for normalization */
for (size_t m_ix = 0; m_ix < block.n_output_features; m_ix++) {
features[ix].matrix->buffer[m_ix] = static_features_matrix.buffer[out_features_index + m_ix];
}

if (block.extract_fn == extract_mfcc_features) {
calc_cepstral_mean_and_var_normalization_mfcc(features[ix].matrix, block.config);
}
else if (block.extract_fn == extract_spectrogram_features) {
calc_cepstral_mean_and_var_normalization_spectrogram(features[ix].matrix, block.config);
}
else if (block.extract_fn == extract_mfe_features) {
calc_cepstral_mean_and_var_normalization_mfe(features[ix].matrix, block.config);
}
out_features_index += block.n_output_features;
}

result->timing.dsp_us += ei_read_timer_us() - dsp_start_us;
result->timing.dsp = (int)(result->timing.dsp_us / 1000);

if (debug) {
ei_printf("Running impulse...\n");
}

ei_impulse_error = run_inference(impulse, &feature, result, debug);
ei_impulse_error = run_inference(impulse, features, result, debug);

#if EI_CLASSIFIER_CALIBRATION_ENABLED
if (impulse->sensor == EI_CLASSIFIER_SENSOR_MICROPHONE) {
@@ -471,6 +480,7 @@ extern "C" EI_IMPULSE_ERROR process_impulse_continuous(const ei_impulse_t *impul
}
}
#endif
delete[] matrix_ptrs;
}
else {
for (int i = 0; i < impulse->label_count; i++) {
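The refactor above replaces the single copied feature matrix with one matrix per DSP block, so continuous mode now normalizes each block independently and passes the whole features array to run_inference(). For orientation, a minimal usage sketch of the continuous path follows; run_classifier_init(), run_classifier_continuous(), signal_t and ei_impulse_result_t are the SDK's public API, while EI_CLASSIFIER_SLICE_SIZE comes from a generated model header and read_microphone() is a hypothetical board-specific driver, not part of this commit.

#include <string.h>
#include "edge-impulse-sdk/classifier/ei_run_classifier.h"

// Hypothetical board-specific microphone driver (assumption, not SDK code).
extern void read_microphone(float *buf, size_t n_samples);

// Sketch only: one slice of raw audio, sized from the generated model metadata.
static float slice_buf[EI_CLASSIFIER_SLICE_SIZE];

// signal_t callback: copy a window of the current slice into out_ptr.
static int get_slice_data(size_t offset, size_t length, float *out_ptr) {
    memcpy(out_ptr, slice_buf + offset, length * sizeof(float));
    return 0;
}

void audio_loop(void) {
    run_classifier_init(); // resets the sliding feature window

    while (true) {
        read_microphone(slice_buf, EI_CLASSIFIER_SLICE_SIZE);

        signal_t signal;
        signal.total_length = EI_CLASSIFIER_SLICE_SIZE;
        signal.get_data = &get_slice_data;

        ei_impulse_result_t result = { 0 };
        EI_IMPULSE_ERROR err = run_classifier_continuous(&signal, &result, false);
        if (err != EI_IMPULSE_OK) {
            ei_printf("ERR: run_classifier_continuous failed (%d)\n", err);
            return;
        }
        // result.classification[] becomes meaningful once enough slices have
        // been accumulated to fill the model's input frame.
    }
}

Allocating matrix_ptrs with new[] before the per-block loop keeps each block's matrix alive until run_inference() returns; the delete[] added at the end of the branch releases them once the result struct is filled.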
@@ -51,7 +51,7 @@
#include "tensorflow-lite/tensorflow/lite/kernels/register.h"
#include "tensorflow-lite/tensorflow/lite/model.h"
#include "tensorflow-lite/tensorflow/lite/optional_debug_tools.h"
#include "edge-impulse-sdk/tensorflow/lite/kernels/tree_ensemble_classifier.h"
#include "edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h"
#include "edge-impulse-sdk/classifier/ei_model_types.h"
#include "edge-impulse-sdk/porting/ei_classifier_porting.h"
#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h"
@@ -50,7 +50,7 @@
#include "tensorflow-lite/tensorflow/lite/model.h"
#include "tensorflow-lite/tensorflow/lite/optional_debug_tools.h"
#endif
#include "edge-impulse-sdk/tensorflow/lite/kernels/tree_ensemble_classifier.h"
#include "edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h"
#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h"
#include "edge-impulse-sdk/classifier/ei_model_types.h"
#include "edge-impulse-sdk/classifier/ei_run_dsp.h"
@@ -101,16 +101,15 @@ EI_IMPULSE_ERROR run_nn_inference(

if (impulse->object_detection) {
switch (impulse->object_detection_last_layer) {
case EI_CLASSIFIER_LAST_LAYER_TAO_SSD:
case EI_CLASSIFIER_LAST_LAYER_TAO_RETINANET:
case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3:
case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4:
case EI_CLASSIFIER_LAST_LAYER_FOMO:
case EI_CLASSIFIER_LAST_LAYER_YOLOV5: {
out_data_size = impulse->tflite_output_features_count;
break;
}
case EI_CLASSIFIER_LAST_LAYER_SSD: {
ei_printf("ERR: SSD models are not supported using TensorRT \n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
break;
}
default: {
ei_printf(
"ERR: Unsupported object detection last layer (%d)\n",
@@ -170,35 +169,48 @@

if (impulse->object_detection) {
switch (impulse->object_detection_last_layer) {
case EI_CLASSIFIER_LAST_LAYER_FOMO: {
fill_res = fill_result_struct_f32_fomo(
impulse,
result,
out_data,
impulse->fomo_output_size,
impulse->fomo_output_size);
break;
}
case EI_CLASSIFIER_LAST_LAYER_SSD: {
ei_printf("ERR: SSD models are not supported using TensorRT \n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
break;
}
case EI_CLASSIFIER_LAST_LAYER_YOLOV5: {
fill_res = fill_result_struct_f32_yolov5(
impulse,
result,
6,
out_data,
impulse->tflite_output_features_count);
break;
}
default: {
ei_printf(
"ERR: Unsupported object detection last layer (%d)\n",
impulse->object_detection_last_layer);
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
}
case EI_CLASSIFIER_LAST_LAYER_FOMO: {
fill_res = fill_result_struct_f32_fomo(
impulse,
result,
out_data,
impulse->fomo_output_size,
impulse->fomo_output_size);
break;
}
case EI_CLASSIFIER_LAST_LAYER_YOLOV5: {
fill_res = fill_result_struct_f32_yolov5(
impulse,
result,
6,
out_data,
impulse->tflite_output_features_count);
break;
}
case EI_CLASSIFIER_LAST_LAYER_TAO_SSD:
case EI_CLASSIFIER_LAST_LAYER_TAO_RETINANET: {
fill_res = fill_result_struct_f32_tao_decode_detections(
impulse,
result,
out_data,
impulse->tflite_output_features_count);
break;
}
case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3:
case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4: {
fill_res = fill_result_struct_f32_tao_yolo(
impulse,
result,
out_data,
impulse->tflite_output_features_count);
break;
}
default: {
ei_printf(
"ERR: Unsupported object detection last layer (%d)\n",
impulse->object_detection_last_layer);
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
}
}
}
else {
@@ -215,7 +227,7 @@
}

/**
* Special function to run the classifier on images, only works on TFLite models (either interpreter or EON or for tensaiflow)
* Special function to run the classifier on images for quantized models
* that allocates a lot less memory by quantizing in place. This only works if 'can_run_classifier_image_quantized'
* returns EI_IMPULSE_OK.
*/
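Because the new TAO decode branches above fill the same bounding-box result structure as the FOMO and YOLOv5 branches, callers do not need to change. As a hedged sketch (field names come from the SDK's ei_impulse_result_t and ei_impulse_result_bounding_box_t types; the 50% confidence cut-off is only an example), application code typically walks the detections like this:

#include "edge-impulse-sdk/classifier/ei_classifier_types.h"
#include "edge-impulse-sdk/porting/ei_classifier_porting.h"

// Sketch only: iterate the boxes that fill_result_struct_f32_tao_decode_detections(),
// fill_result_struct_f32_tao_yolo() and fill_result_struct_f32_yolov5() populate.
static void print_detections(const ei_impulse_result_t *result) {
    for (uint32_t i = 0; i < result->bounding_boxes_count; i++) {
        const ei_impulse_result_bounding_box_t *bb = &result->bounding_boxes[i];
        if (bb->value < 0.5f) { // example threshold, tune per application
            continue;
        }
        // Confidence printed as an integer percentage to avoid depending on
        // float support in the port's ei_printf().
        ei_printf("%s (%d%%): x=%u y=%u w=%u h=%u\n",
                  bb->label, (int)(bb->value * 100.0f),
                  (unsigned)bb->x, (unsigned)bb->y,
                  (unsigned)bb->width, (unsigned)bb->height);
    }
}

The same loop applies whichever last-layer decoder ran, since every branch in the switch above writes into result->bounding_boxes.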
@@ -29,7 +29,7 @@
#include "tensorflow-lite/tensorflow/lite/kernels/register.h"
#include "tensorflow-lite/tensorflow/lite/model.h"
#include "tensorflow-lite/tensorflow/lite/optional_debug_tools.h"
#include "edge-impulse-sdk/tensorflow/lite/kernels/tree_ensemble_classifier.h"
#include "edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h"
#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h"
#include "edge-impulse-sdk/classifier/ei_model_types.h"
#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h"
