SDK release v1.48.2
francovaro committed Mar 18, 2024
1 parent afc5c7a commit d6423f5
Showing 16 changed files with 277 additions and 148 deletions.
9 changes: 6 additions & 3 deletions EdgeImpulse.EI-SDK.pdsc
@@ -5,13 +5,16 @@
<name>EI-SDK</name>
<license>LICENSE-apache-2.0.txt</license>
<description>Edge Impulse SDK</description>
-<url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.48.1/</url>
+<url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.48.2/</url>
<supportContact>[email protected]</supportContact>
<repository type="git">https://github.com/edgeimpulse/edge-impulse-sdk-pack.git</repository>
<releases>
<release version="1.48.1" tag="v1.48.1" date="2024-03-14" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.48.1/EdgeImpulse.EI-SDK.1.48.1.pack">
<release version="1.48.2" tag="v1.48.2" date="2024-03-18" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.48.2/EdgeImpulse.EI-SDK.1.48.2.pack">
EI-SDK
</release>
<release version="1.48.1" tag="v1.48.1" date="2024-03-14" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.48.1/EdgeImpulse.EI-SDK.1.48.1.pack">
EI-SDK
</release>
<release version="1.47.2" tag="v1.47.2" date="2024-03-12" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.47.2/EdgeImpulse.EI-SDK.1.47.2.pack">
EI-SDK
</release>
@@ -86,7 +89,7 @@
</packages>
</requirements>
<components>
-<component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.48.1">
+<component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.48.2">
<description>Edge Impulse SDK</description>
<!-- short component description -->
<files>
4 changes: 2 additions & 2 deletions EdgeImpulse.pidx
@@ -2,8 +2,8 @@
<index schemaVersion="1.0.0" xs:noNamespaceSchemaLocation="PackIndex.xsd" xmlns:xs="http://www.w3.org/2001/XMLSchema-instance">
<vendor>EdgeImpulse</vendor>
<url>https://raw.githubusercontent.com/edgeimpulse/edge-impulse-sdk-pack/main/</url>
-<timestamp>2024-03-14 14:16:31</timestamp>
+<timestamp>2024-03-18 16:38:05</timestamp>
<pindex>
-<pdsc url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.48.1/" vendor="EdgeImpulse" name="EI-SDK" version="1.48.1"/>
+<pdsc url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.48.2/" vendor="EdgeImpulse" name="EI-SDK" version="1.48.2"/>
</pindex>
</index>
175 changes: 99 additions & 76 deletions edgeimpulse/edge-impulse-sdk/classifier/ei_fill_result_struct.h

Large diffs are not rendered by default.
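GitHub does not render this file's diff inline, but the call sites updated elsewhere in this commit make the shape of the change clear: every fill_result_struct_* helper gains a block_config argument immediately after impulse. A hedged sketch of what the FOMO variant's prototype plausibly looks like after this change; anything past the arguments visible in the hunks below is a guess, not the actual source:

// Sketch only, inferred from call sites in this commit such as:
//   fill_result_struct_f32_fomo(impulse, block_config, result,
//                               potentials_v.data(), impulse->fomo_output_size, ...);
// The real definition lives in this (unrendered) header.
EI_IMPULSE_ERROR fill_result_struct_f32_fomo(
    const ei_impulse_t *impulse,
    ei_learning_block_config_tflite_graph_t *block_config, // new in v1.48.2
    ei_impulse_result_t *result,
    float *data,             // raw model output buffer
    uint32_t output_size     // callers pass impulse->fomo_output_size
    /* trailing parameters are cut off in the rendered hunks */);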

5 changes: 2 additions & 3 deletions edgeimpulse/edge-impulse-sdk/classifier/ei_model_types.h
@@ -164,6 +164,8 @@ typedef struct {
uint8_t output_data_tensor;
uint8_t output_labels_tensor;
uint8_t output_score_tensor;
+/* object detection and visual AD */
+float threshold;
/* tflite graph params */
bool quantized;
bool compiled;
@@ -218,10 +220,7 @@ typedef struct ei_impulse {
ei_model_dsp_t *dsp_blocks;

/* object detection */
-bool object_detection;
uint16_t object_detection_count;
-float object_detection_threshold;
-int8_t object_detection_last_layer;
uint32_t fomo_output_size;
uint32_t tflite_output_features_count;

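Taken together, the two hunks above relocate per-model object-detection state: ei_impulse_t keeps only the count and output-size fields, while the per-block config gains a generic threshold that also serves visual anomaly detection. A condensed sketch of the resulting layout, abridged to the fields this commit touches and not the full structs:

typedef struct {
    // ...
    uint8_t output_data_tensor;
    uint8_t output_labels_tensor;
    uint8_t output_score_tensor;
    /* object detection and visual AD */
    float threshold;          // added: per-block detection/anomaly threshold
    /* tflite graph params */
    bool quantized;
    bool compiled;
    // object_detection and object_detection_last_layer are also read from
    // this struct by the engine hunks below
    // ...
} ei_learning_block_config_tflite_graph_t;

typedef struct ei_impulse {
    // ...
    ei_model_dsp_t *dsp_blocks;

    /* object detection */
    uint16_t object_detection_count;       // retained
    uint32_t fomo_output_size;             // retained
    uint32_t tflite_output_features_count;
    // removed: object_detection, object_detection_threshold,
    // object_detection_last_layer (now per learning block)
    // ...
} ei_impulse_t;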
@@ -353,7 +353,7 @@ EI_IMPULSE_ERROR run_nn_inference(
std::vector<float> potentials_v;// = potentials.cast<std::vector<float>>();

// TODO: output conversion depending on output shape?
-if (impulse->object_detection == false) {
+if (block_config->object_detection == false) {
potentials_v = potentials.squeeze().cast<std::vector<float>>();
}
else {
@@ -396,11 +396,12 @@
engine_info << "Power consumption: " << std::fixed << std::setprecision(2) << active_power << " mW\n";
engine_info << "Inferences per second: " << (1000000 / result->timing.classification_us);

-if (impulse->object_detection) {
-switch (impulse->object_detection_last_layer) {
+if (block_config->object_detection) {
+switch (block_config->object_detection_last_layer) {
case EI_CLASSIFIER_LAST_LAYER_FOMO: {
fill_res = fill_result_struct_f32_fomo(
impulse,
+block_config,
result,
potentials_v.data(),
impulse->fomo_output_size,
@@ -409,17 +410,17 @@
}
case EI_CLASSIFIER_LAST_LAYER_SSD: {
ei_printf("ERR: MobileNet SSD models are not implemented for Akida (%d)\n",
-impulse->object_detection_last_layer);
+block_config->object_detection_last_layer);
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
}
case EI_CLASSIFIER_LAST_LAYER_YOLOV5: {
ei_printf("ERR: YOLO v5 models are not implemented for Akida (%d)\n",
-impulse->object_detection_last_layer);
+block_config->object_detection_last_layer);
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
}
default: {
ei_printf("ERR: Unsupported object detection last layer (%d)\n",
-impulse->object_detection_last_layer);
+block_config->object_detection_last_layer);
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
}
}
@@ -547,6 +548,9 @@ __attribute__((unused)) int extract_tflite_features(signal_t *signal, matrix_t *
.output_data_tensor = 0,
.output_labels_tensor = 255,
.output_score_tensor = 255,
+.threshold = 0,
+.quantized = 0,
+.compiled = 1,
.graph_config = &ei_config_tflite_graph_0
};

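The Akida hunks above illustrate the pattern every inferencing engine in this commit adopts: recover the typed block config from the engine's void *config_ptr argument, branch on the block-level detection settings, and pass block_config through to the fill helpers. A minimal sketch of that pattern, with engine-specific output handling and most error paths trimmed (output stands in for whatever buffer the engine produced):

// block_config arrives as an untyped pointer on every engine entry point
ei_learning_block_config_tflite_graph_t *block_config =
    (ei_learning_block_config_tflite_graph_t*)config_ptr;

EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK;
if (block_config->object_detection) {
    switch (block_config->object_detection_last_layer) {
    case EI_CLASSIFIER_LAST_LAYER_FOMO:
        // block_config rides along so the fill helper can apply the
        // per-block threshold instead of a global impulse-wide one
        fill_res = fill_result_struct_f32_fomo(
            impulse, block_config, result,
            output, impulse->fomo_output_size /* ... */);
        break;
    default:
        ei_printf("ERR: Unsupported object detection last layer (%d)\n",
                  block_config->object_detection_last_layer);
        return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
    }
}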
@@ -214,6 +214,7 @@ EI_IMPULSE_ERROR run_gmm_anomaly(
.output_data_tensor = 0,
.output_labels_tensor = 0,
.output_score_tensor = 0,
+.threshold = block_config->anomaly_threshold,
.quantized = 0,
.compiled = 0,
.graph_config = block_config->graph_config
@@ -455,10 +455,12 @@ EI_IMPULSE_ERROR drpai_close(uint32_t input_frame_size) {
#if ((EI_CLASSIFIER_OBJECT_DETECTION == 1) && (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI))
EI_IMPULSE_ERROR drpai_run_yolov5_postprocessing(
const ei_impulse_t *impulse,
+ei_learning_block_config_tflite_graph_t *block_config,
signal_t *signal,
ei_impulse_result_t *result,
bool debug = false)
{

static std::unique_ptr<tflite::FlatBufferModel> model = nullptr;
static std::unique_ptr<tflite::Interpreter> interpreter = nullptr;

@@ -564,7 +566,7 @@ EI_IMPULSE_ERROR drpai_run_yolov5_postprocessing(
// }
// printf("\n");

-return fill_result_struct_f32_yolov5(impulse, result, 5, out_data, out_size);
+return fill_result_struct_f32_yolov5(impulse, block_config, result, 5, out_data, out_size);
}
#endif

@@ -602,6 +604,8 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(
void *config_ptr,
bool debug = false)
{
+ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr;

// this needs to be changed for multi-model, multi-impulse
static bool first_run = true;
uint64_t ctx_start_us;
@@ -678,8 +682,8 @@

EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK;

-if (impulse->object_detection) {
-switch (impulse->object_detection_last_layer) {
+if (block_config->object_detection) {
+switch (block_config->object_detection_last_layer) {
case EI_CLASSIFIER_LAST_LAYER_FOMO: {
if (debug) {
ei_printf("DEBUG: raw drpai output");
@@ -693,6 +697,7 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(

fill_res = fill_result_struct_f32_fomo(
impulse,
+block_config,
result,
drpai_output_buf,
impulse->fomo_output_size,
@@ -701,7 +706,7 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(
}
case EI_CLASSIFIER_LAST_LAYER_SSD: {
ei_printf("ERR: MobileNet SSD models are not implemented for DRP-AI (%d)\n",
-impulse->object_detection_last_layer);
+block_config->object_detection_last_layer);
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
}
case EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI: {
@@ -723,7 +728,7 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(

#if ((EI_CLASSIFIER_OBJECT_DETECTION == 1) && (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI))
// do post processing
-fill_res = drpai_run_yolov5_postprocessing(impulse, signal, result, debug);
+fill_res = drpai_run_yolov5_postprocessing(impulse, block_config, signal, result, debug);
#endif

#endif
@@ -732,7 +737,7 @@
}
default: {
ei_printf("ERR: Unsupported object detection last layer (%d)\n",
-impulse->object_detection_last_layer);
+block_config->object_detection_last_layer);
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
}
}
@@ -208,6 +208,8 @@ EI_IMPULSE_ERROR run_nn_inference(
void *config_ptr,
bool debug = false)
{
+ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr;

memx_status status = MEMX_STATUS_OK;
int32_t ifmap_height, ifmap_width, ifmap_channel_number, ifmap_format;
int32_t ofmap_height, ofmap_width, ofmap_channel_number, ofmap_format;
@@ -311,12 +313,13 @@ EI_IMPULSE_ERROR run_nn_inference(
tflite::reference_ops::Softmax(dummy_params, softmax_shape, ofmap, softmax_shape, ofmap);

// handle inference outputs
-if (impulse->object_detection) {
-switch (impulse->object_detection_last_layer) {
+if (block_config->object_detection) {
+switch (block_config->object_detection_last_layer) {
case EI_CLASSIFIER_LAST_LAYER_FOMO: {
ei_printf("FOMO executed on Memryx\n");
fill_result_struct_f32_fomo(
impulse,
+block_config,
result,
ofmap,
impulse->fomo_output_size,
@@ -329,7 +332,7 @@ EI_IMPULSE_ERROR run_nn_inference(
}
default: {
ei_printf("ERR: Unsupported object detection last layer (%d)\n",
-impulse->object_detection_last_layer);
+block_config->object_detection_last_layer);
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
}
}
@@ -353,6 +356,8 @@ EI_IMPULSE_ERROR run_nn_inference(
void *config_ptr,
bool debug = false)
{
+ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr;

// init Python embedded interpreter (should be called once!)
static py::scoped_interpreter guard{};

@@ -420,7 +425,7 @@ EI_IMPULSE_ERROR run_nn_inference(

potentials = outputs.squeeze().cast<py::array_t<float>>();

-if (impulse->object_detection == false) {
+if (block_config->object_detection == false) {
potentials_v = outputs.squeeze().cast<std::vector<float>>();
}
else {
@@ -439,12 +444,13 @@ EI_IMPULSE_ERROR run_nn_inference(
ei_printf("Memryx raw output:\n%s\n", ret_str.c_str());
}

-if (impulse->object_detection) {
-switch (impulse->object_detection_last_layer) {
+if (block_config->object_detection) {
+switch (block_config->object_detection_last_layer) {
case EI_CLASSIFIER_LAST_LAYER_FOMO: {
ei_printf("FOMO executed on Memryx\n");
fill_result_struct_f32_fomo(
impulse,
+block_config,
result,
potentials_v.data(),
impulse->fomo_output_size,
@@ -347,6 +347,7 @@ static EI_IMPULSE_ERROR inference_onnx_setup(
* @return EI_IMPULSE_OK if successful
*/
static EI_IMPULSE_ERROR inference_onnx_run(const ei_impulse_t *impulse,
+void *config_ptr,
uint64_t ctx_start_us,
std::vector<Ort::Value>* input_tensors,
std::vector<Ort::Value>* output_tensors,
@@ -356,6 +357,8 @@ static EI_IMPULSE_ERROR inference_onnx_run(const ei_impulse_t *impulse,
ei_impulse_result_t *result,
bool debug) {

+ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr;

session->Run(*run_options, *binding);

uint64_t ctx_end_us = ei_read_timer_us();
@@ -381,8 +384,8 @@ static EI_IMPULSE_ERROR inference_onnx_run(const ei_impulse_t *impulse,
EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK;

// NOTE: for now only yolox object detection supported
-if (impulse->object_detection) {
-switch (impulse->object_detection_last_layer) {
+if (block_config->object_detection) {
+switch (block_config->object_detection_last_layer) {
case EI_CLASSIFIER_LAST_LAYER_YOLOX: {
#if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
ei_printf("ERR: YOLOX does not support quantized inference\n");
@@ -399,6 +402,7 @@ static EI_IMPULSE_ERROR inference_onnx_run(const ei_impulse_t *impulse,
}
fill_res = fill_result_struct_f32_yolox_detect(
impulse,
+block_config,
result,
(float*)out_data,
output_tensor_features_count);
@@ -407,7 +411,7 @@ static EI_IMPULSE_ERROR inference_onnx_run(const ei_impulse_t *impulse,
}
default: {
ei_printf("ERR: Unsupported object detection last layer (%d)\n",
-impulse->object_detection_last_layer);
+block_config->object_detection_last_layer);
break;
}
}
@@ -549,6 +553,7 @@ EI_IMPULSE_ERROR run_nn_inference(

ctx_start_us = ei_read_timer_us();
EI_IMPULSE_ERROR run_res = inference_onnx_run(impulse,
+config_ptr,
ctx_start_us,
&input_tensors,
&output_tensors,
@@ -678,6 +683,7 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(

ctx_start_us = ei_read_timer_us();
EI_IMPULSE_ERROR run_res = inference_onnx_run(impulse,
+config_ptr,
ctx_start_us,
&input_tensors,
&output_tensors,
