Skip to content

Commit

Permalink
updated for EI-SDK v1.46.4
Browse files Browse the repository at this point in the history
  • Loading branch information
francovaro committed Mar 5, 2024
1 parent 446e155 commit f3b509c
Show file tree
Hide file tree
Showing 8 changed files with 360 additions and 63 deletions.
9 changes: 6 additions & 3 deletions EdgeImpulse.EI-SDK.pdsc
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,16 @@
<name>EI-SDK</name>
<license>LICENSE-apache-2.0.txt</license>
<description>Edge Impulse SDK</description>
<url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/tag/v1.46.1</url>
<url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.46.4/</url>
<supportContact>[email protected]</supportContact>
<repository type="git">https://github.com/edgeimpulse/edge-impulse-sdk-pack.git</repository>
<releases>
<release version="1.46.1" tag="v1.46.1" date="2024-02-27" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.46.1/EdgeImpulse.EI-SDK.1.46.1.pack">
<release version="1.46.4" tag="v1.46.4" date="2024-03-05" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.46.4/EdgeImpulse.EI-SDK.1.46.4.pack">
EI-SDK
</release>
<release version="1.46.1" tag="v1.46.1" date="2024-02-27" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.46.1/EdgeImpulse.EI-SDK.1.46.1.pack">
EI-SDK
</release>
<release version="1.45.6" tag="v1.45.6" date="2024-02-26" url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.45.6/EdgeImpulse.EI-SDK.1.45.6.pack">
EI-SDK
</release>
Expand Down Expand Up @@ -71,7 +74,7 @@
</packages>
</requirements>
<components>
<component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.46.1">
<component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.46.4">
<description>Edge Impulse SDK</description>
<!-- short component description -->
<files>
Expand Down
6 changes: 3 additions & 3 deletions EdgeImpulse.pidx
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
<?xml version="1.0" encoding="UTF-8" ?>
<index schemaVersion="1.0.0" xs:noNamespaceSchemaLocation="PackIndex.xsd" xmlns:xs="http://www.w3.org/2001/XMLSchema-instance">
<vendor>EdgeImpulse</vendor>
<url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/</url>
<timestamp>2024-02-27 11:41:30</timestamp>
<url>https://raw.githubusercontent.com/edgeimpulse/edge-impulse-sdk-pack/main/</url>
<timestamp>2024-03-05 14:53:30</timestamp>
<pindex>
<pdsc url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/download/tag/v1.46.1" vendor="EdgeImpulse" name="EI-SDK" version="1.46.1"/>
<pdsc url="https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.46.4/" vendor="EdgeImpulse" name="EI-SDK" version="1.46.4"/>
</pindex>
</index>
214 changes: 192 additions & 22 deletions edgeimpulse/edge-impulse-sdk/classifier/ei_fill_result_struct.h

Large diffs are not rendered by default.

6 changes: 6 additions & 0 deletions edgeimpulse/edge-impulse-sdk/classifier/ei_model_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -188,6 +188,11 @@ typedef struct {
void* graph_config;
} ei_learning_block_config_anomaly_gmm_t;

/**
 * Non-max suppression (NMS) configuration for object-detection impulses.
 * These values are forwarded to NonMaxSuppression by ei_run_nms
 * (confidence_threshold as score_threshold, iou_threshold as iou_threshold).
 */
typedef struct {
    float confidence_threshold; // passed as NMS score_threshold; boxes scoring below are dropped
    float iou_threshold;        // boxes overlapping a kept box by more than this IoU are suppressed
} ei_object_detection_nms_config_t;

typedef struct ei_impulse {
/* project details */
uint32_t project_id;
Expand Down Expand Up @@ -234,6 +239,7 @@ typedef struct ei_impulse {
uint16_t label_count;
const ei_model_performance_calibration_t calibration;
const char **categories;
ei_object_detection_nms_config_t object_detection_nms;
} ei_impulse_t;

typedef struct {
Expand Down
24 changes: 21 additions & 3 deletions edgeimpulse/edge-impulse-sdk/classifier/ei_nms.h
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,9 @@ static inline void NonMaxSuppression(const float* boxes, const int num_boxes,
/**
* Run non-max suppression over the results array (for bounding boxes)
*/
EI_IMPULSE_ERROR ei_run_nms(std::vector<ei_impulse_result_bounding_box_t> *results) {
EI_IMPULSE_ERROR ei_run_nms(
const ei_impulse_t *impulse,
std::vector<ei_impulse_result_bounding_box_t> *results) {

size_t bb_count = 0;
for (size_t ix = 0; ix < results->size(); ix++) {
Expand Down Expand Up @@ -257,8 +259,8 @@ EI_IMPULSE_ERROR ei_run_nms(std::vector<ei_impulse_result_bounding_box_t> *resul
bb_count, // num_boxes
(const float*)scores, // scores
bb_count, // max_output_size
0.2f, // iou_threshold
0.0f, // score_threshold
impulse->object_detection_nms.iou_threshold, // iou_threshold
impulse->object_detection_nms.confidence_threshold, // score_threshold
0.0f, // soft_nms_sigma
selected_indices,
selected_scores,
Expand Down Expand Up @@ -293,6 +295,22 @@ EI_IMPULSE_ERROR ei_run_nms(std::vector<ei_impulse_result_bounding_box_t> *resul
ei_free(selected_scores);

return EI_IMPULSE_OK;

}

/**
* Run non-max suppression over the results array (for bounding boxes)
*/
/**
 * Run non-max suppression over the results array (for bounding boxes).
 *
 * Backwards-compatible overload: forwards to the
 * ei_run_nms(const ei_impulse_t *, ...) overload, using either the default
 * impulse (when model variables are compiled in) or a stand-in impulse that
 * carries the historical default NMS thresholds
 * (confidence_threshold = 0.0, iou_threshold = 0.2).
 *
 * @param results  candidate bounding boxes; updated in place by the NMS pass.
 * @return EI_IMPULSE_OK on success, otherwise the error from the forwarded call.
 */
EI_IMPULSE_ERROR ei_run_nms(std::vector<ei_impulse_result_bounding_box_t> *results) {
#if EI_CLASSIFIER_HAS_MODEL_VARIABLES == 1
    const ei_impulse_t impulse = ei_default_impulse;
    return ei_run_nms(&impulse, results);
#else
    // Nested member designators (".a.b = x") are C99 syntax that ISO C++ does
    // not accept (C++20 designated initializers may not nest); value-initialize
    // and assign the non-const member instead so this header compiles as
    // standard C++.
    ei_impulse_t impulse = {};
    impulse.object_detection_nms.confidence_threshold = 0.0f;
    impulse.object_detection_nms.iou_threshold = 0.2f;
    return ei_run_nms(&impulse, results);
#endif
}

#endif // #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOX) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_RETINANET) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_SSD) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4)
Expand Down
2 changes: 2 additions & 0 deletions edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,7 @@ extern "C" EI_IMPULSE_ERROR process_impulse(const ei_impulse_t *impulse,
// smart pointer to features array
std::unique_ptr<ei_feature_t[]> features_ptr(new ei_feature_t[block_num]);
ei_feature_t* features = features_ptr.get();
memset(features, 0, sizeof(ei_feature_t) * block_num);

// have it outside of the loop to avoid going out of scope
std::unique_ptr<ei::matrix_t> *matrix_ptrs = new std::unique_ptr<ei::matrix_t>[block_num];
Expand Down Expand Up @@ -400,6 +401,7 @@ extern "C" EI_IMPULSE_ERROR process_impulse_continuous(const ei_impulse_t *impul
// smart pointer to features array
std::unique_ptr<ei_feature_t[]> features_ptr(new ei_feature_t[block_num]);
ei_feature_t* features = features_ptr.get();
memset(features, 0, sizeof(ei_feature_t) * block_num);

// have it outside of the loop to avoid going out of scope
std::unique_ptr<ei::matrix_t> *matrix_ptrs = new std::unique_ptr<ei::matrix_t>[block_num];
Expand Down
120 changes: 93 additions & 27 deletions edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tensorrt.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,20 @@
#include "edge-impulse-sdk/porting/ei_classifier_porting.h"
#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h"

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <string>
#include <filesystem>
#include <stdlib.h>
#include "tflite/linux-jetson-nano/libeitrt.h"

#if __APPLE__
#include <mach-o/dyld.h>
#else
#include <linux/limits.h>
#endif

EiTrt *ei_trt_handle = NULL;

inline bool file_exists(char *model_file_name)
Expand Down Expand Up @@ -67,34 +78,79 @@ EI_IMPULSE_ERROR run_nn_inference(
#error "TensorRT requires an unquantized network"
#endif

static char model_file_name[128];
snprintf(
model_file_name,
128,
"/tmp/%s-%d-%d.engine",
impulse->project_name,
impulse->project_id,
impulse->deploy_version);
static char current_exe_path[PATH_MAX] = { 0 };

#if __APPLE__
uint32_t len = PATH_MAX;
if (_NSGetExecutablePath(current_exe_path, &len) != 0) {
current_exe_path[0] = '\0'; // buffer too small
}
else {
// resolve symlinks, ., .. if possible
char *canonical_path = realpath(current_exe_path, NULL);
if (canonical_path != NULL)
{
strncpy(current_exe_path, canonical_path, len);
free(canonical_path);
}
}
#else
int readlink_res = readlink("/proc/self/exe", current_exe_path, PATH_MAX);
if (readlink_res < 0) {
printf("readlink_res = %d\n", readlink_res);
current_exe_path[0] = '\0'; // failed to find location
}
#endif

static char model_file_name[PATH_MAX];

if (strlen(current_exe_path) == 0) {
// could not determine current exe path, use /tmp for the engine file
snprintf(
model_file_name,
PATH_MAX,
"/tmp/ei-%d-%d.engine",
impulse->project_id,
impulse->deploy_version);
}
else {
std::filesystem::path p(current_exe_path);
snprintf(
model_file_name,
PATH_MAX,
"%s/%s-project%d-v%d.engine",
p.parent_path().c_str(),
p.stem().c_str(),
impulse->project_id,
impulse->deploy_version);
}

static bool first_run = true;

static bool first_run = !file_exists(model_file_name);
if (first_run) {
ei_printf("INFO: Model file '%s' does not exist, creating now. \n", model_file_name);

FILE *file = fopen(model_file_name, "w");
if (!file) {
ei_printf("ERR: TensorRT init failed to open '%s'\n", model_file_name);
return EI_IMPULSE_TENSORRT_INIT_FAILED;
}
bool fexists = file_exists(model_file_name);
if (!fexists) {
ei_printf("INFO: Model file '%s' does not exist, creating...\n", model_file_name);

if (fwrite(graph_config->model, graph_config->model_size, 1, file) != 1) {
ei_printf("ERR: TensorRT init fwrite failed.\n");
return EI_IMPULSE_TENSORRT_INIT_FAILED;
}
FILE *file = fopen(model_file_name, "w");
if (!file) {
ei_printf("ERR: TensorRT init failed to open '%s'\n", model_file_name);
return EI_IMPULSE_TENSORRT_INIT_FAILED;
}

if (fwrite(graph_config->model, graph_config->model_size, 1, file) != 1) {
ei_printf("ERR: TensorRT init fwrite failed.\n");
return EI_IMPULSE_TENSORRT_INIT_FAILED;
}

if (fclose(file) != 0) {
ei_printf("ERR: TensorRT init fclose failed.\n");
return EI_IMPULSE_TENSORRT_INIT_FAILED;
if (fclose(file) != 0) {
ei_printf("ERR: TensorRT init fclose failed.\n");
return EI_IMPULSE_TENSORRT_INIT_FAILED;
}
}

first_run = false;
}

uint32_t out_data_size = 0;
Expand All @@ -106,7 +162,8 @@ EI_IMPULSE_ERROR run_nn_inference(
case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3:
case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4:
case EI_CLASSIFIER_LAST_LAYER_FOMO:
case EI_CLASSIFIER_LAST_LAYER_YOLOV5: {
case EI_CLASSIFIER_LAST_LAYER_YOLOV5:
case EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI: {
out_data_size = impulse->tflite_output_features_count;
break;
}
Expand Down Expand Up @@ -178,11 +235,14 @@ EI_IMPULSE_ERROR run_nn_inference(
impulse->fomo_output_size);
break;
}
case EI_CLASSIFIER_LAST_LAYER_YOLOV5: {
case EI_CLASSIFIER_LAST_LAYER_YOLOV5:
case EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI: {
int version = impulse->object_detection_last_layer == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI ?
5 : 6;
fill_res = fill_result_struct_f32_yolov5(
impulse,
result,
6,
version,
out_data,
impulse->tflite_output_features_count);
break;
Expand All @@ -197,8 +257,14 @@ EI_IMPULSE_ERROR run_nn_inference(
break;
}
case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3:
fill_res = fill_result_struct_f32_tao_yolov3(
impulse,
result,
out_data,
impulse->tflite_output_features_count);
break;
case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4: {
fill_res = fill_result_struct_f32_tao_yolo(
fill_res = fill_result_struct_f32_tao_yolov4(
impulse,
result,
out_data,
Expand Down Expand Up @@ -242,4 +308,4 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(
}

#endif // #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSORRT)
#endif // _EI_CLASSIFIER_INFERENCING_ENGINE_TENSORRT_H_
#endif // _EI_CLASSIFIER_INFERENCING_ENGINE_TENSORRT_H_
Original file line number Diff line number Diff line change
Expand Up @@ -426,11 +426,43 @@ EI_IMPULSE_ERROR fill_result_struct_from_output_tensor_tflite(
}
break;
}
case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3:
case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3: {

if (output->type == kTfLiteInt8) {
fill_res = fill_result_struct_quantized_tao_yolov3(
impulse,
result,
output->data.int8,
output->params.zero_point,
output->params.scale,
impulse->tflite_output_features_count);
}
else if (output->type == kTfLiteUInt8) {
fill_res = fill_result_struct_quantized_tao_yolov3(
impulse,
result,
output->data.uint8,
output->params.zero_point,
output->params.scale,
impulse->tflite_output_features_count);
}
else if (output->type == kTfLiteFloat32) {
fill_res = fill_result_struct_f32_tao_yolov3(
impulse,
result,
output->data.f,
impulse->tflite_output_features_count);
}
else {
ei_printf("ERR: Invalid output type (%d) for TAO YOLOv3 layer\n", output->type);
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
}
break;
}
case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4: {

if (output->type == kTfLiteInt8) {
fill_res = fill_result_struct_quantized_tao_yolo(
fill_res = fill_result_struct_quantized_tao_yolov4(
impulse,
result,
output->data.int8,
Expand All @@ -439,7 +471,7 @@ EI_IMPULSE_ERROR fill_result_struct_from_output_tensor_tflite(
impulse->tflite_output_features_count);
}
else if (output->type == kTfLiteUInt8) {
fill_res = fill_result_struct_quantized_tao_yolo(
fill_res = fill_result_struct_quantized_tao_yolov4(
impulse,
result,
output->data.uint8,
Expand All @@ -448,14 +480,14 @@ EI_IMPULSE_ERROR fill_result_struct_from_output_tensor_tflite(
impulse->tflite_output_features_count);
}
else if (output->type == kTfLiteFloat32) {
fill_res = fill_result_struct_f32_tao_yolo(
fill_res = fill_result_struct_f32_tao_yolov4(
impulse,
result,
output->data.f,
impulse->tflite_output_features_count);
}
else {
ei_printf("ERR: Invalid output type (%d) for TAO last layer\n", output->type);
ei_printf("ERR: Invalid output type (%d) for TAO YOLOv4 layer\n", output->type);
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
}
break;
Expand Down

0 comments on commit f3b509c

Please sign in to comment.