Skip to content

Commit

Permalink
EI SDK update
Browse files Browse the repository at this point in the history
  • Loading branch information
francovaro committed Feb 1, 2024
1 parent d8f53e7 commit bc9fbaa
Show file tree
Hide file tree
Showing 9 changed files with 128 additions and 73 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/pack.yml
Original file line number Diff line number Diff line change
Expand Up @@ -50,4 +50,4 @@ jobs:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./output/*.pack
asset_name: ${{ github.ref }}.pack
asset_content_type: application/zipgh t
asset_content_type: application/zip
4 changes: 2 additions & 2 deletions EdgeImpulse.EI-SDK.pdsc
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
<url></url>
<supportContact>[email protected]</supportContact>
<releases>
<release version="1.40.12">
<release version="1.43.2">
EI-SDK
</release>
</releases>
Expand Down Expand Up @@ -55,7 +55,7 @@
</packages>
</requirements>
<components>
<component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.40.12">
<component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.43.2">
<description>Edge Impulse SDK</description>
<!-- short component description -->
<files>
Expand Down
1 change: 1 addition & 0 deletions edgeimpulse/edge-impulse-sdk/classifier/ei_model_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@
#define EI_CLASSIFIER_IMAGE_SCALING_0_255 1
#define EI_CLASSIFIER_IMAGE_SCALING_TORCH 2
#define EI_CLASSIFIER_IMAGE_SCALING_MIN1_1 3
#define EI_CLASSIFIER_IMAGE_SCALING_MIN128_127 4

struct ei_impulse;

Expand Down
157 changes: 99 additions & 58 deletions edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,11 @@ extern "C" EI_IMPULSE_ERROR run_inference(const ei_impulse_t *impulse, ei_featur
extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(const ei_impulse_t *impulse, signal_t *signal, ei_impulse_result_t *result, bool debug);
static EI_IMPULSE_ERROR can_run_classifier_image_quantized(const ei_impulse_t *impulse, ei_learning_block_t block_ptr);

#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
EI_IMPULSE_ERROR ei_scale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix);
EI_IMPULSE_ERROR ei_unscale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix);
#endif // EI_CLASSIFIER_LOAD_IMAGE_SCALING

/* Private variables ------------------------------------------------------- */

static uint64_t classifier_continuous_features_written = 0;
Expand All @@ -82,62 +87,6 @@ static RecognizeEvents *avg_scores = NULL;
/* These functions (up to Public functions section) are not exposed to end-user,
therefore changes are allowed. */

#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
static const float torch_mean[] = { 0.485, 0.456, 0.406 };
static const float torch_std[] = { 0.229, 0.224, 0.225 };

/**
 * @brief Apply the learning block's image scaling mode to a float matrix in place.
 *
 * @param block   Learning block whose `image_scaling` mode selects the transform.
 * @param fmatrix Matrix of pixel values (assumed interleaved 3-channel for TORCH mode
 *                — TODO confirm against caller).
 * @return EI_IMPULSE_OK on success, EI_IMPULSE_DSP_ERROR if a DSP op fails.
 */
static EI_IMPULSE_ERROR scale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
    switch (block->image_scaling) {
    case EI_CLASSIFIER_IMAGE_SCALING_TORCH: {
        // Per-channel normalization with the torch mean/std constants.
        // @todo; could we write some faster vector math here?
        const size_t total = fmatrix->rows * fmatrix->cols;
        for (size_t px = 0; px < total; px += 3) {
            fmatrix->buffer[px + 0] = (fmatrix->buffer[px + 0] - torch_mean[0]) / torch_std[0];
            fmatrix->buffer[px + 1] = (fmatrix->buffer[px + 1] - torch_mean[1]) / torch_std[1];
            fmatrix->buffer[px + 2] = (fmatrix->buffer[px + 2] - torch_mean[2]) / torch_std[2];
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_0_255: {
        // Expand [0,1] values to [0,255].
        int res = numpy::scale(fmatrix, 255.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_MIN1_1: {
        // Map [0,1] to [-1,1]: multiply by 2, then subtract 1.
        int res = numpy::scale(fmatrix, 2.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        res = numpy::subtract(fmatrix, 1.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    default:
        // Other modes (e.g. no scaling) leave the matrix untouched.
        break;
    }

    return EI_IMPULSE_OK;
}

/**
 * @brief Undo the scaling applied by scale_fmatrix(), restoring the matrix in place.
 *
 * Fix: the MIN1_1 mode was applied by scale_fmatrix() but never inverted here,
 * so the caller's matrix was left in [-1,1] instead of being restored to [0,1].
 * The inverse (add 1, then scale by 1/2) is now handled.
 *
 * @param block   Learning block whose `image_scaling` mode selects the inverse transform.
 * @param fmatrix Matrix of pixel values (assumed interleaved 3-channel for TORCH mode
 *                — TODO confirm against caller).
 * @return EI_IMPULSE_OK on success, EI_IMPULSE_DSP_ERROR if a DSP op fails.
 */
static EI_IMPULSE_ERROR unscale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
    if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) {
        // Invert per-channel normalization: multiply by std, then add mean back.
        // @todo; could we write some faster vector math here?
        for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix += 3) {
            fmatrix->buffer[ix + 0] = (fmatrix->buffer[ix + 0] * torch_std[0]) + torch_mean[0];
            fmatrix->buffer[ix + 1] = (fmatrix->buffer[ix + 1] * torch_std[1]) + torch_mean[1];
            fmatrix->buffer[ix + 2] = (fmatrix->buffer[ix + 2] * torch_std[2]) + torch_mean[2];
        }
    }
    else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_0_255) {
        // Invert [0,255] expansion back to [0,1].
        int scale_res = numpy::scale(fmatrix, 1 / 255.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
    }
    else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN1_1) {
        // Invert [-1,1] mapping: add 1, then scale by 1/2 to return to [0,1].
        int scale_res = numpy::add(fmatrix, 1.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        scale_res = numpy::scale(fmatrix, 1 / 2.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
    }
    return EI_IMPULSE_OK;
}
#endif


/**
* @brief Display the results of the inference
Expand Down Expand Up @@ -201,7 +150,7 @@ extern "C" EI_IMPULSE_ERROR run_inference(
#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
// we do not plan to have multiple dsp blocks with image
// so just apply scaling to the first one
EI_IMPULSE_ERROR scale_res = scale_fmatrix(&block, fmatrix[0].matrix);
EI_IMPULSE_ERROR scale_res = ei_scale_fmatrix(&block, fmatrix[0].matrix);
if (scale_res != EI_IMPULSE_OK) {
return scale_res;
}
Expand All @@ -216,7 +165,7 @@ extern "C" EI_IMPULSE_ERROR run_inference(

#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
// undo scaling
scale_res = unscale_fmatrix(&block, fmatrix[0].matrix);
scale_res = ei_unscale_fmatrix(&block, fmatrix[0].matrix);
if (scale_res != EI_IMPULSE_OK) {
return scale_res;
}
Expand Down Expand Up @@ -595,6 +544,98 @@ extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(
/* Thread carefully: public functions are not to be changed
to preserve backwards compatibility. */

#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
static const float torch_mean[] = { 0.485, 0.456, 0.406 };
static const float torch_std[] = { 0.229, 0.224, 0.225 };

/**
 * @brief Apply the learning block's image scaling mode to a float matrix in place.
 *
 * Supported modes: TORCH (per-channel mean/std normalization), 0_255,
 * MIN128_127 and MIN1_1 range remappings. Unknown modes are a no-op.
 *
 * @param block   Learning block whose `image_scaling` mode selects the transform.
 * @param fmatrix Matrix of pixel values (assumed interleaved 3-channel for TORCH mode
 *                — TODO confirm against caller).
 * @return EI_IMPULSE_OK on success, EI_IMPULSE_DSP_ERROR if a DSP op fails.
 */
EI_IMPULSE_ERROR ei_scale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
    switch (block->image_scaling) {
    case EI_CLASSIFIER_IMAGE_SCALING_TORCH: {
        // Per-channel normalization with the torch mean/std constants.
        // @todo; could we write some faster vector math here?
        const size_t total = fmatrix->rows * fmatrix->cols;
        for (size_t px = 0; px < total; px += 3) {
            fmatrix->buffer[px + 0] = (fmatrix->buffer[px + 0] - torch_mean[0]) / torch_std[0];
            fmatrix->buffer[px + 1] = (fmatrix->buffer[px + 1] - torch_mean[1]) / torch_std[1];
            fmatrix->buffer[px + 2] = (fmatrix->buffer[px + 2] - torch_mean[2]) / torch_std[2];
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_0_255: {
        // Expand [0,1] values to [0,255].
        int res = numpy::scale(fmatrix, 255.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_MIN128_127: {
        // Map [0,1] to [-128,127]: expand to [0,255], then shift down by 128.
        int res = numpy::scale(fmatrix, 255.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        res = numpy::subtract(fmatrix, 128.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_MIN1_1: {
        // Map [0,1] to [-1,1]: multiply by 2, then subtract 1.
        int res = numpy::scale(fmatrix, 2.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        res = numpy::subtract(fmatrix, 1.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    default:
        // Other modes (e.g. no scaling) leave the matrix untouched.
        break;
    }

    return EI_IMPULSE_OK;
}

/**
 * @brief Undo the scaling applied by ei_scale_fmatrix(), restoring the matrix in place.
 *
 * Each branch is the exact inverse of the corresponding branch in
 * ei_scale_fmatrix(). Unknown modes are a no-op.
 *
 * @param block   Learning block whose `image_scaling` mode selects the inverse transform.
 * @param fmatrix Matrix of pixel values (assumed interleaved 3-channel for TORCH mode
 *                — TODO confirm against caller).
 * @return EI_IMPULSE_OK on success, EI_IMPULSE_DSP_ERROR if a DSP op fails.
 */
EI_IMPULSE_ERROR ei_unscale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
    switch (block->image_scaling) {
    case EI_CLASSIFIER_IMAGE_SCALING_TORCH: {
        // Invert per-channel normalization: multiply by std, then add mean back.
        // @todo; could we write some faster vector math here?
        const size_t total = fmatrix->rows * fmatrix->cols;
        for (size_t px = 0; px < total; px += 3) {
            fmatrix->buffer[px + 0] = (fmatrix->buffer[px + 0] * torch_std[0]) + torch_mean[0];
            fmatrix->buffer[px + 1] = (fmatrix->buffer[px + 1] * torch_std[1]) + torch_mean[1];
            fmatrix->buffer[px + 2] = (fmatrix->buffer[px + 2] * torch_std[2]) + torch_mean[2];
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_MIN128_127: {
        // Invert [-128,127] mapping: shift up by 128, then compress back to [0,1].
        int res = numpy::add(fmatrix, 128.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        res = numpy::scale(fmatrix, 1 / 255.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_MIN1_1: {
        // Invert [-1,1] mapping: add 1, then scale by 1/2 to return to [0,1].
        int res = numpy::add(fmatrix, 1.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        res = numpy::scale(fmatrix, 1 / 2.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_0_255: {
        // Invert [0,255] expansion back to [0,1].
        int res = numpy::scale(fmatrix, 1 / 255.0f);
        if (res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    default:
        // Other modes (e.g. no scaling) leave the matrix untouched.
        break;
    }
    return EI_IMPULSE_OK;
}
#endif

/**
* @brief Init static vars
*/
Expand Down
2 changes: 1 addition & 1 deletion edgeimpulse/edge-impulse-sdk/porting/ei_logging.h
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@
extern "C"
#endif // defined(__cplusplus) && EI_C_LINKAGE == 1

const char *debug_msgs[] =
__attribute__((unused)) static const char *debug_msgs[] =
{
"NONE", // this one will never show
"ERR",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,10 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer(
result->is_variable = flatbuffer_tensor.is_variable();

result->data.data = GetFlatbufferTensorBuffer(flatbuffer_tensor, buffers);

// this is useful for debugging
#ifdef EI_LOG_LEVEL && EI_LOG_LEVEL >= 4
result->name = flatbuffer_tensor.name()->c_str();
#endif
// TODO(petewarden): Some of these paths aren't getting enough testing
// coverage, so we should figure out some tests that exercise them.
if (result->data.data == nullptr) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,12 @@ class MicroGraph {
// to be the subgraph of that operator.
int GetCurrentSubgraphIndex() { return current_subgraph_index_; }

// Set the current subgraph index. Callers are expected to pass an index
// valid for this model's subgraph list (not checked here).
void SetCurrentSubgraphIndex(int subgraph_idx) { current_subgraph_index_ = subgraph_idx; }

// Gets the list of allocations for each subgraph. This is the source of truth
// for all per-subgraph allocation data.
SubgraphAllocations* GetAllocations() { return subgraph_allocations_; }
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -320,16 +320,15 @@ TfLiteTensor* MicroInterpreter::output(size_t index) {
return output_tensors_[index];
}

// Returns a persistent TfLiteTensor for `index` within subgraph `subgraph_idx`,
// or nullptr if the index is out of range for that subgraph.
//
// Fix 1: the pasted region contained both the old single-argument and the new
// two-argument versions of this function interleaved (duplicate signature,
// duplicate length computation, conflicting return statements); this is the
// coherent two-argument version, which subsumes the old behavior via the
// subgraph_idx = 0 default in the declaration.
// Fix 2: `index`/`length` are size_t but were passed to a %d conversion,
// which is undefined behavior where size_t is wider than int; they are now
// cast explicitly.
TfLiteTensor* MicroInterpreter::tensor(size_t index, size_t subgraph_idx) {
  const size_t length = tensors_size(subgraph_idx);
  if (index >= length) {
    MicroPrintf("Tensor index %d out of range (length is %d)",
                static_cast<int>(index), static_cast<int>(length));
    return nullptr;
  }
  // Allocation is persistent: the returned tensor lives for the interpreter's lifetime.
  return allocator_.AllocatePersistentTfLiteTensor(model_, graph_.GetAllocations(), index, subgraph_idx);
}


// Repurposing free subgraphs to reset state for some ops for now
// will reset api is made. See b/220940833#comment25 for more context.
TfLiteStatus MicroInterpreter::Reset() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -80,9 +80,10 @@ class MicroInterpreter {
// one external context.
TfLiteStatus SetMicroExternalContext(void* external_context_payload);

size_t tensors_size() const { return model_->subgraphs()->Get(0)->tensors()->size(); }
size_t tensors_size(size_t subgraph_idx = 0) const { return model_->subgraphs()->Get(subgraph_idx)->tensors()->size(); }

TfLiteTensor* tensor(size_t tensor_index, size_t subgraph_idx = 0);

TfLiteTensor* tensor(size_t tensor_index);
template <class T>
T* typed_tensor(int tensor_index) {
if (TfLiteTensor* tensor_ptr = tensor(tensor_index)) {
Expand Down Expand Up @@ -135,13 +136,17 @@ class MicroInterpreter {

TfLiteStatus initialization_status() const { return initialization_status_; }

size_t operators_size() const { return model_->subgraphs()->Get(0)->operators()->size(); }

#ifdef EON_COMPILER_RUN
NodeAndRegistration* node_and_registrations_ = nullptr;

const NodeAndRegistration node_and_registration(int node_index) const {
return node_and_registrations_[node_index];
// Number of operators in the given subgraph of the flatbuffer model.
// Defaults to subgraph 0 for backward compatibility with the old
// zero-argument form.
size_t operators_size(uint32_t subgraph_idx = 0) const
{
return model_->subgraphs()->Get(subgraph_idx)->operators()->size();
}

// Look up the node/registration pair for `node_index` in subgraph `sg`,
// read from the per-subgraph allocations (the source of truth) rather than
// a separately cached array. Returned by value.
// NOTE(review): `sg` is not range-checked — assumes a valid subgraph index.
const NodeAndRegistration node_and_registration(int node_index, int sg)
{
return graph_.GetAllocations()[sg].node_and_registrations[node_index];
}
#endif

Expand Down

0 comments on commit bc9fbaa

Please sign in to comment.