From 6f1a46c0fd443646788f7a040043059e50d42c86 Mon Sep 17 00:00:00 2001
From: lionkun <871518554@qq.com>
Date: Tue, 27 Oct 2020 14:23:45 +0800
Subject: [PATCH 1/4] Add the OpenVINO.js framework to the examples for image
 classification, semantic_segmentation, speech_commands, and
 speech_recognition. It only works when the Electron application has Node.js
 integration enabled.
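
The require('inference-engine-node') call in the new OpenVINORunner only
resolves when the page runs in an Electron renderer with Node.js integration
turned on. A minimal sketch of such a host, for illustration only (the file
name and URL are assumptions, not part of this change):

    // main.js (Electron main process)
    const { app, BrowserWindow } = require('electron');
    app.whenReady().then(() => {
      const win = new BrowserWindow({
        webPreferences: {
          nodeIntegration: true,    // lets renderer code call require()
          contextIsolation: false,  // needed with nodeIntegration on Electron >= 12
        },
      });
      win.loadURL('http://localhost:8080/examples/image_classification/');
    });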
---
.../ImageClassificationExample.js | 5 +
.../ImageClassificationOpenVINORunner.js | 17 +
examples/image_classification/index.html | 11 +
examples/image_classification/main.js | 6 +
examples/index.html | 9 +
.../SemanticSegmentationExample.js | 3 +
.../SemanticSegmentationOpenVINORunner.js | 17 +
examples/semantic_segmentation/index.html | 11 +
.../speech_commands/SpeechCommandsExample.js | 9 +-
examples/speech_commands/index.html | 11 +
.../SpeechRecognitionExample.js | 9 +-
.../SpeechRecognitionOpenVINORunner.js | 22 ++
examples/speech_recognition/index.html | 12 +
examples/static/js/ui.common.js | 2 +-
examples/util/BaseExample.js | 47 ++-
examples/util/OpenVINORunner.js | 318 ++++++++++++++++++
examples/util/modelZoo.js | 34 +-
.../util/openvino/OpenVINOModelImporter.js | 3 +
package.json | 6 +-
19 files changed, 530 insertions(+), 22 deletions(-)
create mode 100644 examples/image_classification/ImageClassificationOpenVINORunner.js
create mode 100644 examples/semantic_segmentation/SemanticSegmentationOpenVINORunner.js
create mode 100644 examples/speech_recognition/SpeechRecognitionOpenVINORunner.js
create mode 100644 examples/util/OpenVINORunner.js
diff --git a/examples/image_classification/ImageClassificationExample.js b/examples/image_classification/ImageClassificationExample.js
index e3b8618b8..442f67059 100644
--- a/examples/image_classification/ImageClassificationExample.js
+++ b/examples/image_classification/ImageClassificationExample.js
@@ -20,6 +20,8 @@ class ImageClassificationExample extends BaseCameraExample {
case 'OpenCV.js':
runner = new ImageClassificationOpenCVRunner();
break;
+ case 'OpenVINO.js':
+ runner = new ImageClassificationOpenVINORunner();
}
runner.setProgressHandler(updateLoadingProgressComponent);
return runner;
@@ -28,6 +30,7 @@ class ImageClassificationExample extends BaseCameraExample {
/** @override */
_processExtra = (output) => {
let labelClasses;
+ console.log(output);
switch (this._currentFramework) {
case 'WebNN':
const deQuantizeParams = this._runner.getDeQuantizeParams();
@@ -36,6 +39,8 @@ class ImageClassificationExample extends BaseCameraExample {
case 'OpenCV.js':
labelClasses = getTopClasses(output.tensor, output.labels, 3);
break;
+ case 'OpenVINO.js':
+ labelClasses = getTopClasses(output.tensor, output.labels, 3);
}
$('#inferenceresult').show();
labelClasses.forEach((c, i) => {
diff --git a/examples/image_classification/ImageClassificationOpenVINORunner.js b/examples/image_classification/ImageClassificationOpenVINORunner.js
new file mode 100644
index 000000000..7e88ee6e6
--- /dev/null
+++ b/examples/image_classification/ImageClassificationOpenVINORunner.js
@@ -0,0 +1,17 @@
+class ImageClassificationOpenVINORunner extends OpenVINORunner {
+ constructor() {
+ super();
+ }
+
+ /** @override */
+ _getOutputTensor = () => {
+ const postSoftmax = this._postOptions.softmax || false;
+ let outputTensor;
+ if(postSoftmax) {
+ outputTensor = softmax(this._output);
+ } else {
+ outputTensor = this._output;
+ }
+ return outputTensor;
+ };
+}
\ No newline at end of file
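
Note: softmax above is the shared helper from examples/util. A minimal
stand-in, assuming a flat Float32Array of logits, would be:

    const softmax = (logits) => {
      const max = Math.max(...logits);            // shift for numerical stability
      const exps = Array.from(logits, (v) => Math.exp(v - max));
      const sum = exps.reduce((a, b) => a + b, 0);
      return exps.map((v) => v / sum);
    };
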
diff --git a/examples/image_classification/index.html b/examples/image_classification/index.html
index 9538964ba..345cdb2ac 100755
--- a/examples/image_classification/index.html
+++ b/examples/image_classification/index.html
@@ -371,6 +371,15 @@
Subgraphs Summary
+
@@ -390,6 +399,7 @@ Subgraphs Summary
+
@@ -408,6 +418,7 @@ Subgraphs Summary
+
diff --git a/examples/image_classification/main.js b/examples/image_classification/main.js
index 3bf693944..d8c925df1 100644
--- a/examples/image_classification/main.js
+++ b/examples/image_classification/main.js
@@ -1,6 +1,7 @@
const example = new ImageClassificationExample({model: imageClassificationModels});
let specialoffer = () => {
+ //http://localhost:8080/examples/image_classification_opencv/?prefer=none&b=WASM&m=resnet50_v1_openvino&s=image&d=0&f=WebNN
let f = parseSearchParams('f')
let url = location.href.replace('image_classification/', 'image_classification_opencv/')
let urlimg = url.replace('s=camera', 's=image')
@@ -27,6 +28,11 @@ $(document).ready(() => {
$('#frameworkWebNN').click(function() {
$('#opencvspecial').hide()
})
+
+ $('#frameworkOpenVINOjs').click(function() {
+ $('#opencvspecial').hide()
+ })
+
});
$(window).load(() => {
diff --git a/examples/index.html b/examples/index.html
index 6f119752d..b2437ea28 100644
--- a/examples/index.html
+++ b/examples/index.html
@@ -496,6 +496,15 @@
+
diff --git a/examples/semantic_segmentation/SemanticSegmentationExample.js b/examples/semantic_segmentation/SemanticSegmentationExample.js
index 44dce4306..ae67e69a1 100644
--- a/examples/semantic_segmentation/SemanticSegmentationExample.js
+++ b/examples/semantic_segmentation/SemanticSegmentationExample.js
@@ -149,6 +149,9 @@ class SemanticSegmentationExample extends BaseCameraExample {
case 'OpenCV.js':
runner = new SemanticSegmentationOpenCVRunner();
break;
+ case 'OpenVINO.js':
+ runner = new SemanticSegmentationOpenVINORunner();
+ break;
}
runner.setProgressHandler(updateLoadingProgressComponent);
return runner;
diff --git a/examples/semantic_segmentation/SemanticSegmentationOpenVINORunner.js b/examples/semantic_segmentation/SemanticSegmentationOpenVINORunner.js
new file mode 100644
index 000000000..8ad0f59d5
--- /dev/null
+++ b/examples/semantic_segmentation/SemanticSegmentationOpenVINORunner.js
@@ -0,0 +1,17 @@
+class SemanticSegmentationOpenVINORunner extends OpenVINORunner {
+ constructor() {
+ super();
+ }
+
+ /** @override */
+ _getOutputTensorTypedArray = () => {
+ return Int32Array;
+ };
+
+ /** @override */
+ _getOutputTensor = () => {
+ let outputTensor = this._output;
+ return outputTensor;
+ };
+
+}
\ No newline at end of file
diff --git a/examples/semantic_segmentation/index.html b/examples/semantic_segmentation/index.html
index 06439d004..04cfa6e89 100644
--- a/examples/semantic_segmentation/index.html
+++ b/examples/semantic_segmentation/index.html
@@ -390,6 +390,15 @@ Subgraphs Summary
+
@@ -411,6 +420,7 @@ Subgraphs Summary
+
@@ -436,6 +446,7 @@ Subgraphs Summary
+
diff --git a/examples/speech_commands/SpeechCommandsExample.js b/examples/speech_commands/SpeechCommandsExample.js
index ee0359996..c0334f6a3 100644
--- a/examples/speech_commands/SpeechCommandsExample.js
+++ b/examples/speech_commands/SpeechCommandsExample.js
@@ -33,7 +33,14 @@ class SpeechCommandsExample extends BaseMircophoneExample {
/** @override */
_createRunner = () => {
- const runner = new WebNNRunner();
+ let runner;
+ switch (this._currentFramework) {
+ case 'WebNN':
+ runner = new WebNNRunner();
+ break;
+ case 'OpenVINO.js':
+ runner = new OpenVINORunner();
+ }
runner.setProgressHandler(updateLoadingProgressComponent);
return runner;
};
diff --git a/examples/speech_commands/index.html b/examples/speech_commands/index.html
index 0c08924b3..bcbe4c3c6 100755
--- a/examples/speech_commands/index.html
+++ b/examples/speech_commands/index.html
@@ -402,6 +402,16 @@ Subgraphs Summary
+
+
@@ -420,6 +430,7 @@ Subgraphs Summary
+
diff --git a/examples/speech_recognition/SpeechRecognitionExample.js b/examples/speech_recognition/SpeechRecognitionExample.js
index 9316cd73f..114fbd02a 100644
--- a/examples/speech_recognition/SpeechRecognitionExample.js
+++ b/examples/speech_recognition/SpeechRecognitionExample.js
@@ -19,7 +19,14 @@ class SpeechRecognitionExample extends BaseMircophoneExample {
/** @override */
_createRunner = () => {
- const runner = new SpeechRecognitionRunner();
+ let runner;
+ switch (this._currentFramework) {
+ case 'WebNN':
+ runner = new SpeechRecognitionRunner();
+ break;
+ case 'OpenVINO.js':
+ runner = new SpeechRecognitionOpenVINORunner();
+ }
runner.setProgressHandler(updateLoadingProgressComponent);
return runner;
};
diff --git a/examples/speech_recognition/SpeechRecognitionOpenVINORunner.js b/examples/speech_recognition/SpeechRecognitionOpenVINORunner.js
new file mode 100644
index 000000000..f5e2693bf
--- /dev/null
+++ b/examples/speech_recognition/SpeechRecognitionOpenVINORunner.js
@@ -0,0 +1,22 @@
+class SpeechRecognitionOpenVINORunner extends OpenVINORunner {
+ constructor() {
+ super();
+ }
+
+ _getInputTensor = (input) => {
+ let infer_req = this._execNet.createInferRequest();
+ const input_blob = infer_req.getBlob(this._inputInfo.name());
+ const input_data = new Float32Array(input_blob.wmap());
+
+ for(let index = 0; index < input.length; index++) {
+ input_data[index] = input[index];
+ }
+ input_blob.unmap();
+ this._inferReq = infer_req;
+ };
+
+ _getOutputTensor = () => {
+ let outputTensor = this._output;
+ return outputTensor;
+ };
+ }
\ No newline at end of file
diff --git a/examples/speech_recognition/index.html b/examples/speech_recognition/index.html
index 9618dc050..31ca634ae 100755
--- a/examples/speech_recognition/index.html
+++ b/examples/speech_recognition/index.html
@@ -302,6 +302,16 @@ Subgraphs Summary
+
+
@@ -320,6 +330,7 @@ Subgraphs Summary
+
@@ -330,6 +341,7 @@ Subgraphs Summary
+
diff --git a/examples/static/js/ui.common.js b/examples/static/js/ui.common.js
index 97927fa9d..ced2497ac 100644
--- a/examples/static/js/ui.common.js
+++ b/examples/static/js/ui.common.js
@@ -176,7 +176,7 @@ const singleModelTable = (modelList, category) => {
const setModelComponents = (models, selectedModelIdStr) => {
$('.model').remove();
let formatTypes = [];
-
+ console.log(models);
for (let [category, modelList] of Object.entries(models)) {
let formats = singleModelTable(modelList, category);
formatTypes.push(...formats);
diff --git a/examples/util/BaseExample.js b/examples/util/BaseExample.js
index 6841916ab..1b7e31deb 100644
--- a/examples/util/BaseExample.js
+++ b/examples/util/BaseExample.js
@@ -14,8 +14,9 @@ class BaseExample extends BaseApp {
this._track = null;
this._stats = new Stats();
this._currentModelInfo = {};
- this._currentFramework; //'OpenCV.js' | 'WebNN'
+ this._currentFramework; //'OpenCV.js' | 'WebNN' | 'OpenVINO.js'
this._currentOpenCVJSBackend; // 'WASM' | 'SIMD' | 'Threads' | 'Threads+SIMD'
+ this._currentOpenVINOJSBackend;// 'WASM' | 'WebGL' | 'WebML' | 'WebGPU'
this._runtimeInitialized = false; // for 'OpenCV.js', always true for other framework
this._currentTimeoutId = 0;
this._isStreaming = false; // for inference camera video
@@ -152,6 +153,15 @@ class BaseExample extends BaseApp {
this._currentOpenCVJSBackend = backend;
};
+ /**
+ * This method is to set '_currentOpenVINOJSBackend'.
+ * @param {string} backend A string for the OpenVINO.js backend.
+ */
+ _setOpenVINOJSBackend = (backend) => {
+ this._currentOpenVINOJSBackend = backend;
+ };
+
/**
* This method is to set '_runtimeInitialized'.
* @param {boolean} flag A boolean that for whether OpenCV.js runtime initialized.
@@ -227,6 +237,14 @@ class BaseExample extends BaseApp {
}
locSearch = `?b=${this._currentOpenCVJSBackend}&m=${this._currentModelId}&s=${this._currentInputType}&d=${this._hiddenControlsFlag}&f=${this._currentFramework}`;
break;
+ case 'OpenVINO.js':
+ $('.backend').hide();
+ $('.opencvjsbackend').hide();
+ const openVINObackend = parseSearchParams('b');
+ this._setOpenVINOJSBackend(openVINObackend)
+ this._setRuntimeInitialized(true);
+ locSearch = `?m=${this.openVINObackend}&s=${this._currentInputType}&d=${this._hiddenControlsFlag}&f=${this._currentFramework}`;
+ break;
}
}
} else {
@@ -237,6 +255,9 @@ class BaseExample extends BaseApp {
case 'OpenCV.js':
locSearch = `?b=${this._currentOpenCVJSBackend}&m=${this._currentModelId}&s=${this._currentInputType}&d=${this._hiddenControlsFlag}&f=${this._currentFramework}`;
break;
+ case 'OpenVINO.js':
+ locSearch = `?b=${this._currentOpenVINOJSBackend}&m=${this._currentModelId}&s=${this._currentInputType}&d=${this._hiddenControlsFlag}&f=${this._currentFramework}`;
+ break;
}
}
window.history.pushState(null, null, locSearch);
@@ -283,6 +304,10 @@ class BaseExample extends BaseApp {
$('.opencvjsbackend').show();
updateOpenCVJSBackendComponentsStyle(this._currentOpenCVJSBackend);
break;
+ case 'OpenVINO.js':
+ $('.backend').hide();
+ $('.opencvjsbackend').hide();
+ break;
}
updateSIMDNotes();
};
@@ -374,6 +399,22 @@ class BaseExample extends BaseApp {
this._runner = null;
this._resetOutput();
this.main();
+ } else if (framework === 'OpenVINO.js') {
+ $('.opencvjsbackend').hide();
+ $('.backend').show();
+ $('#progressruntime').hide();
+ this._setFramework(framework);
+ this._setRuntimeInitialized(true);
+ this._updateHistoryEntryURL();
+ this._showDynamicComponents();
+ this._modelClickBinding();
+ this._runner = null;
+ this._resetOutput();
+ updateTitleComponent(this._currentBackend, null, this._currentModelId, this._inferenceModels);
+ this.main();
}
});
@@ -824,6 +865,7 @@ class BaseExample extends BaseApp {
* then shows the post processing of inference result on UI.
*/
main = async () => {
+ console.log(this._runtimeInitialized);
if (!this._runtimeInitialized) {
console.log(`Runtime isn't initialized`);
return;
@@ -834,7 +876,10 @@ class BaseExample extends BaseApp {
updateTitleComponent(this._currentBackend, this._currentPrefer, this._currentModelId, this._inferenceModels);
} else if (this._currentFramework === 'OpenCV.js') {
updateTitleComponent(this._currentOpenCVJSBackend, null, this._currentModelId, this._inferenceModels);
+ } else if (this._currentFramework === 'OpenVINO.js') {
+ updateTitleComponent(this._currentBackend, null, this._currentModelId, this._inferenceModels);
}
+ console.log(location.search);
if (this._currentModelId === 'none') {
showErrorComponent('No model selected', 'Please select model to start prediction.');
diff --git a/examples/util/OpenVINORunner.js b/examples/util/OpenVINORunner.js
new file mode 100644
index 000000000..fe5de90b8
--- /dev/null
+++ b/examples/util/OpenVINORunner.js
@@ -0,0 +1,318 @@
+var ie = null;
+
+try {
+ ie = require("inference-engine-node")
+} catch(e) {
+ console.log(e);
+}
+
+class OpenVINORunner extends BaseRunner {
+ constructor() {
+ super();
+ this._output = null;
+ this._rawModel = null;
+ this._ieCore = null;
+ this._network = null;
+ this._execNet = null;
+ this._tensor = null;
+ this._postOptions = null;
+ this._deQuantizeParams = null;
+ this._inputInfo = null;
+ this._outputInfo = null;
+ if (ie !== null) {
+ console.log("Create Core");
+ this._ieCore = ie.createCore();
+ }
+ }
+
+ _setDeQuantizeParams = (params) => {
+ this._deQuantizeParams = params;
+ };
+
+ /** @override */
+ _loadModelFile = async (url) => {
+ if (this._ieCore !== null) {
+ if (url !== undefined) {
+ const arrayBuffer = await this._loadURL(url, this._progressHandler, true);
+ const bytes = new Uint8Array(arrayBuffer);
+ switch (url.split('.').pop()) {
+ case 'bin':
+ const networkURL = url.replace(/bin$/, 'xml');
+ const networkFile = await this._loadURL(networkURL);
+ const weightsBuffer = bytes.buffer;
+ var network = await this._ieCore.readNetworkFromData(networkFile, weightsBuffer);
+ var inputs_info = network.getInputsInfo();
+ var outputs_info = network.getOutputsInfo();
+ this._network = network;
+ this._inputInfo = inputs_info[0];
+ let dims = this._inputInfo.getDims();
+ console.log(dims);
+ if (dims.length === 4) {
+ this._inputInfo.setLayout('nhwc');
+ }
+ this._outputInfo = outputs_info[0];
+ }
+ this._setLoadedFlag(true);
+ console.log("Load model successfully");
+ } else {
+ throw new Error(`No model file info; please check the model config in modelZoo.`);
+ }
+ } else {
+ throw new Error(`inference-engine-node failed to load; please check that Node.js integration is enabled.`);
+ }
+ };
+
+ /** @override */
+ doInitialization = (modelInfo) => {
+ this._setLoadedFlag(false);
+ this._setInitializedFlag(false);
+ this._setModelInfo(modelInfo);
+ this._setDeQuantizeParams([]);
+ };
+
+ /** @override */
+ _doCompile = async (options) => {
+ const modelFormat = this._currentModelInfo.format;
+ if (modelFormat === 'OpenVINO') {
+ let exec_net = await this._ieCore.loadNetwork(this._network, "CPU");
+ this._execNet = exec_net;
+
+ this._postOptions = this._currentModelInfo.postOptions || {};
+
+ } else {
+ throw new Error(`Unsupported '${this._currentModelInfo.format}' input`);
+ }
+ }
+
+ getDeQuantizeParams = () => {
+ return this._deQuantizeParams;
+ };
+
+ _getTensor = (input) => {
+ const image = input.src;
+ const options = input.options;
+ // var tensor = this._inputTensor[0];
+
+ image.width = image.videoWidth || image.naturalWidth;
+ image.height = image.videoHeight || image.naturalHeight;
+
+ const [height, width, channels] = options.inputSize;
+ const preOptions = options.preOptions || {};
+ const mean = preOptions.mean || [0, 0, 0, 0];
+ const std = preOptions.std || [1, 1, 1, 1];
+ const normlizationFlag = preOptions.norm || false;
+ const channelScheme = preOptions.channelScheme || 'RGB';
+ const imageChannels = options.imageChannels || 4; // RGBA
+ const drawOptions = options.drawOptions;
+
+ let canvasElement = document.createElement('canvas');
+ canvasElement.width = width;
+ canvasElement.height = height;
+ let canvasContext = canvasElement.getContext('2d');
+
+ if (drawOptions) {
+ canvasContext.drawImage(image, drawOptions.sx, drawOptions.sy, drawOptions.sWidth, drawOptions.sHeight,
+ 0, 0, drawOptions.dWidth, drawOptions.dHeight);
+ } else {
+ if (options.scaledFlag) {
+ const resizeRatio = Math.max(Math.max(image.width / width, image.height / height), 1);
+ const scaledWidth = Math.floor(image.width / resizeRatio);
+ const scaledHeight = Math.floor(image.height / resizeRatio);
+ canvasContext.drawImage(image, 0, 0, scaledWidth, scaledHeight);
+ } else {
+ canvasContext.drawImage(image, 0, 0, width, height);
+ }
+ }
+
+ let pixels = canvasContext.getImageData(0, 0, width, height).data;
+
+ if (normlizationFlag) {
+ pixels = new Float32Array(pixels).map(p => p / 255);
+ }
+
+ let infer_req = this._execNet.createInferRequest();
+ const input_blob = infer_req.getBlob(this._inputInfo.name());
+ const input_data = new Float32Array(input_blob.wmap());
+
+ if (channelScheme === 'RGB') {
+ if (channels > 1) {
+ for (let c = 0; c < channels; ++c) {
+ for (let h = 0; h < height; ++h) {
+ for (let w = 0; w < width; ++w) {
+ let value = pixels[h * width * imageChannels + w * imageChannels + c];
+ input_data[h * width * channels + w * channels + c] = (value - mean[c]) / std[c];
+ }
+ }
+ }
+ } else if (channels === 1) {
+ for (let c = 0; c < channels; ++c) {
+ for (let h = 0; h < height; ++h) {
+ for (let w = 0; w < width; ++w) {
+ let index = h * width * imageChannels + w * imageChannels + c;
+ let value = (pixels[index] + pixels[index + 1] + pixels[index + 2]) / 3;
+ input_data[h * width * channels + w * channels + c] = (value - mean[c]) / std[c];
+ }
+ }
+ }
+ }
+ } else if (channelScheme === 'BGR') {
+ for (let c = 0; c < channels; ++c) {
+ for (let h = 0; h < height; ++h) {
+ for (let w = 0; w < width; ++w) {
+ let value = pixels[h * width * imageChannels + w * imageChannels + (channels - c - 1)];
+ input_data[h * width * channels + w * channels + c] = (value - mean[c]) / std[c];
+ }
+ }
+ }
+ } else {
+ throw new Error(`Unsupported '${channelScheme}' color channel scheme`);
+ }
+
+ input_blob.unmap();
+ this._inferReq = infer_req;
+ }
+
+ /**
+ * This method is to get downsample audio buffer.
+ */
+ _downsampleAudioBuffer = (buffer, rate, baseRate) => {
+ if (rate == baseRate) {
+ return buffer;
+ }
+
+ if (baseRate > rate) {
+ throw "downsampling rate show be smaller than original sample rate";
+ }
+
+ const sampleRateRatio = Math.round(rate / baseRate);
+ const newLength = Math.round(buffer.length / sampleRateRatio);
+ let abuffer = new Float32Array(newLength);
+ let offsetResult = 0;
+ let offsetBuffer = 0;
+
+ while (offsetResult < abuffer.length) {
+ let nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
+ let accum = 0;
+ let count = 0;
+ for (let i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
+ accum += buffer[i];
+ count++;
+ }
+ abuffer[offsetResult] = accum / count;
+ offsetResult++;
+ offsetBuffer = nextOffsetBuffer;
+ }
+ return abuffer;
+ };
+
+ /**
+ * This method is to get audio mfccs array.
+ */
+ _getAudioMfccs = (pcm, sampleRate, windowSize, windowStride,
+ upperFrequencyLimit = 4000,
+ lowerFrequencyLimit = 20,
+ filterbankChannelCount = 40,
+ dctCoefficientCount = 13) => {
+ let pcmPtr = Module._malloc(8 * pcm.length);
+ let lenPtr = Module._malloc(4);
+
+ for (let i = 0; i < pcm.length; i++) {
+ Module.HEAPF64[pcmPtr / 8 + i] = pcm[i];
+ };
+
+ Module.HEAP32[lenPtr / 4] = pcm.length;
+ let tfMfccs = Module.cwrap('tf_mfccs', 'number',
+ ['number', 'number', 'number', 'number',
+ 'number', 'number', 'number', 'number', 'number']);
+ let mfccsPtr = tfMfccs(pcmPtr, lenPtr, sampleRate, windowSize,
+ windowStride, upperFrequencyLimit, lowerFrequencyLimit,
+ filterbankChannelCount, dctCoefficientCount);
+ let mfccsLen = Module.HEAP32[lenPtr >> 2];
+ let audioMfccs = [mfccsLen];
+
+ for (let i = 0; i < mfccsLen; i++) {
+ audioMfccs[i] = Module.HEAPF64[(mfccsPtr >> 3) + i];
+ }
+
+ Module._free(pcmPtr, lenPtr, mfccsPtr);
+ return audioMfccs;
+ };
+
+ _getTensorByAudio = async (input) => {
+ const audio = input.src;
+ const options = input.options;
+ const sampleRate = options.sampleRate;
+ const mfccsOptions = options.mfccsOptions;
+ const inputSize = options.inputSize.reduce((a, b) => a * b);
+
+ let audioContext = new (window.AudioContext || window.webkitAudioContext)();
+ let rate = audioContext.sampleRate;
+
+ let request = new Request(audio.src);
+ let response = await fetch(request);
+ let audioFileData = await response.arrayBuffer();
+ let audioDecodeData = await audioContext.decodeAudioData(audioFileData);
+ let audioPCMData = audioDecodeData.getChannelData(0);
+ let abuffer = this._downsampleAudioBuffer(audioPCMData, rate, sampleRate);
+
+ if (typeof mfccsOptions !== 'undefined') {
+ abuffer = this._getAudioMfccs(abuffer,
+ sampleRate,
+ mfccsOptions.windowSize,
+ mfccsOptions.windowStride,
+ mfccsOptions.upperFrequencyLimit,
+ mfccsOptions.lowerFrequencyLimit,
+ mfccsOptions.filterbankChannelCount,
+ mfccsOptions.dctCoefficientCount);
+ }
+
+ let infer_req = this._execNet.createInferRequest();
+ const input_blob = infer_req.getBlob(this._inputInfo.name());
+ const input_data = new Float32Array(input_blob.wmap());
+
+ let inputDataLen = input_data.length;
+ let abufferLen = abuffer.length;
+ const maxLen = inputDataLen > abufferLen? inputDataLen : abufferLen;
+
+ for (let i = 0; i < maxLen; i++) {
+ if (i >= inputDataLen) {
+ break;
+ } else if (i >= abufferLen) {
+ input_data[i] = 0;
+ } else {
+ input_data[i] = abuffer[i];
+ }
+ }
+
+ input_blob.unmap();
+ this._inferReq = infer_req;
+ }
+
+ /** @override */
+ _getInputTensor = async (input) => {
+ if (input.src.tagName === 'AUDIO') {
+ await this._getTensorByAudio(input);
+ } else {
+ this._getTensor(input);
+ }
+ };
+
+ _getOutputTensorTypedArray = () => {
+ const typedArray = this._currentModelInfo.isQuantized || false ? (this._currentModelInfo.isDNNL || this._currentModelInfo.isIE || false ? Float32Array : Uint8Array) : Float32Array;
+ return typedArray;
+ };
+
+ /** @override */
+ _getOutputTensor = () => {
+ return this._output;
+ };
+
+ /** @override */
+ _doInference = async () => {
+ await this._inferReq.startAsync();
+ const output_blob = this._inferReq.getBlob(this._outputInfo.name());
+ const typedArray = this._getOutputTensorTypedArray();
+ const output_data = new typedArray(output_blob.rmap());
+ this._output = output_data;
+ };
+}
\ No newline at end of file
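
Note: condensed, the inference-engine-node flow that OpenVINORunner wraps
looks like the sketch below (method names as used in the runner above;
xmlText, weightsBuffer, and inputTensor are placeholders; error handling
omitted; assumes an async context):

    const ie = require('inference-engine-node');
    const core = ie.createCore();
    const net = await core.readNetworkFromData(xmlText, weightsBuffer);
    const inputInfo = net.getInputsInfo()[0];
    const outputInfo = net.getOutputsInfo()[0];
    if (inputInfo.getDims().length === 4) inputInfo.setLayout('nhwc');
    const execNet = await core.loadNetwork(net, 'CPU');
    const req = execNet.createInferRequest();
    const inBlob = req.getBlob(inputInfo.name());
    new Float32Array(inBlob.wmap()).set(inputTensor);  // write preprocessed input
    inBlob.unmap();
    await req.startAsync();
    const result = new Float32Array(req.getBlob(outputInfo.name()).rmap());
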
diff --git a/examples/util/modelZoo.js b/examples/util/modelZoo.js
index 9bce8d132..20c33f673 100644
--- a/examples/util/modelZoo.js
+++ b/examples/util/modelZoo.js
@@ -276,7 +276,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1608.06993'
}, {
modelName: 'SqueezeNet (OpenVino)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'squeezenet_openvino',
modelSize: '4.9MB',
@@ -292,7 +292,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1602.07360'
}, {
modelName: 'MobileNet v1 (OpenVino)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'mobilenet_v1_openvino',
modelSize: '16.9MB',
@@ -309,7 +309,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/pdf/1704.04861.pdf'
}, {
modelName: 'MobileNet v2 (OpenVino)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'mobilenet_v2_openvino',
modelSize: '14.0MB',
@@ -326,7 +326,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1801.04381'
}, {
modelName: 'ResNet50 v1 (OpenVino)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'resnet50_v1_openvino',
modelSize: '102.1MB',
@@ -342,7 +342,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1512.03385'
}, {
modelName: 'DenseNet 121 (OpenVino)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'densenet_121_openvino',
modelSize: '31.9MB',
@@ -364,7 +364,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1608.06993'
}, {
modelName: 'Inception v2 (OpenVino)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'inception_v2_openvino',
modelSize: '44.7MB',
@@ -380,7 +380,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1512.00567'
}, {
modelName: 'Inception v4 (OpenVino)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'inception_v4_openvino',
modelSize: '170.6MB',
@@ -713,7 +713,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1802.02611'
}, {
modelName: 'Deeplab_224_Atrous (OpenVINO)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'deeplab_mobilenet_v2_224_atrous_openvino',
modelSize: '8.4MB',
@@ -730,7 +730,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1802.02611'
}, {
modelName: 'Deeplab_224_Quant (OpenVINO)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'deeplab_224_quant_openvino',
isQuantized: true,
@@ -749,7 +749,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1802.02611'
}, {
modelName: 'Deeplab_257_Atrous (OpenVINO)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'deeplab_mobilenet_v2_257_atrous_openvino',
modelSize: '8.4MB',
@@ -766,7 +766,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1802.02611'
}, {
modelName: 'Deeplab_257_Quant (OpenVINO)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'deeplab_257_quant_openvino',
isQuantized: true,
@@ -785,7 +785,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1802.02611'
}, {
modelName: 'Deeplab_321_Atrous (OpenVINO)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'deeplab_mobilenet_v2_321_atrous_openvino',
modelSize: '8.4MB',
@@ -802,7 +802,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1802.02611'
}, {
modelName: 'Deeplab_321_Quant (OpenVINO)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'deeplab_321_quant_openvino',
isQuantized: true,
@@ -821,7 +821,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1802.02611'
}, {
modelName: 'Deeplab_513_Atrous (OpenVINO)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'deeplab_mobilenet_v2_513_atrous_openvino',
modelSize: '8.4MB',
@@ -838,7 +838,7 @@ const modelZoo = {
paperUrl: 'https://arxiv.org/abs/1802.02611'
}, {
modelName: 'Deeplab_513_Quant (OpenVINO)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'deeplab_513_quant_openvino',
isQuantized: true,
@@ -1083,7 +1083,7 @@ const modelZoo = {
paperUrl: 'https://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf'
}, {
modelName: 'KWS DNN (OpenVINO)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'kws_dnn_openvino',
modelSize: '320kB',
@@ -1153,7 +1153,7 @@ const modelZoo = {
speechRecognitionModels: [{
modelName: 'wsj_dnn5b (OpenVINO)',
- framework: ['WebNN'],
+ framework: ['WebNN', 'OpenVINO.js'],
format: 'OpenVINO',
modelId: 'wsj_dnn5b_openvino',
modelSize: '115.6MB',
diff --git a/examples/util/openvino/OpenVINOModelImporter.js b/examples/util/openvino/OpenVINOModelImporter.js
index 1343da722..36dd944e2 100644
--- a/examples/util/openvino/OpenVINOModelImporter.js
+++ b/examples/util/openvino/OpenVINOModelImporter.js
@@ -46,6 +46,9 @@ class OpenVINOModelImporter {
supportedOps: this._supportedOps,
isOpenVINOModel: true,
};
+ console.log(this._backend);
+ console.log(this._bEagerMode);
+ console.log(this._supportedOps);
this._model = await this._nn.createModel(options);
this._addTensorOperands();
diff --git a/package.json b/package.json
index 3658c0ddf..127060930 100644
--- a/package.json
+++ b/package.json
@@ -51,6 +51,10 @@
"typescript": "^3.9.5",
"webpack": "^4.26.0",
"webpack-cli": "^3.1.2",
- "webpack-dev-server": "^3.1.10"
+ "webpack-dev-server": "^3.1.10",
+ "@webgpu/glslang": "0.0.12",
+ "@webgpu/types": "0.0.24",
+ "@tensorflow/tfjs-backend-webgpu": "0.0.1-alpha.4",
+ "inference-engine-node": "^0.1.4"
}
}
From d07153181d96ccdba870f4dc05a0651219e99b2c Mon Sep 17 00:00:00 2001
From: lionkun <871518554@qq.com>
Date: Tue, 10 Nov 2020 11:31:12 +0800
Subject: [PATCH 2/4] Modify the code style
---
.../ImageClassificationExample.js | 5 +-
.../ImageClassificationOpenVINORunner.js | 28 +-
.../SemanticSegmentationOpenVINORunner.js | 1 -
.../speech_commands/SpeechCommandsExample.js | 1 +
.../SpeechRecognitionExample.js | 1 +
.../SpeechRecognitionOpenVINORunner.js | 38 +-
examples/util/OpenVINORunner.js | 557 +++++++++---------
package.json | 3 -
8 files changed, 312 insertions(+), 322 deletions(-)
diff --git a/examples/image_classification/ImageClassificationExample.js b/examples/image_classification/ImageClassificationExample.js
index 442f67059..33f4c1a5e 100644
--- a/examples/image_classification/ImageClassificationExample.js
+++ b/examples/image_classification/ImageClassificationExample.js
@@ -22,6 +22,7 @@ class ImageClassificationExample extends BaseCameraExample {
break;
case 'OpenVINO.js':
runner = new ImageClassificationOpenVINORunner();
+ break;
}
runner.setProgressHandler(updateLoadingProgressComponent);
return runner;
@@ -30,17 +31,15 @@ class ImageClassificationExample extends BaseCameraExample {
/** @override */
_processExtra = (output) => {
let labelClasses;
- console.log(output);
switch (this._currentFramework) {
case 'WebNN':
const deQuantizeParams = this._runner.getDeQuantizeParams();
labelClasses = getTopClasses(output.tensor, output.labels, 3, deQuantizeParams);
break;
case 'OpenCV.js':
- labelClasses = getTopClasses(output.tensor, output.labels, 3);
- break;
case 'OpenVINO.js':
labelClasses = getTopClasses(output.tensor, output.labels, 3);
+ break;
}
$('#inferenceresult').show();
labelClasses.forEach((c, i) => {
diff --git a/examples/image_classification/ImageClassificationOpenVINORunner.js b/examples/image_classification/ImageClassificationOpenVINORunner.js
index 7e88ee6e6..f748a4917 100644
--- a/examples/image_classification/ImageClassificationOpenVINORunner.js
+++ b/examples/image_classification/ImageClassificationOpenVINORunner.js
@@ -1,17 +1,17 @@
class ImageClassificationOpenVINORunner extends OpenVINORunner {
- constructor() {
- super();
+ constructor() {
+ super();
+ }
+
+ /** @override */
+ _getOutputTensor = () => {
+ const postSoftmax = this._postOptions.softmax || false;
+ let outputTensor;
+ if(postSoftmax) {
+ outputTensor = softmax(this._output);
+ } else {
+ outputTensor = this._output;
}
-
- /** @override */
- _getOutputTensor = () => {
- const postSoftmax = this._postOptions.softmax || false;
- let outputTensor;
- if(postSoftmax) {
- outputTensor = softmax(this._output);
- } else {
- outputTensor = this._output;
- }
- return outputTensor;
- };
+ return outputTensor;
+ };
}
\ No newline at end of file
diff --git a/examples/semantic_segmentation/SemanticSegmentationOpenVINORunner.js b/examples/semantic_segmentation/SemanticSegmentationOpenVINORunner.js
index 8ad0f59d5..fd2e9e41a 100644
--- a/examples/semantic_segmentation/SemanticSegmentationOpenVINORunner.js
+++ b/examples/semantic_segmentation/SemanticSegmentationOpenVINORunner.js
@@ -13,5 +13,4 @@ class SemanticSegmentationOpenVINORunner extends OpenVINORunner {
let outputTensor = this._output;
return outputTensor;
};
-
}
\ No newline at end of file
diff --git a/examples/speech_commands/SpeechCommandsExample.js b/examples/speech_commands/SpeechCommandsExample.js
index c0334f6a3..bbaa4b4ce 100644
--- a/examples/speech_commands/SpeechCommandsExample.js
+++ b/examples/speech_commands/SpeechCommandsExample.js
@@ -40,6 +40,7 @@ class SpeechCommandsExample extends BaseMircophoneExample {
break;
case 'OpenVINO.js':
runner = new OpenVINORunner();
+ break;
}
runner.setProgressHandler(updateLoadingProgressComponent);
return runner;
diff --git a/examples/speech_recognition/SpeechRecognitionExample.js b/examples/speech_recognition/SpeechRecognitionExample.js
index 114fbd02a..a1330375b 100644
--- a/examples/speech_recognition/SpeechRecognitionExample.js
+++ b/examples/speech_recognition/SpeechRecognitionExample.js
@@ -26,6 +26,7 @@ class SpeechRecognitionExample extends BaseMircophoneExample {
break;
case 'OpenVINO.js':
runner = new SpeechRecognitionOpenVINORunner();
+ break;
}
runner.setProgressHandler(updateLoadingProgressComponent);
return runner;
diff --git a/examples/speech_recognition/SpeechRecognitionOpenVINORunner.js b/examples/speech_recognition/SpeechRecognitionOpenVINORunner.js
index f5e2693bf..764aab27b 100644
--- a/examples/speech_recognition/SpeechRecognitionOpenVINORunner.js
+++ b/examples/speech_recognition/SpeechRecognitionOpenVINORunner.js
@@ -1,22 +1,22 @@
class SpeechRecognitionOpenVINORunner extends OpenVINORunner {
- constructor() {
- super();
- }
-
- _getInputTensor = (input) => {
- let infer_req = this._execNet.createInferRequest();
- const input_blob = infer_req.getBlob(this._inputInfo.name());
- const input_data = new Float32Array(input_blob.wmap());
+ constructor() {
+ super();
+ }
+
+ _getInputTensor = (input) => {
+ let infer_req = this._execNet.createInferRequest();
+ const input_blob = infer_req.getBlob(this._inputInfo.name());
+ const input_data = new Float32Array(input_blob.wmap());
- for(let index = 0; index < input.length; index++) {
- input_data[index] = input[index];
- }
- input_blob.unmap();
- this._inferReq = infer_req;
- };
+ for(let index = 0; index < input.length; index++) {
+ input_data[index] = input[index];
+ }
+ input_blob.unmap();
+ this._inferReq = infer_req;
+ };
- _getOutputTensor = () => {
- let outputTensor = this._output;
- return outputTensor;
- };
- }
\ No newline at end of file
+ _getOutputTensor = () => {
+ let outputTensor = this._output;
+ return outputTensor;
+ };
+}
\ No newline at end of file
diff --git a/examples/util/OpenVINORunner.js b/examples/util/OpenVINORunner.js
index fe5de90b8..8c117767b 100644
--- a/examples/util/OpenVINORunner.js
+++ b/examples/util/OpenVINORunner.js
@@ -1,318 +1,311 @@
var ie = null;
-
try {
- ie = require("inference-engine-node")
+ ie = require("inference-engine-node")
} catch(e) {
console.log(e);
}
class OpenVINORunner extends BaseRunner {
- constructor() {
- super();
- this._output = null;
- this._rawModel = null;
- this._ieCore = null;
- this._network = null;
- this._execNet = null;
- this._tensor = null;
- this._postOptions = null;
- this._deQuantizeParams = null;
- this._inputInfo = null;
- this._outputInfo = null;
- if (ie !== null) {
- console.log("Create Core");
- this._ieCore = ie.createCore();
- }
+ constructor() {
+ super();
+ this._output = null;
+ this._rawModel = null;
+ this._ieCore = null;
+ this._network = null;
+ this._execNet = null;
+ this._tensor = null;
+ this._postOptions = null;
+ this._deQuantizeParams = null;
+ this._inputInfo = null;
+ this._outputInfo = null;
+ if (ie !== null) {
+ this._ieCore = ie.createCore();
}
-
- _setDeQuantizeParams = (params) => {
- this._deQuantizeParams = params;
- };
-
- /** @override */
- _loadModelFile = async (url) => {
- if (this._ieCore !== null) {
- if (url !== undefined) {
- const arrayBuffer = await this._loadURL(url, this._progressHandler, true);
- const bytes = new Uint8Array(arrayBuffer);
- switch (url.split('.').pop()) {
- case 'bin':
- const networkURL = url.replace(/bin$/, 'xml');
- const networkFile = await this._loadURL(networkURL);
- const weightsBuffer = bytes.buffer;
- var network = await this._ieCore.readNetworkFromData(networkFile, weightsBuffer);
- var inputs_info = network.getInputsInfo();
- var outputs_info = network.getOutputsInfo();
- this._network = network;
- this._inputInfo = inputs_info[0];
- let dims = this._inputInfo.getDims();
- console.log(dims);
- if (dims.length === 4) {
- this._inputInfo.setLayout('nhwc');
- }
- this._outputInfo = outputs_info[0];
+ }
+
+ _setDeQuantizeParams = (params) => {
+ this._deQuantizeParams = params;
+ };
+
+ /** @override */
+ _loadModelFile = async (url) => {
+ if (this._ieCore !== null) {
+ if (url !== undefined) {
+ const arrayBuffer = await this._loadURL(url, this._progressHandler, true);
+ const bytes = new Uint8Array(arrayBuffer);
+ switch (url.split('.').pop()) {
+ case 'bin':
+ const networkURL = url.replace(/bin$/, 'xml');
+ const networkFile = await this._loadURL(networkURL);
+ const weightsBuffer = bytes.buffer;
+ var network = await this._ieCore.readNetworkFromData(networkFile, weightsBuffer);
+ var inputs_info = network.getInputsInfo();
+ var outputs_info = network.getOutputsInfo();
+ this._network = network;
+ this._inputInfo = inputs_info[0];
+ let dims = this._inputInfo.getDims();
+ if (dims.length === 4) {
+ this._inputInfo.setLayout('nhwc');
}
- this._setLoadedFlag(true);
- console.log("Load model successfully");
- } else {
- throw new Error(`No model file info; please check the model config in modelZoo.`);
- }
- } else {
- throw new Error(`inference-engine-node failed to load; please check that Node.js integration is enabled.`);
+ this._outputInfo = outputs_info[0];
}
- };
-
- /** @override */
- doInitialization = (modelInfo) => {
- this._setLoadedFlag(false);
- this._setInitializedFlag(false);
- this._setModelInfo(modelInfo);
- this._setDeQuantizeParams([]);
- };
-
- /** @override */
- _doCompile = async (options) => {
- const modelFormat = this._currentModelInfo.format;
- if (modelFormat === 'OpenVINO') {
- let exec_net = await this._ieCore.loadNetwork(this._network, "CPU");
- this._execNet = exec_net;
-
- this._postOptions = this._currentModelInfo.postOptions || {};
-
+ this._setLoadedFlag(true);
} else {
- throw new Error(`Unsupported '${this._currentModelInfo.format}' input`);
+ throw new Error(`No model file info; please check the model config in modelZoo.`);
+ }
+ } else {
+ throw new Error(`inference-engine-node failed to load; please check that Node.js integration is enabled.`);
+ }
+ };
+
+ /** @override */
+ doInitialization = (modelInfo) => {
+ this._setLoadedFlag(false);
+ this._setInitializedFlag(false);
+ this._setModelInfo(modelInfo);
+ this._setDeQuantizeParams([]);
+ };
+
+ /** @override */
+ _doCompile = async (options) => {
+ const modelFormat = this._currentModelInfo.format;
+ if (modelFormat === 'OpenVINO') {
+ let exec_net = await this._ieCore.loadNetwork(this._network, "CPU");
+ this._execNet = exec_net;
+ this._postOptions = this._currentModelInfo.postOptions || {};
+ } else {
+ throw new Error(`Unsupported '${this._currentModelInfo.format}' input`);
+ }
+ }
+
+ getDeQuantizeParams = () => {
+ return this._deQuantizeParams;
+ };
+
+ _getTensor = (input) => {
+ const image = input.src;
+ const options = input.options;
+
+ image.width = image.videoWidth || image.naturalWidth;
+ image.height = image.videoHeight || image.naturalHeight;
+
+ const [height, width, channels] = options.inputSize;
+ const preOptions = options.preOptions || {};
+ const mean = preOptions.mean || [0, 0, 0, 0];
+ const std = preOptions.std || [1, 1, 1, 1];
+ const normlizationFlag = preOptions.norm || false;
+ const channelScheme = preOptions.channelScheme || 'RGB';
+ const imageChannels = options.imageChannels || 4; // RGBA
+ const drawOptions = options.drawOptions;
+
+ let canvasElement = document.createElement('canvas');
+ canvasElement.width = width;
+ canvasElement.height = height;
+ let canvasContext = canvasElement.getContext('2d');
+
+ if (drawOptions) {
+ canvasContext.drawImage(image, drawOptions.sx, drawOptions.sy, drawOptions.sWidth, drawOptions.sHeight,
+ 0, 0, drawOptions.dWidth, drawOptions.dHeight);
+ } else {
+ if (options.scaledFlag) {
+ const resizeRatio = Math.max(Math.max(image.width / width, image.height / height), 1);
+ const scaledWidth = Math.floor(image.width / resizeRatio);
+ const scaledHeight = Math.floor(image.height / resizeRatio);
+ canvasContext.drawImage(image, 0, 0, scaledWidth, scaledHeight);
+ } else {
+ canvasContext.drawImage(image, 0, 0, width, height);
}
}
- getDeQuantizeParams = () => {
- return this._deQuantizeParams;
- };
-
- _getTensor = (input) => {
- const image = input.src;
- const options = input.options;
- // var tensor = this._inputTensor[0];
-
- image.width = image.videoWidth || image.naturalWidth;
- image.height = image.videoHeight || image.naturalHeight;
-
- const [height, width, channels] = options.inputSize;
- const preOptions = options.preOptions || {};
- const mean = preOptions.mean || [0, 0, 0, 0];
- const std = preOptions.std || [1, 1, 1, 1];
- const normlizationFlag = preOptions.norm || false;
- const channelScheme = preOptions.channelScheme || 'RGB';
- const imageChannels = options.imageChannels || 4; // RGBA
- const drawOptions = options.drawOptions;
-
- let canvasElement = document.createElement('canvas');
- canvasElement.width = width;
- canvasElement.height = height;
- let canvasContext = canvasElement.getContext('2d');
-
- if (drawOptions) {
- canvasContext.drawImage(image, drawOptions.sx, drawOptions.sy, drawOptions.sWidth, drawOptions.sHeight,
- 0, 0, drawOptions.dWidth, drawOptions.dHeight);
- } else {
- if (options.scaledFlag) {
- const resizeRatio = Math.max(Math.max(image.width / width, image.height / height), 1);
- const scaledWidth = Math.floor(image.width / resizeRatio);
- const scaledHeight = Math.floor(image.height / resizeRatio);
- canvasContext.drawImage(image, 0, 0, scaledWidth, scaledHeight);
- } else {
- canvasContext.drawImage(image, 0, 0, width, height);
- }
- }
-
- let pixels = canvasContext.getImageData(0, 0, width, height).data;
+ let pixels = canvasContext.getImageData(0, 0, width, height).data;
- if (normlizationFlag) {
- pixels = new Float32Array(pixels).map(p => p / 255);
- }
+ if (normlizationFlag) {
+ pixels = new Float32Array(pixels).map(p => p / 255);
+ }
- let infer_req = this._execNet.createInferRequest();
- const input_blob = infer_req.getBlob(this._inputInfo.name());
- const input_data = new Float32Array(input_blob.wmap());
-
- if (channelScheme === 'RGB') {
- if (channels > 1) {
- for (let c = 0; c < channels; ++c) {
- for (let h = 0; h < height; ++h) {
- for (let w = 0; w < width; ++w) {
- let value = pixels[h * width * imageChannels + w * imageChannels + c];
- input_data[h * width * channels + w * channels + c] = (value - mean[c]) / std[c];
- }
- }
- }
- } else if (channels === 1) {
- for (let c = 0; c < channels; ++c) {
- for (let h = 0; h < height; ++h) {
- for (let w = 0; w < width; ++w) {
- let index = h * width * imageChannels + w * imageChannels + c;
- let value = (pixels[index] + pixels[index + 1] + pixels[index + 2]) / 3;
- input_data[h * width * channels + w * channels + c] = (value - mean[c]) / std[c];
- }
- }
+ let infer_req = this._execNet.createInferRequest();
+ const input_blob = infer_req.getBlob(this._inputInfo.name());
+ const input_data = new Float32Array(input_blob.wmap());
+
+ if (channelScheme === 'RGB') {
+ if (channels > 1) {
+ for (let c = 0; c < channels; ++c) {
+ for (let h = 0; h < height; ++h) {
+ for (let w = 0; w < width; ++w) {
+ let value = pixels[h * width * imageChannels + w * imageChannels + c];
+ input_data[h * width * channels + w * channels + c] = (value - mean[c]) / std[c];
}
}
- } else if (channelScheme === 'BGR') {
- for (let c = 0; c < channels; ++c) {
- for (let h = 0; h < height; ++h) {
- for (let w = 0; w < width; ++w) {
- let value = pixels[h * width * imageChannels + w * imageChannels + (channels - c - 1)];
- input_data[h * width * channels + w * channels + c] = (value - mean[c]) / std[c];
- }
+ }
+ } else if (channels === 1) {
+ for (let c = 0; c < channels; ++c) {
+ for (let h = 0; h < height; ++h) {
+ for (let w = 0; w < width; ++w) {
+ let index = h * width * imageChannels + w * imageChannels + c;
+ let value = (pixels[index] + pixels[index + 1] + pixels[index + 2]) / 3;
+ input_data[h * width * channels + w * channels + c] = (value - mean[c]) / std[c];
}
}
- } else {
- throw new Error(`Unsupported '${channelScheme}' color channel scheme`);
}
-
- input_blob.unmap();
- this._inferReq = infer_req;
+ }
+ } else if (channelScheme === 'BGR') {
+ for (let c = 0; c < channels; ++c) {
+ for (let h = 0; h < height; ++h) {
+ for (let w = 0; w < width; ++w) {
+ let value = pixels[h * width * imageChannels + w * imageChannels + (channels - c - 1)];
+ input_data[h * width * channels + w * channels + c] = (value - mean[c]) / std[c];
+ }
+ }
+ }
+ } else {
+ throw new Error(`Unsupported '${channelScheme}' color channel scheme`);
}
- /**
- * This method is to get downsample audio buffer.
- */
- _downsampleAudioBuffer = (buffer, rate, baseRate) => {
- if (rate == baseRate) {
- return buffer;
- }
+ input_blob.unmap();
+ this._inferReq = infer_req;
+ }
- if (baseRate > rate) {
- throw "downsampling rate show be smaller than original sample rate";
- }
+ /**
+ * This method is to get downsample audio buffer.
+ */
+ _downsampleAudioBuffer = (buffer, rate, baseRate) => {
+ if (rate == baseRate) {
+ return buffer;
+ }
- const sampleRateRatio = Math.round(rate / baseRate);
- const newLength = Math.round(buffer.length / sampleRateRatio);
- let abuffer = new Float32Array(newLength);
- let offsetResult = 0;
- let offsetBuffer = 0;
-
- while (offsetResult < abuffer.length) {
- let nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
- let accum = 0;
- let count = 0;
- for (let i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
- accum += buffer[i];
- count++;
- }
- abuffer[offsetResult] = accum / count;
- offsetResult++;
- offsetBuffer = nextOffsetBuffer;
- }
- return abuffer;
- };
+ if (baseRate > rate) {
+ throw "downsampling rate show be smaller than original sample rate";
+ }
- /**
- * This method is to get audio mfccs array.
- */
- _getAudioMfccs = (pcm, sampleRate, windowSize, windowStride,
- upperFrequencyLimit = 4000,
- lowerFrequencyLimit = 20,
- filterbankChannelCount = 40,
- dctCoefficientCount = 13) => {
- let pcmPtr = Module._malloc(8 * pcm.length);
- let lenPtr = Module._malloc(4);
-
- for (let i = 0; i < pcm.length; i++) {
- Module.HEAPF64[pcmPtr / 8 + i] = pcm[i];
- };
-
- Module.HEAP32[lenPtr / 4] = pcm.length;
- let tfMfccs = Module.cwrap('tf_mfccs', 'number',
- ['number', 'number', 'number', 'number',
- 'number', 'number', 'number', 'number', 'number']);
- let mfccsPtr = tfMfccs(pcmPtr, lenPtr, sampleRate, windowSize,
- windowStride, upperFrequencyLimit, lowerFrequencyLimit,
- filterbankChannelCount, dctCoefficientCount);
- let mfccsLen = Module.HEAP32[lenPtr >> 2];
- let audioMfccs = [mfccsLen];
-
- for (let i = 0; i < mfccsLen; i++) {
- audioMfccs[i] = Module.HEAPF64[(mfccsPtr >> 3) + i];
+ const sampleRateRatio = Math.round(rate / baseRate);
+ const newLength = Math.round(buffer.length / sampleRateRatio);
+ let abuffer = new Float32Array(newLength);
+ let offsetResult = 0;
+ let offsetBuffer = 0;
+
+ while (offsetResult < abuffer.length) {
+ let nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
+ let accum = 0;
+ let count = 0;
+ for (let i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
+ accum += buffer[i];
+ count++;
}
-
- Module._free(pcmPtr, lenPtr, mfccsPtr);
- return audioMfccs;
+ abuffer[offsetResult] = accum / count;
+ offsetResult++;
+ offsetBuffer = nextOffsetBuffer;
+ }
+ return abuffer;
+ };
+
+ /**
+ * This method is to get audio mfccs array.
+ */
+ _getAudioMfccs = (pcm, sampleRate, windowSize, windowStride,
+ upperFrequencyLimit = 4000,
+ lowerFrequencyLimit = 20,
+ filterbankChannelCount = 40,
+ dctCoefficientCount = 13) => {
+ let pcmPtr = Module._malloc(8 * pcm.length);
+ let lenPtr = Module._malloc(4);
+
+ for (let i = 0; i < pcm.length; i++) {
+ Module.HEAPF64[pcmPtr / 8 + i] = pcm[i];
};
- _getTensorByAudio = async (input) => {
- const audio = input.src;
- const options = input.options;
- const sampleRate = options.sampleRate;
- const mfccsOptions = options.mfccsOptions;
- const inputSize = options.inputSize.reduce((a, b) => a * b);
-
- let audioContext = new (window.AudioContext || window.webkitAudioContext)();
- let rate = audioContext.sampleRate;
-
- let request = new Request(audio.src);
- let response = await fetch(request);
- let audioFileData = await response.arrayBuffer();
- let audioDecodeData = await audioContext.decodeAudioData(audioFileData);
- let audioPCMData = audioDecodeData.getChannelData(0);
- let abuffer = this._downsampleAudioBuffer(audioPCMData, rate, sampleRate);
-
- if (typeof mfccsOptions !== 'undefined') {
- abuffer = this._getAudioMfccs(abuffer,
- sampleRate,
- mfccsOptions.windowSize,
- mfccsOptions.windowStride,
- mfccsOptions.upperFrequencyLimit,
- mfccsOptions.lowerFrequencyLimit,
- mfccsOptions.filterbankChannelCount,
- mfccsOptions.dctCoefficientCount);
- }
-
- let infer_req = this._execNet.createInferRequest();
- const input_blob = infer_req.getBlob(this._inputInfo.name());
- const input_data = new Float32Array(input_blob.wmap());
-
- let inputDataLen = input_data.length;
- let abufferLen = abuffer.length;
- const maxLen = inputDataLen > abufferLen? inputDataLen : abufferLen;
-
- for (let i = 0; i < maxLen; i++) {
- if (i >= inputDataLen) {
- break;
- } else if (i >= abufferLen) {
- input_data[i] = 0;
- } else {
- input_data[i] = abuffer[i];
- }
- }
+ Module.HEAP32[lenPtr / 4] = pcm.length;
+ let tfMfccs = Module.cwrap('tf_mfccs', 'number',
+ ['number', 'number', 'number', 'number',
+ 'number', 'number', 'number', 'number', 'number']);
+ let mfccsPtr = tfMfccs(pcmPtr, lenPtr, sampleRate, windowSize,
+ windowStride, upperFrequencyLimit, lowerFrequencyLimit,
+ filterbankChannelCount, dctCoefficientCount);
+ let mfccsLen = Module.HEAP32[lenPtr >> 2];
+ let audioMfccs = [mfccsLen];
+
+ for (let i = 0; i < mfccsLen; i++) {
+ audioMfccs[i] = Module.HEAPF64[(mfccsPtr >> 3) + i];
+ }
- input_blob.unmap();
- this._inferReq = infer_req;
+ Module._free(pcmPtr, lenPtr, mfccsPtr);
+ return audioMfccs;
+ };
+
+ _getTensorByAudio = async (input) => {
+ const audio = input.src;
+ const options = input.options;
+ const sampleRate = options.sampleRate;
+ const mfccsOptions = options.mfccsOptions;
+ const inputSize = options.inputSize.reduce((a, b) => a * b);
+
+ let audioContext = new (window.AudioContext || window.webkitAudioContext)();
+ let rate = audioContext.sampleRate;
+
+ let request = new Request(audio.src);
+ let response = await fetch(request);
+ let audioFileData = await response.arrayBuffer();
+ let audioDecodeData = await audioContext.decodeAudioData(audioFileData);
+ let audioPCMData = audioDecodeData.getChannelData(0);
+ let abuffer = this._downsampleAudioBuffer(audioPCMData, rate, sampleRate);
+
+ if (typeof mfccsOptions !== 'undefined') {
+ abuffer = this._getAudioMfccs(abuffer,
+ sampleRate,
+ mfccsOptions.windowSize,
+ mfccsOptions.windowStride,
+ mfccsOptions.upperFrequencyLimit,
+ mfccsOptions.lowerFrequencyLimit,
+ mfccsOptions.filterbankChannelCount,
+ mfccsOptions.dctCoefficientCount);
}
- /** @override */
- _getInputTensor = async (input) => {
- if (input.src.tagName === 'AUDIO') {
- await this._getTensorByAudio(input);
- } else {
- this._getTensor(input);
- }
- };
+ let infer_req = this._execNet.createInferRequest();
+ const input_blob = infer_req.getBlob(this._inputInfo.name());
+ const input_data = new Float32Array(input_blob.wmap());
- _getOutputTensorTypedArray = () => {
- const typedArray = this._currentModelInfo.isQuantized || false ? (this._currentModelInfo.isDNNL || this._currentModelInfo.isIE || false ? Float32Array : Uint8Array) : Float32Array;
- return typedArray;
- };
+ let inputDataLen = input_data.length;
+ let abufferLen = abuffer.length;
+ const maxLen = inputDataLen > abufferLen? inputDataLen : abufferLen;
- /** @override */
- _getOutputTensor = () => {
- return this._output;
- };
+ for (let i = 0; i < maxLen; i++) {
+ if (i >= inputDataLen) {
+ break;
+ } else if (i >= abufferLen) {
+ input_data[i] = 0;
+ } else {
+ input_data[i] = abuffer[i];
+ }
+ }
- /** @override */
- _doInference = async () => {
- await this._inferReq.startAsync();
- const output_blob = this._inferReq.getBlob(this._outputInfo.name());
- const typedArray = this._getOutputTensorTypedArray();
- const output_data = new typedArray(output_blob.rmap());
- this._output = output_data;
- };
+ input_blob.unmap();
+ this._inferReq = infer_req;
+ }
+
+ /** @override */
+ _getInputTensor = async (input) => {
+ if (input.src.tagName === 'AUDIO') {
+ await this._getTensorByAudio(input);
+ } else {
+ this._getTensor(input);
+ }
+ };
+
+ _getOutputTensorTypedArray = () => {
+ const typedArray = this._currentModelInfo.isQuantized || false ? (this._currentModelInfo.isDNNL || this._currentModelInfo.isIE || false ? Float32Array : Uint8Array) : Float32Array;
+ return typedArray;
+ };
+
+ /** @override */
+ _getOutputTensor = () => {
+ return this._output;
+ };
+
+ /** @override */
+ _doInference = async () => {
+ await this._inferReq.startAsync();
+ const output_blob = this._inferReq.getBlob(this._outputInfo.name());
+ const typedArray = this._getOutputTensorTypedArray();
+ const output_data = new typedArray(output_blob.rmap());
+ this._output = output_data;
+ };
}
\ No newline at end of file
diff --git a/package.json b/package.json
index 127060930..f70b8ebc5 100644
--- a/package.json
+++ b/package.json
@@ -52,9 +52,6 @@
"webpack": "^4.26.0",
"webpack-cli": "^3.1.2",
"webpack-dev-server": "^3.1.10",
- "@webgpu/glslang": "0.0.12",
- "@webgpu/types": "0.0.24",
- "@tensorflow/tfjs-backend-webgpu": "0.0.1-alpha.4",
"inference-engine-node": "^0.1.4"
}
}
From 1c9470aad3f0fa7c7724f6e87be073207ef283d2 Mon Sep 17 00:00:00 2001
From: lionkun <871518554@qq.com>
Date: Sun, 29 Nov 2020 19:32:35 +0800
Subject: [PATCH 3/4] Add the backend setting for OpenVINO.js
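
With the CPU | GPU selector added below, the runner can pass the chosen
device string straight through to loadNetwork. A sketch of the intended
wiring (the actual change is in this patch's OpenVINORunner.js hunk; the
device variable and its source are assumptions):

    const device = this._currentOpenVINOJSBackend || 'CPU';  // 'CPU' | 'GPU'
    const execNet = await this._ieCore.loadNetwork(this._network, device);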
---
examples/image_classification/index.html | 9 ++++
examples/semantic_segmentation/index.html | 9 ++++
examples/speech_commands/index.html | 9 ++++
examples/speech_recognition/index.html | 9 ++++
examples/static/js/ui.common.js | 12 +++++
examples/util/BaseExample.js | 41 +++++++++++++----
examples/util/OpenVINORunner.js | 54 +++++++++++++++++++----
7 files changed, 125 insertions(+), 18 deletions(-)
diff --git a/examples/image_classification/index.html b/examples/image_classification/index.html
index 345cdb2ac..3534f2768 100755
--- a/examples/image_classification/index.html
+++ b/examples/image_classification/index.html
@@ -119,6 +119,15 @@
+
+ Backend |
+
+
+
+
+
+ |
+
diff --git a/examples/semantic_segmentation/index.html b/examples/semantic_segmentation/index.html
index 04cfa6e89..ab8d723ea 100644
--- a/examples/semantic_segmentation/index.html
+++ b/examples/semantic_segmentation/index.html
@@ -119,6 +119,15 @@
+
+ Backend |
+
+
+
+
+
+ |
+
diff --git a/examples/speech_commands/index.html b/examples/speech_commands/index.html
index bcbe4c3c6..594c0af38 100755
--- a/examples/speech_commands/index.html
+++ b/examples/speech_commands/index.html
@@ -109,6 +109,15 @@
+
+ Backend |
+
+
+
+
+
+ |
+
diff --git a/examples/speech_recognition/index.html b/examples/speech_recognition/index.html
index 31ca634ae..b7c2b1829 100755
--- a/examples/speech_recognition/index.html
+++ b/examples/speech_recognition/index.html
@@ -109,6 +109,15 @@
+
+ Backend |
+
+
+
+
+
+ |
+
diff --git a/examples/static/js/ui.common.js b/examples/static/js/ui.common.js
index ced2497ac..13037d308 100644
--- a/examples/static/js/ui.common.js
+++ b/examples/static/js/ui.common.js
@@ -226,6 +226,18 @@ const updateOpenCVJSBackendComponentsStyle = (selectedBackend) => {
$('#l-opencvjs' + _selectedBackend).addClass('checked');
};
+const updateOpenVINOJSBackendComponentsStyle = (selectedBackend) => {
+  const _selectedBackend = selectedBackend.toLocaleLowerCase().replace(' ', '');
+  $('.openvinojsbackend input').attr('disabled', false);
+  $('.openvinojsbackend label').removeClass('cursordefault');
+  $('#openvinojs' + _selectedBackend).attr('disabled', true);
+  $('#l-openvinojs' + _selectedBackend).addClass('cursordefault');
+  $('.openvinojsbackend input').removeAttr('checked');
+  $('.openvinojsbackend label').removeClass('checked');
+  $('#openvinojs' + _selectedBackend).attr('checked', 'checked');
+  $('#l-openvinojs' + _selectedBackend).addClass('checked');
+};
+
const setPreferenceTipComponents = () => {
if ($('#backendpolyfilltitle')) {
$('#backendpolyfilltitle').attr('data-html', 'true')
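A short usage sketch of the new helper, assuming the radio element ids follow the same openvinojs{backend} / l-openvinojs{backend} pattern as the OpenCV.js controls:

    // Re-style the backend radios after a switch to GPU: disables and checks
    // #openvinojsgpu / #l-openvinojsgpu and re-enables the CPU radio.
    updateOpenVINOJSBackendComponentsStyle('GPU');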
diff --git a/examples/util/BaseExample.js b/examples/util/BaseExample.js
index 1b7e31deb..fa67e4863 100644
--- a/examples/util/BaseExample.js
+++ b/examples/util/BaseExample.js
@@ -16,7 +16,7 @@ class BaseExample extends BaseApp {
this._currentModelInfo = {};
this._currentFramework; //'OpenCV.js' | 'WebNN' | 'OpenVINO.js'
this._currentOpenCVJSBackend; // 'WASM' | 'SIMD' | 'Threads' | 'Threads+SIMD'
- this._currentOpenVINOJSBackend;// 'WASM' | 'WebGL' | 'WebML' | 'WebGPU'
+ this._currentOpenVINOJSBackend = 'CPU'; // 'CPU' | 'GPU'
this._runtimeInitialized = false; // for 'OpenCV.js', always true for other framework
this._currentTimeoutId = 0;
this._isStreaming = false; // for inference camera video
@@ -219,6 +219,7 @@ class BaseExample extends BaseApp {
case 'WebNN':
$('.backend').show();
$('.opencvjsbackend').hide();
+ $('.openvinojsbackend').hide();
this._setRuntimeInitialized(true);
const prefer = parseSearchParams('prefer');
this._setPrefer(prefer);
@@ -229,6 +230,7 @@ class BaseExample extends BaseApp {
case 'OpenCV.js':
$('.backend').hide();
$('.opencvjsbackend').show();
+ $('.openvinojsbackend').hide();
const opencvBackend = parseSearchParams('b');
if (opencvBackend != this._currentOpenCVJSBackend) {
showOpenCVRuntimeProgressComponent();
@@ -240,10 +242,11 @@ class BaseExample extends BaseApp {
case 'OpenVINO.js':
$('.backend').hide();
$('.opencvjsbackend').hide();
+ $('.openvinojsbackend').show();
const openVINObackend = parseSearchParams('b');
- this._setOpenVINOJSBackend(openVINObackend)
+ this._setOpenVINOJSBackend(openVINObackend);
this._setRuntimeInitialized(true);
- locSearch = `?m=${this.openVINObackend}&s=${this._currentInputType}&d=${this._hiddenControlsFlag}&f=${this._currentFramework}`;
+ locSearch = `?b=${this._currentOpenVINOJSBackend}&m=${this._currentModelId}&s=${this._currentInputType}&d=${this._hiddenControlsFlag}&f=${this._currentFramework}`;
break;
}
}
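For reference, the b/m/s/d/f search keys used in locSearch above follow the convention shared by all the example pages; the values below are illustrative, read back with the existing parseSearchParams helper:

    // Example URL for the OpenVINO.js path and how its parts are read back.
    // ?b=GPU&m=resnet50_v1_openvino&s=image&d=0&f=OpenVINO.js
    const framework = parseSearchParams('f'); // 'OpenVINO.js'
    const backend   = parseSearchParams('b'); // 'GPU' -> _setOpenVINOJSBackend
    const modelId   = parseSearchParams('m'); // 'resnet50_v1_openvino'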
@@ -298,15 +301,19 @@ class BaseExample extends BaseApp {
}
updateBackendComponents(this._currentBackend, this._currentPrefer);
$('.opencvjsbackend').hide();
+ $('.openvinojsbackend').hide();
break;
case 'OpenCV.js':
$('.backend').hide();
$('.opencvjsbackend').show();
+ $('.openvinojsbackend').hide();
updateOpenCVJSBackendComponentsStyle(this._currentOpenCVJSBackend);
break;
case 'OpenVINO.js':
$('.backend').hide();
$('.opencvjsbackend').hide();
+ $('.openvinojsbackend').show();
+ updateOpenVINOJSBackendComponentsStyle(this._currentOpenVINOJSBackend);
break;
}
updateSIMDNotes();
@@ -360,6 +367,7 @@ class BaseExample extends BaseApp {
updateFrameworkComponentsStyle(framework);
if (framework === 'OpenCV.js') {
$('.backend').hide();
+ $('.openvinojsbackend').hide();
$('.offload').hide();
$('.opencvjsbackend').show();
this._setRuntimeInitialized(false);
@@ -389,6 +397,7 @@ class BaseExample extends BaseApp {
updateTitleComponent(this._currentOpenCVJSBackend, null, this._currentModelId, this._inferenceModels);
} else if (framework === 'WebNN') {
$('.opencvjsbackend').hide();
+ $('.openvinojsbackend').hide();
$('.backend').show();
$('#progressruntime').hide();
this._setFramework(framework);
@@ -400,11 +409,9 @@ class BaseExample extends BaseApp {
this._resetOutput();
this.main();
} else if (framework === 'OpenVINO.js') {
-
-
-
$('.opencvjsbackend').hide();
- $('.backend').show();
+ $('.backend').hide();
+ $('.openvinojsbackend').show();
$('#progressruntime').hide();
this._setFramework(framework);
this._setRuntimeInitialized(true);
@@ -412,8 +419,11 @@ class BaseExample extends BaseApp {
this._showDynamicComponents();
this._modelClickBinding();
this._runner = null;
+ if (typeof this._currentOpenVINOJSBackend === 'undefined') {
+ this._setOpenVINOJSBackend('CPU');
+ }
this._resetOutput();
- updateTitleComponent(this._currentBackend, null, this._currentModelId, this._inferenceModels);
+ updateTitleComponent(this._currentOpenVINOJSBackend, null, this._currentModelId, this._inferenceModels);
this.main();
}
});
@@ -579,6 +589,17 @@ class BaseExample extends BaseApp {
this._updateHistoryEntryURL();
});
+ // Click trigger of openvinojsbackend element
+ $('input:radio[name=openvinojsbackend]').click(() => {
+ $('.alert').hide();
+    const selectedBackend = $('input:radio[name="openvinojsbackend"]:checked').attr('value');
+    updateOpenVINOJSBackendComponentsStyle(selectedBackend);
+    this._setOpenVINOJSBackend(selectedBackend);
+    const locSearch = `?b=${this._currentOpenVINOJSBackend}&m=${this._currentModelId}&s=${this._currentInputType}&d=${this._hiddenControlsFlag}&f=${this._currentFramework}`;
+    this._updateHistoryEntryURL(locSearch);
+ this.main();
+ });
+
// Click trigger to do inference with <img> element
$('#img').click(() => {
$('.alert').hide();
@@ -702,6 +723,8 @@ class BaseExample extends BaseApp {
const supportedOps = getSupportedOps(this._currentBackend, this._currentPrefer);
options.supportedOps = supportedOps;
options.eagerMode = false;
+ } else if (this._currentFramework === 'OpenVINO.js') {
+ options.backend = this._currentOpenVINOJSBackend;
}
return options;
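Note that the options.backend set here is the same object OpenVINORunner._doCompile receives, so the device string travels from the radio selection straight to loadNetwork. A condensed sketch of the hand-off (the intermediate BaseRunner plumbing is elided and not part of this diff):

    // How the backend string flows, per the hunks in this patch:
    const options = this._getOptions();  // -> { backend: 'CPU' } or { backend: 'GPU' }
    // ...BaseRunner forwards options to the active runner, and then:
    // OpenVINORunner._doCompile(options):
    //   const device = options.backend;
    //   this._execNet = await this._ieCore.loadNetwork(this._network, device);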
@@ -877,7 +900,7 @@ class BaseExample extends BaseApp {
} else if (this._currentFramework === 'OpenCV.js') {
updateTitleComponent(this._currentOpenCVJSBackend, null, this._currentModelId, this._inferenceModels);
} else if (this._currentFramework === 'OpenVINO.js') {
- updateTitleComponent(this._currentBackend, null, this._currentModelId, this._inferenceModels);
+ updateTitleComponent(this._currentOpenVINOJSBackend, null, this._currentModelId, this._inferenceModels);
}
console.log(location.search);
diff --git a/examples/util/OpenVINORunner.js b/examples/util/OpenVINORunner.js
index 8c117767b..2d72d7a0f 100644
--- a/examples/util/OpenVINORunner.js
+++ b/examples/util/OpenVINORunner.js
@@ -18,11 +18,54 @@ class OpenVINORunner extends BaseRunner {
this._deQuantizeParams = null;
this._inputInfo = null;
this._outputInfo = null;
+ this._currentBackend = null;
if (ie !== null) {
this._ieCore = ie.createCore();
}
+ // this._configureBackend();
}
+ _setBackend = (backend) => {
+ this._currentBackend = backend;
+ };
+
+ /** @override */
+ doInitialization = (modelInfo) => {
+ this._setLoadedFlag(false);
+ this._setInitializedFlag(false);
+ this._setModelInfo(modelInfo);
+ this._setDeQuantizeParams([]);
+ this._setBackend(null);
+ };
+
+ // _configureBackend = ()=> {
+ // if (this._ieCore !== null) {
+ // const availableDevices = this._ieCore.getAvailableDevices();
+ // const brows = $('.device');
+ // brows.empty();
+ // for (const device of availableDevices) {
+ // const deviceName = device.replace(/ \(.*\)$/, '');
+ // const deviceLow = deviceName.toLowerCase();
+ // if (device === "GNA") {
+ // continue;
+ // }
+ // if (device === "CPU") {
+ // brows.append($(``));
+ // brows.append($(``));
+ // } else {
+ // brows.append($(``));
+ // brows.append($(``));
+ // }
+ // }
+ // } else {
+ // throw new Error(`The inference-engine-node is not working, please check that Node.js integration is enabled`);
+ // }
+ // }
+
_setDeQuantizeParams = (params) => {
this._deQuantizeParams = params;
};
@@ -58,19 +101,12 @@ class OpenVINORunner extends BaseRunner {
}
};
- /** @override */
- doInitialization = (modelInfo) => {
- this._setLoadedFlag(false);
- this._setInitializedFlag(false);
- this._setModelInfo(modelInfo);
- this._setDeQuantizeParams([]);
- };
-
/** @override */
_doCompile = async (options) => {
const modelFormat = this._currentModelInfo.format;
+ const device = options.backend;
if (modelFormat === 'OpenVINO') {
- let exec_net = await this._ieCore.loadNetwork(this._network, "CPU");
+ let exec_net = await this._ieCore.loadNetwork(this._network, device);
this._execNet = exec_net;
this._postOptions = this._currentModelInfo.postOptions || {};
} else {
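With this change _doCompile honors options.backend instead of the hard-coded "CPU". A defensive variant, not part of this patch, could validate the requested device against getAvailableDevices() (the same call the commented-out _configureBackend relies on) and fall back to CPU:

    // Hypothetical guard around loadNetwork; core is an inference-engine-node
    // Core instance and network was read via readNetwork().
    async function loadOnDevice(core, network, requestedDevice) {
      const available = core.getAvailableDevices(); // e.g. ['CPU', 'GPU']
      const device = available.includes(requestedDevice) ? requestedDevice : 'CPU';
      return core.loadNetwork(network, device);
    }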
From 4ed62179040c667820bffac6291823be1abf4983 Mon Sep 17 00:00:00 2001
From: lionkun <871518554@qq.com>
Date: Tue, 8 Dec 2020 15:12:15 +0800
Subject: [PATCH 4/4] Delete useless comments and console.log statements
---
examples/image_classification/main.js | 1 -
examples/static/js/ui.common.js | 2 +-
examples/util/BaseExample.js | 3 +-
examples/util/OpenVINORunner.js | 28 -------------------
.../util/openvino/OpenVINOModelImporter.js | 3 --
5 files changed, 2 insertions(+), 35 deletions(-)
diff --git a/examples/image_classification/main.js b/examples/image_classification/main.js
index d8c925df1..cfddbaa3e 100644
--- a/examples/image_classification/main.js
+++ b/examples/image_classification/main.js
@@ -1,7 +1,6 @@
const example = new ImageClassificationExample({model: imageClassificationModels});
let specialoffer = () => {
- //http://localhost:8080/examples/image_classification_opencv/?prefer=none&b=WASM&m=resnet50_v1_openvino&s=image&d=0&f=WebNN
let f = parseSearchParams('f')
let url = location.href.replace('image_classification/', 'image_classification_opencv/')
let urlimg = url.replace('s=camera', 's=image')
diff --git a/examples/static/js/ui.common.js b/examples/static/js/ui.common.js
index 13037d308..e13601156 100644
--- a/examples/static/js/ui.common.js
+++ b/examples/static/js/ui.common.js
@@ -176,7 +176,7 @@ const singleModelTable = (modelList, category) => {
const setModelComponents = (models, selectedModelIdStr) => {
$('.model').remove();
let formatTypes = [];
- console.log(models);
+
for (let [category, modelList] of Object.entries(models)) {
let formats = singleModelTable(modelList, category);
formatTypes.push(...formats);
diff --git a/examples/util/BaseExample.js b/examples/util/BaseExample.js
index fa67e4863..773c775b3 100644
--- a/examples/util/BaseExample.js
+++ b/examples/util/BaseExample.js
@@ -888,7 +888,6 @@ class BaseExample extends BaseApp {
* then shows the post processing of inference result on UI.
*/
main = async () => {
- console.log(this._runtimeInitialized);
if (!this._runtimeInitialized) {
console.log(`Runtime isn't initialized`);
return;
@@ -902,7 +901,7 @@ class BaseExample extends BaseApp {
} else if (this._currentFramework === 'OpenVINO.js') {
updateTitleComponent(this._currentOpenVINOJSBackend, null, this._currentModelId, this._inferenceModels);
}
- console.log(location.search);
+
if (this._currentModelId === 'none') {
showErrorComponent('No model selected', 'Please select model to start prediction.');
diff --git a/examples/util/OpenVINORunner.js b/examples/util/OpenVINORunner.js
index 2d72d7a0f..43567091b 100644
--- a/examples/util/OpenVINORunner.js
+++ b/examples/util/OpenVINORunner.js
@@ -38,34 +38,6 @@ class OpenVINORunner extends BaseRunner {
this._setBackend(null);
};
- // _configureBackend = ()=> {
- // if (this._ieCore !== null) {
- // const availableDevices = this._ieCore.getAvailableDevices();
- // const brows = $('.device');
- // brows.empty();
- // for (const device of availableDevices) {
- // const deviceName = device.replace(/ \(.*\)$/, '');
- // const deviceLow = deviceName.toLowerCase();
- // if (device === "GNA") {
- // continue;
- // }
- // if (device === "CPU") {
- // brows.append($(``));
- // brows.append($(``));
- // } else {
- // brows.append($(``));
- // brows.append($(``));
- // }
- // }
- // } else {
- // throw new Error(`The inference-engine-node is not working, please check that Node.js integration is enabled`);
- // }
- // }
-
_setDeQuantizeParams = (params) => {
this._deQuantizeParams = params;
};
diff --git a/examples/util/openvino/OpenVINOModelImporter.js b/examples/util/openvino/OpenVINOModelImporter.js
index 36dd944e2..1343da722 100644
--- a/examples/util/openvino/OpenVINOModelImporter.js
+++ b/examples/util/openvino/OpenVINOModelImporter.js
@@ -46,9 +46,6 @@ class OpenVINOModelImporter {
supportedOps: this._supportedOps,
isOpenVINOModel: true,
};
- console.log(this._backend);
- console.log(this._bEagerMode);
- console.log(this._supportedOps);
this._model = await this._nn.createModel(options);
this._addTensorOperands();