Commit ecd5ca9

Merge pull request #3 from iwatake2222/feat/update_document
Feat/update document
2 parents: 005430c + 0fb86cf

File tree

8 files changed: +736 −72 lines


.gitignore (+4)

@@ -0,0 +1,4 @@
+.vscode/
+build/
+third_party/
+resource/

00_doc/class_diagram.drawio (+164)

Large diffs are not rendered by default.

00_doc/class_diagram.png (116 KB)

InferenceHelper/CMakeLists.txt (+15 −16)

@@ -22,7 +22,7 @@ if(INFERENCE_HELPER_ENABLE_TENSORRT)
     set(SRC ${SRC} InferenceHelperTensorRt.h InferenceHelperTensorRt.cpp)
     set(SRC ${SRC} TensorRT/logger.cpp TensorRT/BatchStream.h TensorRT/common.h TensorRT/EntropyCalibrator.h TensorRT/logger.h TensorRT/logging.h)
 endif()
-if(INFERENCE_HELPER_ENABLE_TFLITE)
+if(INFERENCE_HELPER_ENABLE_TFLITE OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
     set(SRC ${SRC} InferenceHelperTensorflowLite.h InferenceHelperTensorflowLite.cpp)
 endif()
 if(INFERENCE_HELPER_ENABLE_NCNN)

@@ -75,19 +75,16 @@ if(INFERENCE_HELPER_ENABLE_TENSORRT)
 endif()

 # For Tensorflow Lite
-if(INFERENCE_HELPER_ENABLE_TFLITE)
-    include(${CMAKE_CURRENT_LIST_DIR}/../third_party/cmakes/tflite.cmake)
+if(INFERENCE_HELPER_ENABLE_TFLITE OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
+    include(${THIRD_PARTY_DIR}/cmakes/tflite.cmake)
     target_include_directories(${LibraryName} PUBLIC ${TFLITE_INC})
     target_link_libraries(${LibraryName} ${TFLITE_LIB})
     add_definitions(-DINFERENCE_HELPER_ENABLE_TFLITE)
 endif()

-# For Tensorflow Lite Delegate(Edge TPU)
-if(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
-    include(${CMAKE_CURRENT_LIST_DIR}/../third_party/cmakes/tflite_edgetpu.cmake)
-    target_include_directories(${LibraryName} PUBLIC ${TFLITE_EDGETPI_INC})
-    target_link_libraries(${LibraryName} ${TFLITE_EDGETPI_LIB})
-    add_definitions(-DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
+# For Tensorflow Lite Delegate(XNNPACK)
+if(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK)
+    add_definitions(-DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK)
 endif()

 # For Tensorflow Lite Delegate(GPU)
@@ -97,21 +94,23 @@ if(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU)
     target_include_directories(${LibraryName} PUBLIC ${OpenCL_INCLUDE_DIRS})
     target_link_libraries(${LibraryName} ${OpenCL_LIBRARIES})
     endif()
-    include(${CMAKE_CURRENT_LIST_DIR}/../third_party/cmakes/tflite_gpu.cmake)
+    include(${THIRD_PARTY_DIR}/cmakes/tflite_gpu.cmake)
     target_include_directories(${LibraryName} PUBLIC ${TFLITE_GPU_INC})
     target_link_libraries(${LibraryName} ${TFLITE_GPU_LIB} EGL GLESv2)
     add_definitions(-DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU)
 endif()

-# For Tensorflow Lite Delegate(XNNPACK)
-if(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK)
-    add_definitions(-DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK)
+# For Tensorflow Lite Delegate(Edge TPU)
+if(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
+    include(${THIRD_PARTY_DIR}/cmakes/tflite_edgetpu.cmake)
+    target_include_directories(${LibraryName} PUBLIC ${TFLITE_EDGETPU_INC})
+    target_link_libraries(${LibraryName} ${TFLITE_EDGETPU_LIB})
+    add_definitions(-DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
 endif()

-
 # For NCNN
 if(INFERENCE_HELPER_ENABLE_NCNN)
-    include(${CMAKE_CURRENT_LIST_DIR}/../third_party/cmakes/ncnn.cmake)
+    include(${THIRD_PARTY_DIR}/cmakes/ncnn.cmake)
     target_include_directories(${LibraryName} PUBLIC ${NCNN_INC})
     target_link_libraries(${LibraryName} ${NCNN_LIB})
     add_definitions(-DINFERENCE_HELPER_ENABLE_NCNN)

@@ -120,7 +119,7 @@ endif()

 # For MNN
 if(INFERENCE_HELPER_ENABLE_MNN)
-    include(${CMAKE_CURRENT_LIST_DIR}/../third_party/cmakes/MNN.cmake)
+    include(${THIRD_PARTY_DIR}/cmakes/MNN.cmake)
     target_include_directories(${LibraryName} PUBLIC ${MNN_INC})
     target_link_libraries(${LibraryName} ${MNN_LIB})
     add_definitions(-DINFERENCE_HELPER_ENABLE_MNN)

InferenceHelper/InferenceHelper.cpp (+17 −15)

@@ -17,12 +17,12 @@
 #ifdef INFERENCE_HELPER_ENABLE_OPENCV
 #include "InferenceHelperOpenCV.h"
 #endif
+#if defined(INFERENCE_HELPER_ENABLE_TFLITE) || defined(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK) || defined(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU) || defined(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
+#include "InferenceHelperTensorflowLite.h"
+#endif
 #ifdef INFERENCE_HELPER_ENABLE_TENSORRT
 #include "InferenceHelperTensorRt.h"
 #endif
-#ifdef INFERENCE_HELPER_ENABLE_TFLITE
-#include "InferenceHelperTensorflowLite.h"
-#endif
 #ifdef INFERENCE_HELPER_ENABLE_NCNN
 #include "InferenceHelperNcnn.h"
 #endif

@@ -47,20 +47,15 @@ InferenceHelper* InferenceHelper::create(const InferenceHelper::HELPER_TYPE type
         p = new InferenceHelperOpenCV();
         break;
 #endif
-#ifdef INFERENCE_HELPER_ENABLE_TENSORRT
-    case TENSOR_RT:
-        PRINT("Use TensorRT \n");
-        p = new InferenceHelperTensorRt();
-        break;
-#endif
 #ifdef INFERENCE_HELPER_ENABLE_TFLITE
     case TENSORFLOW_LITE:
         PRINT("Use TensorflowLite\n");
         p = new InferenceHelperTensorflowLite();
         break;
-#ifdef INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU
-    case TENSORFLOW_LITE_EDGETPU:
-        PRINT("Use TensorflowLite EdgeTPU Delegate\n");
+#endif
+#ifdef INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK
+    case TENSORFLOW_LITE_XNNPACK:
+        PRINT("Use TensorflowLite XNNPACK Delegate\n");
         p = new InferenceHelperTensorflowLite();
         break;
 #endif

@@ -70,12 +65,17 @@ InferenceHelper* InferenceHelper::create(const InferenceHelper::HELPER_TYPE type
         p = new InferenceHelperTensorflowLite();
         break;
 #endif
-#ifdef INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK
-    case TENSORFLOW_LITE_XNNPACK:
-        PRINT("Use TensorflowLite XNNPACK Delegate\n");
+#ifdef INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU
+    case TENSORFLOW_LITE_EDGETPU:
+        PRINT("Use TensorflowLite EdgeTPU Delegate\n");
         p = new InferenceHelperTensorflowLite();
         break;
 #endif
+#ifdef INFERENCE_HELPER_ENABLE_TENSORRT
+    case TENSOR_RT:
+        PRINT("Use TensorRT \n");
+        p = new InferenceHelperTensorRt();
+        break;
+#endif
 #ifdef INFERENCE_HELPER_ENABLE_NCNN
     case NCNN:

@@ -174,5 +174,7 @@ void InferenceHelper::preProcessByOpenCV(const InputTensorInfo& inputTensorInfo,
 /* For the environment where OpenCV is not supported */
 void InferenceHelper::preProcessByOpenCV(const InputTensorInfo& inputTensorInfo, bool isNCHW, cv::Mat& imgBlob)
 {
+    PRINT_E("[preProcessByOpenCV] Unsupported function called\n");
+    exit(-1);
 }
 #endif

InferenceHelper/InferenceHelper.h (+13 −12)

@@ -7,6 +7,7 @@
 #include <string>
 #include <vector>
 #include <array>
+#include <memory>

 class TensorInfo {
 public:

@@ -100,29 +101,29 @@ class OutputTensorInfo : public TensorInfo {
         data = nullptr;
         quant.scale = 0;
         quant.zeroPoint = 0;
-        m_dataFp32 = nullptr;
+        m_dataFp32.reset();
     }

     ~OutputTensorInfo() {
-        if (m_dataFp32 != nullptr) {
-            delete[] m_dataFp32;
+        if (m_dataFp32) {
+            m_dataFp32.reset();
         }
     }

     float* getDataAsFloat() { /* Returned pointer should be with const, but returning pointer without const is convenient to create cv::Mat */
         if (tensorType == TENSOR_TYPE_UINT8) {
             int32_t dataNum = 1;
             dataNum = tensorDims.batch * tensorDims.channel * tensorDims.height * tensorDims.width;
-            if (m_dataFp32 == nullptr) {
-                m_dataFp32 = new float[dataNum];
+            if (!m_dataFp32) {
+                m_dataFp32.reset(new float[dataNum]);
             }
 #pragma omp parallel
             for (int32_t i = 0; i < dataNum; i++) {
                 const uint8_t* valUint8 = static_cast<const uint8_t*>(data);
                 float valFloat = (valUint8[i] - quant.zeroPoint) * quant.scale;
                 m_dataFp32[i] = valFloat;
             }
-            return m_dataFp32;
+            return m_dataFp32.get();
         } else if (tensorType == TENSOR_TYPE_FP32) {
             return static_cast<float*>(data);
         } else {

@@ -138,7 +139,7 @@ class OutputTensorInfo : public TensorInfo {
     } quant; // [Out] Parameters for dequantization (convert uint8 to float)

 private:
-    float* m_dataFp32;
+    std::shared_ptr<float[]> m_dataFp32;
 };

@@ -154,15 +155,15 @@ class InferenceHelper {
     };

     typedef enum {
-        TENSOR_RT,
+        OPEN_CV,
+        OPEN_CV_GPU,
         TENSORFLOW_LITE,
-        TENSORFLOW_LITE_EDGETPU,
-        TENSORFLOW_LITE_GPU,
         TENSORFLOW_LITE_XNNPACK,
+        TENSORFLOW_LITE_GPU,
+        TENSORFLOW_LITE_EDGETPU,
+        TENSOR_RT,
         NCNN,
         MNN,
-        OPEN_CV,
-        OPEN_CV_GPU,
     } HELPER_TYPE;

 public:
