@@ -39,165 +39,165 @@ limitations under the License.
39
39
/* ** Function ***/
40
40
InferenceHelperNcnn::InferenceHelperNcnn()
    : num_threads_(1)  // single-threaded by default; override via SetNumThreads()
{
}
44
44
45
45
InferenceHelperNcnn::~InferenceHelperNcnn()
{
    // Nothing to do explicitly: net_ is reset via Finalize()/smart-pointer
    // destruction, and the in/out mat lists clean themselves up.
}
48
48
49
- int32_t InferenceHelperNcnn::setNumThread (const int32_t numThread )
49
+ int32_t InferenceHelperNcnn::SetNumThreads (const int32_t num_threads )
50
50
{
51
- m_numThread = numThread ;
52
- return RET_OK ;
51
+ num_threads_ = num_threads ;
52
+ return kRetOk ;
53
53
}
54
54
55
- int32_t InferenceHelperNcnn::setCustomOps (const std::vector<std::pair<const char *, const void *>>& customOps )
55
+ int32_t InferenceHelperNcnn::SetCustomOps (const std::vector<std::pair<const char *, const void *>>& custom_ops )
56
56
{
57
57
PRINT (" [WARNING] This method is not supported\n " );
58
- return RET_OK ;
58
+ return kRetOk ;
59
59
}
60
60
61
- int32_t InferenceHelperNcnn::initialize (const std::string& modelFilename , std::vector<InputTensorInfo>& inputTensorInfoList , std::vector<OutputTensorInfo>& outputTensorInfoList )
61
+ int32_t InferenceHelperNcnn::Initialize (const std::string& model_filename , std::vector<InputTensorInfo>& input_tensor_info_list , std::vector<OutputTensorInfo>& output_tensor_info_list )
62
62
{
63
63
/* ** Create network ***/
64
- m_net .reset (new ncnn::Net ());
65
- m_net ->opt .use_fp16_arithmetic = true ;
66
- m_net ->opt .use_fp16_packed = true ;
67
- m_net ->opt .use_fp16_storage = true ;
68
-
69
- std::string binFilename = modelFilename ;
70
- if (modelFilename .find (" .param" ) == std::string::npos) {
71
- PRINT_E (" Invalid model param filename (%s)\n " , modelFilename .c_str ());
72
- return RET_ERR ;
64
+ net_ .reset (new ncnn::Net ());
65
+ net_ ->opt .use_fp16_arithmetic = true ;
66
+ net_ ->opt .use_fp16_packed = true ;
67
+ net_ ->opt .use_fp16_storage = true ;
68
+
69
+ std::string bin_filename = model_filename ;
70
+ if (model_filename .find (" .param" ) == std::string::npos) {
71
+ PRINT_E (" Invalid model param filename (%s)\n " , model_filename .c_str ());
72
+ return kRetErr ;
73
73
}
74
- binFilename = binFilename .replace (binFilename .find (" .param" ), std::string (" .param" ).length (), " .bin\0 " );
75
- if (m_net ->load_param (modelFilename .c_str ()) != 0 ) {
76
- PRINT_E (" Failed to load model param file (%s)\n " , modelFilename .c_str ());
77
- return RET_ERR ;
74
+ bin_filename = bin_filename .replace (bin_filename .find (" .param" ), std::string (" .param" ).length (), " .bin\0 " );
75
+ if (net_ ->load_param (model_filename .c_str ()) != 0 ) {
76
+ PRINT_E (" Failed to load model param file (%s)\n " , model_filename .c_str ());
77
+ return kRetErr ;
78
78
}
79
- if (m_net ->load_model (binFilename .c_str ()) != 0 ) {
80
- PRINT_E (" Failed to load model bin file (%s)\n " , binFilename .c_str ());
81
- return RET_ERR ;
79
+ if (net_ ->load_model (bin_filename .c_str ()) != 0 ) {
80
+ PRINT_E (" Failed to load model bin file (%s)\n " , bin_filename .c_str ());
81
+ return kRetErr ;
82
82
}
83
83
84
84
/* Convert normalize parameter to speed up */
85
- for (auto & inputTensorInfo : inputTensorInfoList ) {
86
- convertNormalizeParameters (inputTensorInfo );
85
+ for (auto & input_tensor_info : input_tensor_info_list ) {
86
+ ConvertNormalizeParameters (input_tensor_info );
87
87
}
88
88
89
- return RET_OK ;
89
+ return kRetOk ;
90
90
};
91
91
92
92
93
- int32_t InferenceHelperNcnn::finalize (void )
93
+ int32_t InferenceHelperNcnn::Finalize (void )
94
94
{
95
- m_net .reset ();
96
- m_inMatList .clear ();
97
- m_outMatList .clear ();
98
- return RET_ERR ;
95
+ net_ .reset ();
96
+ in_mat_list_ .clear ();
97
+ out_mat_list_ .clear ();
98
+ return kRetErr ;
99
99
}
100
100
101
/**
 * @brief Convert each input descriptor into an ncnn::Mat ready for inference.
 *
 * For image inputs this performs color conversion, optional resize, and mean/
 * norm normalization (coefficients are assumed to have been pre-folded by
 * ConvertNormalizeParameters()). Blob inputs are wrapped directly. The
 * resulting mats are stored in in_mat_list_, keyed by tensor name, for
 * Process() to consume.
 *
 * @param input_tensor_info_list  Inputs to convert; data pointers must stay
 *                                valid until Process() has run.
 * @return kRetOk on success; kRetErr on an unsupported crop, color
 *         combination, or data type.
 */
int32_t InferenceHelperNcnn::PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list)
{
    in_mat_list_.clear();
    for (const auto& input_tensor_info : input_tensor_info_list) {
        ncnn::Mat ncnn_mat;
        if (input_tensor_info.data_type == InputTensorInfo::kDataTypeImage) {
            /* Crop: the ncnn path only handles full-frame input, so any crop
             * request (crop size != image size) is rejected up front. */
            if ((input_tensor_info.image_info.width != input_tensor_info.image_info.crop_width) || (input_tensor_info.image_info.height != input_tensor_info.image_info.crop_height)) {
                PRINT_E("Crop is not supported\n");
                return kRetErr;
            }
            /* Convert color type: pick the ncnn pixel-conversion constant from
             * the (source channels, tensor channels, BGR/RGB, swap) combination. */
            int32_t pixel_type = 0;
            if ((input_tensor_info.image_info.channel == 3) && (input_tensor_info.tensor_dims.channel == 3)) {
                pixel_type = (input_tensor_info.image_info.is_bgr) ? ncnn::Mat::PIXEL_BGR : ncnn::Mat::PIXEL_RGB;
                if (input_tensor_info.image_info.swap_color) {
                    /* Caller wants the channel order flipped while converting. */
                    pixel_type = (input_tensor_info.image_info.is_bgr) ? ncnn::Mat::PIXEL_BGR2RGB : ncnn::Mat::PIXEL_RGB2BGR;
                }
            } else if ((input_tensor_info.image_info.channel == 1) && (input_tensor_info.tensor_dims.channel == 1)) {
                pixel_type = ncnn::Mat::PIXEL_GRAY;
            } else if ((input_tensor_info.image_info.channel == 3) && (input_tensor_info.tensor_dims.channel == 1)) {
                pixel_type = (input_tensor_info.image_info.is_bgr) ? ncnn::Mat::PIXEL_BGR2GRAY : ncnn::Mat::PIXEL_RGB2GRAY;
            } else if ((input_tensor_info.image_info.channel == 1) && (input_tensor_info.tensor_dims.channel == 3)) {
                pixel_type = ncnn::Mat::PIXEL_GRAY2RGB;
            } else {
                PRINT_E("Unsupported color conversion (%d, %d)\n", input_tensor_info.image_info.channel, input_tensor_info.tensor_dims.channel);
                return kRetErr;
            }

            if (input_tensor_info.image_info.crop_width == input_tensor_info.tensor_dims.width && input_tensor_info.image_info.crop_height == input_tensor_info.tensor_dims.height) {
                /* Convert to blob (sizes already match; no resize needed) */
                ncnn_mat = ncnn::Mat::from_pixels((uint8_t*)input_tensor_info.data, pixel_type, input_tensor_info.image_info.width, input_tensor_info.image_info.height);
            } else {
                /* Convert to blob with resize to the tensor dimensions */
                ncnn_mat = ncnn::Mat::from_pixels_resize((uint8_t*)input_tensor_info.data, pixel_type, input_tensor_info.image_info.width, input_tensor_info.image_info.height, input_tensor_info.tensor_dims.width, input_tensor_info.tensor_dims.height);
            }
            /* Normalize image using the pre-folded mean/norm coefficients */
            ncnn_mat.substract_mean_normalize(input_tensor_info.normalize.mean, input_tensor_info.normalize.norm);
        } else if (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNhwc) {
            /* NHWC blobs are only partially handled: warn, then treat the blob
             * as packed RGB/GRAY pixels and let ncnn repack it. */
            PRINT_E("[ToDo] Unsupported data type (%d)\n", input_tensor_info.data_type);
            ncnn_mat = ncnn::Mat::from_pixels((uint8_t*)input_tensor_info.data, input_tensor_info.tensor_dims.channel == 3 ? ncnn::Mat::PIXEL_RGB : ncnn::Mat::PIXEL_GRAY, input_tensor_info.tensor_dims.width, input_tensor_info.tensor_dims.height);
        } else if (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNchw) {
            /* NCHW matches ncnn's native layout: wrap the caller's buffer
             * without copying (the mat references input_tensor_info.data). */
            ncnn_mat = ncnn::Mat(input_tensor_info.tensor_dims.width, input_tensor_info.tensor_dims.height, input_tensor_info.tensor_dims.channel, input_tensor_info.data);
        } else {
            PRINT_E("Unsupported data type (%d)\n", input_tensor_info.data_type);
            return kRetErr;
        }
        in_mat_list_.push_back(std::pair<std::string, ncnn::Mat>(input_tensor_info.name, ncnn_mat));
    }
    return kRetOk;
}
152
152
153
- int32_t InferenceHelperNcnn::invoke (std::vector<OutputTensorInfo>& outputTensorInfoList )
153
+ int32_t InferenceHelperNcnn::Process (std::vector<OutputTensorInfo>& output_tensor_info_list )
154
154
{
155
- ncnn::Extractor ex = m_net ->create_extractor ();
155
+ ncnn::Extractor ex = net_ ->create_extractor ();
156
156
ex.set_light_mode (true );
157
- ex.set_num_threads (m_numThread );
158
- for (const auto & inputMat : m_inMatList ) {
157
+ ex.set_num_threads (num_threads_ );
158
+ for (const auto & inputMat : in_mat_list_ ) {
159
159
if (ex.input (inputMat.first .c_str (), inputMat.second ) != 0 ) {
160
160
PRINT_E (" Input mat error (%s)\n " , inputMat.first .c_str ());
161
- return RET_ERR ;
161
+ return kRetErr ;
162
162
}
163
163
}
164
164
165
- m_outMatList .clear ();
166
- for (auto & outputTensorInfo : outputTensorInfoList ) {
167
- ncnn::Mat ncnnOut ;
168
- if (ex.extract (outputTensorInfo .name .c_str (), ncnnOut ) != 0 ) {
169
- PRINT_E (" Output mat error (%s)\n " , outputTensorInfo .name .c_str ());
170
- return RET_ERR ;
165
+ out_mat_list_ .clear ();
166
+ for (auto & output_tensor_info : output_tensor_info_list ) {
167
+ ncnn::Mat ncnn_out ;
168
+ if (ex.extract (output_tensor_info .name .c_str (), ncnn_out ) != 0 ) {
169
+ PRINT_E (" Output mat error (%s)\n " , output_tensor_info .name .c_str ());
170
+ return kRetErr ;
171
171
}
172
- m_outMatList .push_back (ncnnOut ); // store ncnn mat in member variable so that data keep exist
173
- outputTensorInfo .data = ncnnOut .data ;
174
- outputTensorInfo. tensorDims .batch = 1 ;
175
- outputTensorInfo. tensorDims .channel = ncnnOut .c ;
176
- outputTensorInfo. tensorDims .height = ncnnOut .h ;
177
- outputTensorInfo. tensorDims .width = ncnnOut .w ;
172
+ out_mat_list_ .push_back (ncnn_out ); // store ncnn mat in member variable so that data keep exist
173
+ output_tensor_info .data = ncnn_out .data ;
174
+ output_tensor_info. tensor_dims .batch = 1 ;
175
+ output_tensor_info. tensor_dims .channel = ncnn_out .c ;
176
+ output_tensor_info. tensor_dims .height = ncnn_out .h ;
177
+ output_tensor_info. tensor_dims .width = ncnn_out .w ;
178
178
}
179
179
180
- return RET_OK ;
180
+ return kRetOk ;
181
181
}
182
182
183
- void InferenceHelperNcnn::convertNormalizeParameters (InputTensorInfo& inputTensorInfo )
183
+ void InferenceHelperNcnn::ConvertNormalizeParameters (InputTensorInfo& tensor_info )
184
184
{
185
- if (inputTensorInfo. dataType != InputTensorInfo::DATA_TYPE_IMAGE ) return ;
185
+ if (tensor_info. data_type != InputTensorInfo::kDataTypeImage ) return ;
186
186
187
187
#if 0
188
188
/* Convert to speeden up normalization: ((src / 255) - mean) / norm = src * 1 / (255 * norm) - (mean / norm) */
189
189
for (int32_t i = 0; i < 3; i++) {
190
- inputTensorInfo .normalize.mean[i] /= inputTensorInfo .normalize.norm[i];
191
- inputTensorInfo .normalize.norm[i] *= 255.0f;
192
- inputTensorInfo .normalize.norm[i] = 1.0f / inputTensorInfo .normalize.norm[i];
190
+ tensor_info .normalize.mean[i] /= tensor_info .normalize.norm[i];
191
+ tensor_info .normalize.norm[i] *= 255.0f;
192
+ tensor_info .normalize.norm[i] = 1.0f / tensor_info .normalize.norm[i];
193
193
}
194
194
#endif
195
195
#if 1
196
196
/* Convert to speeden up normalization: ((src / 255) - mean) / norm = (src - (mean * 255)) * (1 / (255 * norm)) */
197
197
for (int32_t i = 0 ; i < 3 ; i++) {
198
- inputTensorInfo .normalize .mean [i] *= 255 .0f ;
199
- inputTensorInfo .normalize .norm [i] *= 255 .0f ;
200
- inputTensorInfo .normalize .norm [i] = 1 .0f / inputTensorInfo .normalize .norm [i];
198
+ tensor_info .normalize .mean [i] *= 255 .0f ;
199
+ tensor_info .normalize .norm [i] *= 255 .0f ;
200
+ tensor_info .normalize .norm [i] = 1 .0f / tensor_info .normalize .norm [i];
201
201
}
202
202
#endif
203
203
}
0 commit comments