
Commit 82d38ce

Apply Google C++ Style Guide (partly)

1 parent 5198c14 · commit 82d38ce

14 files changed: +1020 −970 lines
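
The renames follow the Google C++ Style Guide naming rules: methods move from camelCase to PascalCase, data members trade the m_ prefix for a trailing underscore, and return-code constants switch from ALL_CAPS to a leading k. A condensed before/after sketch drawn from the diffs below (the lines are representative excerpts, not a complete declaration):

    // Before: camelCase method, m_-prefixed member, ALL_CAPS constant
    int32_t setNumThread(const int32_t numThread) override;
    int32_t m_numThread;
    return RET_OK;

    // After: PascalCase method, trailing-underscore member, kCamelCase constant
    int32_t SetNumThreads(const int32_t num_threads) override;
    int32_t num_threads_;
    return kRetOk;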

inference_helper/inference_helper_mnn.cpp (+124 −124)

Large diffs are not rendered by default.

inference_helper/inference_helper_mnn.h (+11 −11)

@@ -35,21 +35,21 @@ class InferenceHelperMnn : public InferenceHelper {
 public:
     InferenceHelperMnn();
     ~InferenceHelperMnn() override;
-    int32_t setNumThread(const int32_t numThread) override;
-    int32_t setCustomOps(const std::vector<std::pair<const char*, const void*>>& customOps) override;
-    int32_t initialize(const std::string& modelFilename, std::vector<InputTensorInfo>& inputTensorInfoList, std::vector<OutputTensorInfo>& outputTensorInfoList) override;
-    int32_t finalize(void) override;
-    int32_t preProcess(const std::vector<InputTensorInfo>& inputTensorInfoList) override;
-    int32_t invoke(std::vector<OutputTensorInfo>& outputTensorInfoList) override;
+    int32_t SetNumThreads(const int32_t num_threads) override;
+    int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) override;
+    int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
+    int32_t Finalize(void) override;
+    int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
+    int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;

 private:
-    void convertNormalizeParameters(InputTensorInfo& inputTensorInfo);
+    void ConvertNormalizeParameters(InputTensorInfo& tensor_info);

 private:
-    std::unique_ptr<MNN::Interpreter> m_net;
-    MNN::Session* m_session;
-    std::vector<std::unique_ptr<MNN::Tensor>> m_outMatList;
-    int32_t m_numThread;
+    std::unique_ptr<MNN::Interpreter> net_;
+    MNN::Session* session_;
+    std::vector<std::unique_ptr<MNN::Tensor>> out_mat_list_;
+    int32_t num_threads_;
 };

 #endif
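
Call sites migrate mechanically to the renamed interface. A minimal, hypothetical driver sketch: only the method names and the kRetOk/kRetErr codes come from this commit; the driver function, the thread count, and the assumption that the codes are reachable as InferenceHelper::kRetOk are illustrative.

    #include <string>
    #include <vector>
    #include "inference_helper_mnn.h"

    // Hypothetical usage; real callers must populate the tensor-info lists
    // for their model before calling Initialize.
    int32_t RunOnce(const std::string& model_filename) {
        InferenceHelperMnn helper;
        std::vector<InputTensorInfo> input_tensor_info_list;    // filled per model
        std::vector<OutputTensorInfo> output_tensor_info_list;  // filled per model
        helper.SetNumThreads(4);                                // was setNumThread(4)
        if (helper.Initialize(model_filename, input_tensor_info_list,
                              output_tensor_info_list) != InferenceHelper::kRetOk) {  // was initialize(...)
            return InferenceHelper::kRetErr;
        }
        helper.PreProcess(input_tensor_info_list);              // was preProcess(...)
        helper.Process(output_tensor_info_list);                // was invoke(...)
        return helper.Finalize();                               // was finalize()
    }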

inference_helper/inference_helper_ncnn.cpp (+90 −90)

@@ -39,165 +39,165 @@ limitations under the License.
 /*** Function ***/
 InferenceHelperNcnn::InferenceHelperNcnn()
 {
-    m_numThread = 1;
+    num_threads_ = 1;
 }

 InferenceHelperNcnn::~InferenceHelperNcnn()
 {
 }

-int32_t InferenceHelperNcnn::setNumThread(const int32_t numThread)
+int32_t InferenceHelperNcnn::SetNumThreads(const int32_t num_threads)
 {
-    m_numThread = numThread;
-    return RET_OK;
+    num_threads_ = num_threads;
+    return kRetOk;
 }

-int32_t InferenceHelperNcnn::setCustomOps(const std::vector<std::pair<const char*, const void*>>& customOps)
+int32_t InferenceHelperNcnn::SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops)
 {
     PRINT("[WARNING] This method is not supported\n");
-    return RET_OK;
+    return kRetOk;
 }

-int32_t InferenceHelperNcnn::initialize(const std::string& modelFilename, std::vector<InputTensorInfo>& inputTensorInfoList, std::vector<OutputTensorInfo>& outputTensorInfoList)
+int32_t InferenceHelperNcnn::Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list)
 {
     /*** Create network ***/
-    m_net.reset(new ncnn::Net());
-    m_net->opt.use_fp16_arithmetic = true;
-    m_net->opt.use_fp16_packed = true;
-    m_net->opt.use_fp16_storage = true;
-
-    std::string binFilename = modelFilename;
-    if (modelFilename.find(".param") == std::string::npos) {
-        PRINT_E("Invalid model param filename (%s)\n", modelFilename.c_str());
-        return RET_ERR;
+    net_.reset(new ncnn::Net());
+    net_->opt.use_fp16_arithmetic = true;
+    net_->opt.use_fp16_packed = true;
+    net_->opt.use_fp16_storage = true;
+
+    std::string bin_filename = model_filename;
+    if (model_filename.find(".param") == std::string::npos) {
+        PRINT_E("Invalid model param filename (%s)\n", model_filename.c_str());
+        return kRetErr;
     }
-    binFilename = binFilename.replace(binFilename.find(".param"), std::string(".param").length(), ".bin\0");
-    if (m_net->load_param(modelFilename.c_str()) != 0) {
-        PRINT_E("Failed to load model param file (%s)\n", modelFilename.c_str());
-        return RET_ERR;
+    bin_filename = bin_filename.replace(bin_filename.find(".param"), std::string(".param").length(), ".bin\0");
+    if (net_->load_param(model_filename.c_str()) != 0) {
+        PRINT_E("Failed to load model param file (%s)\n", model_filename.c_str());
+        return kRetErr;
     }
-    if (m_net->load_model(binFilename.c_str()) != 0) {
-        PRINT_E("Failed to load model bin file (%s)\n", binFilename.c_str());
-        return RET_ERR;
+    if (net_->load_model(bin_filename.c_str()) != 0) {
+        PRINT_E("Failed to load model bin file (%s)\n", bin_filename.c_str());
+        return kRetErr;
     }

     /* Convert normalize parameter to speed up */
-    for (auto& inputTensorInfo : inputTensorInfoList) {
-        convertNormalizeParameters(inputTensorInfo);
+    for (auto& input_tensor_info : input_tensor_info_list) {
+        ConvertNormalizeParameters(input_tensor_info);
     }

-    return RET_OK;
+    return kRetOk;
 };

-int32_t InferenceHelperNcnn::finalize(void)
+int32_t InferenceHelperNcnn::Finalize(void)
 {
-    m_net.reset();
-    m_inMatList.clear();
-    m_outMatList.clear();
-    return RET_ERR;
+    net_.reset();
+    in_mat_list_.clear();
+    out_mat_list_.clear();
+    return kRetErr;
 }

-int32_t InferenceHelperNcnn::preProcess(const std::vector<InputTensorInfo>& inputTensorInfoList)
+int32_t InferenceHelperNcnn::PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list)
 {
-    m_inMatList.clear();
-    for (const auto& inputTensorInfo : inputTensorInfoList) {
-        ncnn::Mat ncnnMat;
-        if (inputTensorInfo.dataType == InputTensorInfo::DATA_TYPE_IMAGE) {
+    in_mat_list_.clear();
+    for (const auto& input_tensor_info : input_tensor_info_list) {
+        ncnn::Mat ncnn_mat;
+        if (input_tensor_info.data_type == InputTensorInfo::kDataTypeImage) {
             /* Crop */
-            if ((inputTensorInfo.imageInfo.width != inputTensorInfo.imageInfo.cropWidth) || (inputTensorInfo.imageInfo.height != inputTensorInfo.imageInfo.cropHeight)) {
+            if ((input_tensor_info.image_info.width != input_tensor_info.image_info.crop_width) || (input_tensor_info.image_info.height != input_tensor_info.image_info.crop_height)) {
                 PRINT_E("Crop is not supported\n");
-                return RET_ERR;
+                return kRetErr;
             }
             /* Convert color type */
-            int32_t pixelType = 0;
-            if ((inputTensorInfo.imageInfo.channel == 3) && (inputTensorInfo.tensorDims.channel == 3)) {
-                pixelType = (inputTensorInfo.imageInfo.isBGR) ? ncnn::Mat::PIXEL_BGR : ncnn::Mat::PIXEL_RGB;
-                if (inputTensorInfo.imageInfo.swapColor) {
-                    pixelType = (inputTensorInfo.imageInfo.isBGR) ? ncnn::Mat::PIXEL_BGR2RGB : ncnn::Mat::PIXEL_RGB2BGR;
+            int32_t pixel_type = 0;
+            if ((input_tensor_info.image_info.channel == 3) && (input_tensor_info.tensor_dims.channel == 3)) {
+                pixel_type = (input_tensor_info.image_info.is_bgr) ? ncnn::Mat::PIXEL_BGR : ncnn::Mat::PIXEL_RGB;
+                if (input_tensor_info.image_info.swap_color) {
+                    pixel_type = (input_tensor_info.image_info.is_bgr) ? ncnn::Mat::PIXEL_BGR2RGB : ncnn::Mat::PIXEL_RGB2BGR;
                 }
-            } else if ((inputTensorInfo.imageInfo.channel == 1) && (inputTensorInfo.tensorDims.channel == 1)) {
-                pixelType = ncnn::Mat::PIXEL_GRAY;
-            } else if ((inputTensorInfo.imageInfo.channel == 3) && (inputTensorInfo.tensorDims.channel == 1)) {
-                pixelType = (inputTensorInfo.imageInfo.isBGR) ? ncnn::Mat::PIXEL_BGR2GRAY : ncnn::Mat::PIXEL_RGB2GRAY;
-            } else if ((inputTensorInfo.imageInfo.channel == 1) && (inputTensorInfo.tensorDims.channel == 3)) {
-                pixelType = ncnn::Mat::PIXEL_GRAY2RGB;
+            } else if ((input_tensor_info.image_info.channel == 1) && (input_tensor_info.tensor_dims.channel == 1)) {
+                pixel_type = ncnn::Mat::PIXEL_GRAY;
+            } else if ((input_tensor_info.image_info.channel == 3) && (input_tensor_info.tensor_dims.channel == 1)) {
+                pixel_type = (input_tensor_info.image_info.is_bgr) ? ncnn::Mat::PIXEL_BGR2GRAY : ncnn::Mat::PIXEL_RGB2GRAY;
+            } else if ((input_tensor_info.image_info.channel == 1) && (input_tensor_info.tensor_dims.channel == 3)) {
+                pixel_type = ncnn::Mat::PIXEL_GRAY2RGB;
             } else {
-                PRINT_E("Unsupported color conversion (%d, %d)\n", inputTensorInfo.imageInfo.channel, inputTensorInfo.tensorDims.channel);
-                return RET_ERR;
+                PRINT_E("Unsupported color conversion (%d, %d)\n", input_tensor_info.image_info.channel, input_tensor_info.tensor_dims.channel);
+                return kRetErr;
             }

-            if (inputTensorInfo.imageInfo.cropWidth == inputTensorInfo.tensorDims.width && inputTensorInfo.imageInfo.cropHeight == inputTensorInfo.tensorDims.height) {
+            if (input_tensor_info.image_info.crop_width == input_tensor_info.tensor_dims.width && input_tensor_info.image_info.crop_height == input_tensor_info.tensor_dims.height) {
                 /* Convert to blob */
-                ncnnMat = ncnn::Mat::from_pixels((uint8_t*)inputTensorInfo.data, pixelType, inputTensorInfo.imageInfo.width, inputTensorInfo.imageInfo.height);
+                ncnn_mat = ncnn::Mat::from_pixels((uint8_t*)input_tensor_info.data, pixel_type, input_tensor_info.image_info.width, input_tensor_info.image_info.height);
             } else {
                 /* Convert to blob with resize */
-                ncnnMat = ncnn::Mat::from_pixels_resize((uint8_t*)inputTensorInfo.data, pixelType, inputTensorInfo.imageInfo.width, inputTensorInfo.imageInfo.height, inputTensorInfo.tensorDims.width, inputTensorInfo.tensorDims.height);
+                ncnn_mat = ncnn::Mat::from_pixels_resize((uint8_t*)input_tensor_info.data, pixel_type, input_tensor_info.image_info.width, input_tensor_info.image_info.height, input_tensor_info.tensor_dims.width, input_tensor_info.tensor_dims.height);
             }
             /* Normalize image */
-            ncnnMat.substract_mean_normalize(inputTensorInfo.normalize.mean, inputTensorInfo.normalize.norm);
-        } else if (inputTensorInfo.dataType == InputTensorInfo::DATA_TYPE_BLOB_NHWC) {
-            PRINT_E("[ToDo] Unsupported data type (%d)\n", inputTensorInfo.dataType);
-            ncnnMat = ncnn::Mat::from_pixels((uint8_t*)inputTensorInfo.data, inputTensorInfo.tensorDims.channel == 3 ? ncnn::Mat::PIXEL_RGB : ncnn::Mat::PIXEL_GRAY, inputTensorInfo.tensorDims.width, inputTensorInfo.tensorDims.height);
-        } else if (inputTensorInfo.dataType == InputTensorInfo::DATA_TYPE_BLOB_NCHW) {
-            ncnnMat = ncnn::Mat(inputTensorInfo.tensorDims.width, inputTensorInfo.tensorDims.height, inputTensorInfo.tensorDims.channel, inputTensorInfo.data);
+            ncnn_mat.substract_mean_normalize(input_tensor_info.normalize.mean, input_tensor_info.normalize.norm);
+        } else if (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNhwc) {
+            PRINT_E("[ToDo] Unsupported data type (%d)\n", input_tensor_info.data_type);
+            ncnn_mat = ncnn::Mat::from_pixels((uint8_t*)input_tensor_info.data, input_tensor_info.tensor_dims.channel == 3 ? ncnn::Mat::PIXEL_RGB : ncnn::Mat::PIXEL_GRAY, input_tensor_info.tensor_dims.width, input_tensor_info.tensor_dims.height);
+        } else if (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNchw) {
+            ncnn_mat = ncnn::Mat(input_tensor_info.tensor_dims.width, input_tensor_info.tensor_dims.height, input_tensor_info.tensor_dims.channel, input_tensor_info.data);
         } else {
-            PRINT_E("Unsupported data type (%d)\n", inputTensorInfo.dataType);
-            return RET_ERR;
+            PRINT_E("Unsupported data type (%d)\n", input_tensor_info.data_type);
+            return kRetErr;
         }
-        m_inMatList.push_back(std::pair<std::string, ncnn::Mat>(inputTensorInfo.name, ncnnMat));
+        in_mat_list_.push_back(std::pair<std::string, ncnn::Mat>(input_tensor_info.name, ncnn_mat));
     }
-    return RET_OK;
+    return kRetOk;
 }

-int32_t InferenceHelperNcnn::invoke(std::vector<OutputTensorInfo>& outputTensorInfoList)
+int32_t InferenceHelperNcnn::Process(std::vector<OutputTensorInfo>& output_tensor_info_list)
 {
-    ncnn::Extractor ex = m_net->create_extractor();
+    ncnn::Extractor ex = net_->create_extractor();
     ex.set_light_mode(true);
-    ex.set_num_threads(m_numThread);
-    for (const auto& inputMat : m_inMatList) {
+    ex.set_num_threads(num_threads_);
+    for (const auto& inputMat : in_mat_list_) {
         if (ex.input(inputMat.first.c_str(), inputMat.second) != 0) {
             PRINT_E("Input mat error (%s)\n", inputMat.first.c_str());
-            return RET_ERR;
+            return kRetErr;
         }
     }

-    m_outMatList.clear();
-    for (auto& outputTensorInfo : outputTensorInfoList) {
-        ncnn::Mat ncnnOut;
-        if (ex.extract(outputTensorInfo.name.c_str(), ncnnOut) != 0) {
-            PRINT_E("Output mat error (%s)\n", outputTensorInfo.name.c_str());
-            return RET_ERR;
+    out_mat_list_.clear();
+    for (auto& output_tensor_info : output_tensor_info_list) {
+        ncnn::Mat ncnn_out;
+        if (ex.extract(output_tensor_info.name.c_str(), ncnn_out) != 0) {
+            PRINT_E("Output mat error (%s)\n", output_tensor_info.name.c_str());
+            return kRetErr;
        }
-        m_outMatList.push_back(ncnnOut); // store ncnn mat in member variable so that data keep exist
-        outputTensorInfo.data = ncnnOut.data;
-        outputTensorInfo.tensorDims.batch = 1;
-        outputTensorInfo.tensorDims.channel = ncnnOut.c;
-        outputTensorInfo.tensorDims.height = ncnnOut.h;
-        outputTensorInfo.tensorDims.width = ncnnOut.w;
+        out_mat_list_.push_back(ncnn_out); // store the ncnn mat in a member variable so that the data stays alive
+        output_tensor_info.data = ncnn_out.data;
+        output_tensor_info.tensor_dims.batch = 1;
+        output_tensor_info.tensor_dims.channel = ncnn_out.c;
+        output_tensor_info.tensor_dims.height = ncnn_out.h;
+        output_tensor_info.tensor_dims.width = ncnn_out.w;
     }

-    return RET_OK;
+    return kRetOk;
 }

-void InferenceHelperNcnn::convertNormalizeParameters(InputTensorInfo& inputTensorInfo)
+void InferenceHelperNcnn::ConvertNormalizeParameters(InputTensorInfo& tensor_info)
 {
-    if (inputTensorInfo.dataType != InputTensorInfo::DATA_TYPE_IMAGE) return;
+    if (tensor_info.data_type != InputTensorInfo::kDataTypeImage) return;

 #if 0
     /* Convert to speed up normalization: ((src / 255) - mean) / norm = src * (1 / (255 * norm)) - (mean / norm) */
     for (int32_t i = 0; i < 3; i++) {
-        inputTensorInfo.normalize.mean[i] /= inputTensorInfo.normalize.norm[i];
-        inputTensorInfo.normalize.norm[i] *= 255.0f;
-        inputTensorInfo.normalize.norm[i] = 1.0f / inputTensorInfo.normalize.norm[i];
+        tensor_info.normalize.mean[i] /= tensor_info.normalize.norm[i];
+        tensor_info.normalize.norm[i] *= 255.0f;
+        tensor_info.normalize.norm[i] = 1.0f / tensor_info.normalize.norm[i];
     }
 #endif
 #if 1
     /* Convert to speed up normalization: ((src / 255) - mean) / norm = (src - (mean * 255)) * (1 / (255 * norm)) */
     for (int32_t i = 0; i < 3; i++) {
-        inputTensorInfo.normalize.mean[i] *= 255.0f;
-        inputTensorInfo.normalize.norm[i] *= 255.0f;
-        inputTensorInfo.normalize.norm[i] = 1.0f / inputTensorInfo.normalize.norm[i];
+        tensor_info.normalize.mean[i] *= 255.0f;
+        tensor_info.normalize.norm[i] *= 255.0f;
+        tensor_info.normalize.norm[i] = 1.0f / tensor_info.normalize.norm[i];
     }
 #endif
 }
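
ConvertNormalizeParameters folds the divide-by-255 into the stored mean/norm once at initialization, so per-pixel normalization becomes a single subtract-and-multiply: ((src / 255) - mean) / norm = (src - mean * 255) * (1 / (255 * norm)), which is exactly what the enabled #if 1 branch precomputes. A standalone sketch that checks the identity numerically; the sample values are arbitrary examples, not from the repository:

    #include <cstdio>

    int main() {
        const float src  = 128.0f;   // raw 8-bit pixel value
        const float mean = 0.485f;   // example per-channel mean
        const float norm = 0.229f;   // example per-channel std
        const float direct = ((src / 255.0f) - mean) / norm;
        // Precomputed form, mirroring the #if 1 branch above:
        const float pre_mean = mean * 255.0f;            // mean[i] *= 255.0f
        const float pre_norm = 1.0f / (255.0f * norm);   // norm[i] = 1 / (255 * norm[i])
        const float fused = (src - pre_mean) * pre_norm;
        std::printf("direct=%f fused=%f\n", direct, fused);  // equal up to float rounding
        return 0;
    }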

inference_helper/inference_helper_ncnn.h (+11 −11)

@@ -33,21 +33,21 @@ class InferenceHelperNcnn : public InferenceHelper {
 public:
     InferenceHelperNcnn();
     ~InferenceHelperNcnn() override;
-    int32_t setNumThread(const int32_t numThread) override;
-    int32_t setCustomOps(const std::vector<std::pair<const char*, const void*>>& customOps) override;
-    int32_t initialize(const std::string& modelFilename, std::vector<InputTensorInfo>& inputTensorInfoList, std::vector<OutputTensorInfo>& outputTensorInfoList) override;
-    int32_t finalize(void) override;
-    int32_t preProcess(const std::vector<InputTensorInfo>& inputTensorInfoList) override;
-    int32_t invoke(std::vector<OutputTensorInfo>& outputTensorInfoList) override;
+    int32_t SetNumThreads(const int32_t num_threads) override;
+    int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) override;
+    int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
+    int32_t Finalize(void) override;
+    int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
+    int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;

 private:
-    void convertNormalizeParameters(InputTensorInfo& tensorInfo);
+    void ConvertNormalizeParameters(InputTensorInfo& tensor_info);

 private:
-    std::unique_ptr<ncnn::Net> m_net;
-    std::vector<std::pair<std::string, ncnn::Mat>> m_inMatList; // <name, mat>
-    std::vector<ncnn::Mat> m_outMatList;
-    int32_t m_numThread;
+    std::unique_ptr<ncnn::Net> net_;
+    std::vector<std::pair<std::string, ncnn::Mat>> in_mat_list_; // <name, mat>
+    std::vector<ncnn::Mat> out_mat_list_;
+    int32_t num_threads_;
 };

 #endif

inference_helper/inference_helper_opencv.cpp (+8 −8)

@@ -248,24 +248,24 @@ int32_t InferenceHelperOpenCV::Process(std::vector<OutputTensorInfo>& output_ten
     return kRetOk;
 }

-void InferenceHelperOpenCV::ConvertNormalizeParameters(InputTensorInfo& input_tensor_info)
+void InferenceHelperOpenCV::ConvertNormalizeParameters(InputTensorInfo& tensor_info)
 {
-    if (input_tensor_info.data_type != InputTensorInfo::kDataTypeImage) return;
+    if (tensor_info.data_type != InputTensorInfo::kDataTypeImage) return;

 #if 0
     /* Convert to speed up normalization: ((src / 255) - mean) / norm = src * (1 / (255 * norm)) - (mean / norm) */
     for (int32_t i = 0; i < 3; i++) {
-        input_tensor_info.normalize.mean[i] /= input_tensor_info.normalize.norm[i];
-        input_tensor_info.normalize.norm[i] *= 255.0f;
-        input_tensor_info.normalize.norm[i] = 1.0f / input_tensor_info.normalize.norm[i];
+        tensor_info.normalize.mean[i] /= tensor_info.normalize.norm[i];
+        tensor_info.normalize.norm[i] *= 255.0f;
+        tensor_info.normalize.norm[i] = 1.0f / tensor_info.normalize.norm[i];
     }
 #endif
 #if 1
     /* Convert to speed up normalization: ((src / 255) - mean) / norm = (src - (mean * 255)) * (1 / (255 * norm)) */
     for (int32_t i = 0; i < 3; i++) {
-        input_tensor_info.normalize.mean[i] *= 255.0f;
-        input_tensor_info.normalize.norm[i] *= 255.0f;
-        input_tensor_info.normalize.norm[i] = 1.0f / input_tensor_info.normalize.norm[i];
+        tensor_info.normalize.mean[i] *= 255.0f;
+        tensor_info.normalize.norm[i] *= 255.0f;
+        tensor_info.normalize.norm[i] = 1.0f / tensor_info.normalize.norm[i];
     }
 #endif
 }

inference_helper/inference_helper_opencv.h (+1 −1)

@@ -41,7 +41,7 @@ class InferenceHelperOpenCV : public InferenceHelper {
     int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;

 private:
-    void ConvertNormalizeParameters(InputTensorInfo& input_tensor_info);
+    void ConvertNormalizeParameters(InputTensorInfo& tensor_info);

 private:
     cv::dnn::Net net_;
