/**
 * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <unistd.h>

#include <inference_engine_error.h>
#include "inference_engine_opencv_private.h"

#include <opencv2/core/ocl.hpp>
namespace InferenceEngineImpl
{
InferenceOpenCV::InferenceOpenCV(void) : mNet()
{}
InferenceOpenCV::~InferenceOpenCV()
{}
int InferenceOpenCV::SetPrivateData(void *data)
{
	// The OpenCV backend has no backend-specific data to configure.
	(void) data;

	return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceOpenCV::SetTargetDevices(int types)
{
	LOGI("Inference targets are: ");
	switch (types) {
	case INFERENCE_TARGET_CPU:
		mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
		break;
	case INFERENCE_TARGET_GPU:
		mNet.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);
		break;
	case INFERENCE_TARGET_CUSTOM:
	case INFERENCE_TARGET_NONE:
	default:
		LOGE("Not supported device type [%d], set to CPU mode", (int) types);
		mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
		break;
	}

	return INFERENCE_ENGINE_ERROR_NONE;
}
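/*
 * A minimal usage sketch for the target selection above ("engine" is a
 * hypothetical InferenceOpenCV instance; in practice the engine-common
 * layer makes this call):
 *
 *   engine->SetTargetDevices(INFERENCE_TARGET_GPU);
 *   // -> cv::dnn::DNN_TARGET_OPENCL. Whether inference actually runs on
 *   //    the GPU then depends on the device's OpenCL support; OpenCV
 *   //    falls back to the CPU at runtime if OpenCL is unavailable.
 */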
int InferenceOpenCV::SetCLTuner(const inference_engine_cltuner *cltuner)
{
	// Nothing to do because OpenCV doesn't support the CLTuner feature yet.
	(void) cltuner;

	return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceOpenCV::Load(std::vector<std::string> model_paths,
						  inference_model_format_e model_format)
{
	int ret = INFERENCE_ENGINE_ERROR_NONE;

	// Sort the given paths into weight and config files by extension:
	// ".caffemodel" / ".pb" files carry the weights, anything else
	// (e.g. ".prototxt" / ".pbtxt") is treated as the network config.
	std::string fileExt;
	for (std::vector<std::string>::iterator iter = model_paths.begin();
		 iter != model_paths.end(); ++iter) {
		if (access((*iter).c_str(), F_OK)) {
			LOGE("model path [%s] does not exist", (*iter).c_str());
			return INFERENCE_ENGINE_ERROR_INVALID_PATH;
		}

		fileExt = (*iter).substr(((*iter).find_last_of(".")) + 1);

		if (fileExt.compare("caffemodel") == 0 || fileExt.compare("pb") == 0) {
			mWeightFile = (*iter);
		} else {
			mConfigFile = (*iter);
		}
	}

	// This call may need to be changed if the OpenCV version is upgraded.
	if (model_format == INFERENCE_MODEL_CAFFE) {
		mNet = cv::dnn::readNetFromCaffe(mConfigFile, mWeightFile);
	} else if (model_format == INFERENCE_MODEL_TF) {
		mNet = cv::dnn::readNetFromTensorflow(mWeightFile, mConfigFile);
	} else {
		LOGE("Not supported model file!");
		return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
	}

	if (mNet.empty()) {
		LOGE("Net is empty");
		return INFERENCE_ENGINE_ERROR_INVALID_DATA;
	}

	return ret;
}
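/*
 * A minimal sketch of how Load() is fed, assuming a Caffe SSD model
 * (the file names below are hypothetical, not part of this backend):
 *
 *   std::vector<std::string> paths = { "/path/to/ssd.prototxt",
 *                                      "/path/to/ssd.caffemodel" };
 *   engine->Load(paths, INFERENCE_MODEL_CAFFE);
 *
 * Because the weight/config split is done by extension, the order of the
 * two paths does not matter.
 */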
int InferenceOpenCV::GetInputTensorBuffers(
		std::vector<inference_engine_tensor_buffer> &buffers)
{
	void *pBuff = NULL;

	std::vector<inference_engine_tensor_info>::iterator info_iter;
	for (info_iter = mInputTensorInfo.begin();
		 info_iter != mInputTensorInfo.end(); ++info_iter) {
		// The shape is NCHW, so shape[3] is width and shape[2] is height.
		// CV_32FC3 (3-channel float) is assumed for the blob element type,
		// matching the FLOAT32 tensor type reported below.
		cv::Mat inputBlob(cv::Size((*info_iter).shape[3],
								   (*info_iter).shape[2]),
						  CV_32FC3);
		mInputData.push_back(inputBlob);

		pBuff = mInputData.back().ptr<void *>(0);
		size_t sizeBuff = mInputData.back().elemSize() *
						  mInputData.back().rows * mInputData.back().cols;
		LOGI("elemSize: %zd, rows: %d, cols: %d",
			 mInputData.back().elemSize(), mInputData.back().rows,
			 mInputData.back().cols);
		inference_engine_tensor_buffer buffer = {
			pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1
		};
		buffers.push_back(buffer);
	}

	return INFERENCE_ENGINE_ERROR_NONE;
}
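/*
 * The buffers returned above do not own their memory: each one aliases the
 * data of a cv::Mat held in mInputData, so the caller writes its input
 * image directly into the backend's blobs. The buffers stay valid only as
 * long as mInputData is not cleared or reallocated.
 */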
int InferenceOpenCV::GetOutputTensorBuffers(
		std::vector<inference_engine_tensor_buffer> &buffers)
{
	void *pBuff = NULL;

	mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(),
										  cv::Scalar(), false, false);

	mNet.setInput(mInputBlobs, mInputLayers.front());

	std::vector<cv::String> outputLayers(mOutputLayers.begin(),
										 mOutputLayers.end());
	mNet.forward(mOutputBlobs, outputLayers);

	std::vector<cv::Mat>::iterator iter;
	for (iter = mOutputBlobs.begin(); iter != mOutputBlobs.end(); ++iter) {
		pBuff = (*iter).ptr<void *>(0);
		size_t sizeBuff = (*iter).total() * (*iter).elemSize();
		inference_engine_tensor_buffer buffer = {
			pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1
		};
		buffers.push_back(buffer);
	}

	if (buffers.empty()) {
		inference_engine_tensor_buffer buffer = {
			nullptr, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 1
		};
		buffers.push_back(buffer);
	}

	return INFERENCE_ENGINE_ERROR_NONE;
}
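/*
 * Note: this getter runs a full forward pass just to materialize output
 * cv::Mat objects of the right shape, since OpenCV's dnn::Net does not
 * expose pre-allocated output buffers. Run() repeats the forward pass into
 * the same mOutputBlobs and then re-points the caller's buffers at the
 * fresh blob data.
 */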
int InferenceOpenCV::GetInputLayerProperty(
		inference_engine_layer_property &property)
{
	if (mInputLayers.empty()) {
		return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
	}

	property.layer_names = mInputLayers;
	property.tensor_infos = mInputTensorInfo;

	return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceOpenCV::GetOutputLayerProperty(
		inference_engine_layer_property &property)
{
	if (mOutputLayers.empty()) {
		return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
	}

	int lid = -1;
	int idx = 0;
	std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
	for (std::vector<std::string>::iterator iter = mOutputLayers.begin();
		 iter != mOutputLayers.end(); ++iter, ++idx) {
		LOGI("output layer: %s", (*iter).c_str());
		lid = mNet.getLayerId((*iter));
		LOGI("output layer Id: %d", lid);
		if (lid < 0) {
			LOGE("Invalid output layer %s", (*iter).c_str());
			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
		}

		std::vector<cv::dnn::MatShape> lInputShape, lOutputShape;
		LOGI("%zu, %zu, %zu, %zu", mInputTensorInfo[idx].shape[0],
			 mInputTensorInfo[idx].shape[1], mInputTensorInfo[idx].shape[2],
			 mInputTensorInfo[idx].shape[3]);

		std::vector<int> cvInputTensorShape(
				mInputTensorInfo[idx].shape.begin(),
				mInputTensorInfo[idx].shape.end());
		mNet.getLayerShapes(cvInputTensorShape, lid, lInputShape,
							lOutputShape);

		inference_engine_tensor_info tensor_info;
		tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
		tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
		// lOutputShape may hold multiple tensors even though
		// there is only one output layer name.
		LOGI("size of OutputShape: %zu", lOutputShape.size());
		std::vector<size_t> ieOutputTensorShape(lOutputShape[0].begin(),
												lOutputShape[0].end());
		tensor_info.shape = ieOutputTensorShape;

		tensor_info.size = 1;
		for (std::vector<size_t>::iterator iter2 = tensor_info.shape.begin();
			 iter2 != tensor_info.shape.end(); ++iter2) {
			LOGI("%zu", (*iter2));
			tensor_info.size *= (*iter2);
		}
		mOutputTensorInfo.push_back(tensor_info);
	}

	property.layer_names = mOutputLayers;
	property.tensor_infos = mOutputTensorInfo;

	return INFERENCE_ENGINE_ERROR_NONE;
}
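/*
 * The element count computed above is just the product of the shape
 * entries; e.g. a detection output shaped {1, 1, 100, 7} would yield
 * tensor_info.size = 700 (the numbers are illustrative, not tied to a
 * specific model).
 */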
int InferenceOpenCV::SetInputLayerProperty(
		inference_engine_layer_property &property)
{
	std::vector<std::string>::iterator iter;
	for (iter = property.layer_names.begin();
		 iter != property.layer_names.end(); ++iter) {
		std::string name = *iter;
		LOGI("input layer name = %s", name.c_str());
	}

	mInputLayers.clear();
	std::vector<std::string>().swap(mInputLayers);

	mInputTensorInfo.clear();
	std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);

	mInputLayers = property.layer_names;
	mInputTensorInfo = property.tensor_infos;

	return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceOpenCV::SetOutputLayerProperty(
		inference_engine_layer_property &property)
{
	std::vector<std::string>::iterator iter;
	for (iter = property.layer_names.begin();
		 iter != property.layer_names.end(); ++iter) {
		std::string name = *iter;
		LOGI("output layer name = %s", name.c_str());
	}

	mOutputLayers.clear();
	std::vector<std::string>().swap(mOutputLayers);

	mOutputLayers = property.layer_names;

	return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceOpenCV::GetBackendCapacity(inference_engine_capacity *capacity)
{
	if (capacity == NULL) {
		LOGE("Bad pointer.");
		return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
	}

	capacity->supported_accel_devices = INFERENCE_TARGET_CPU;

	return INFERENCE_ENGINE_ERROR_NONE;
}
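/*
 * Note that only INFERENCE_TARGET_CPU is advertised here, even though
 * SetTargetDevices() can map INFERENCE_TARGET_GPU to DNN_TARGET_OPENCL;
 * callers that probe capacity first will therefore stay on the CPU path.
 */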
int InferenceOpenCV::Run(
		std::vector<inference_engine_tensor_buffer> &input_buffers,
		std::vector<inference_engine_tensor_buffer> &output_buffers)
{
	// input_buffers is unused: the caller has already written its input
	// data into mInputData through the aliased buffers handed out by
	// GetInputTensorBuffers().
	(void) input_buffers;

	// TODO: check memory usage here.
	mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(),
										  cv::Scalar(), false, false);

	// Currently, one input layer with multiple input tensors is supported,
	// but multiple input layers with multiple input tensors are not.
	// To support the latter, setInput() would have to be called once per
	// layer, matching each input blob to its corresponding input layer.
	mNet.setInput(mInputBlobs, mInputLayers.front());

	if (mOutputBlobs.size() != output_buffers.size()) {
		LOGE("output_buffers size is %zu but outputBlobs %zu",
			 output_buffers.size(), mOutputBlobs.size());
		return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
	}

	std::vector<cv::String> outputLayers(mOutputLayers.begin(),
										 mOutputLayers.end());

	mNet.forward(mOutputBlobs, outputLayers);

	// mOutputBlobs[0] has shape 1x1xNx7 and the first of the 7 values
	// indicates the image id. Reuse that slot as the number of detections
	// since batch mode isn't supported.
	if (outputLayers[0].compare("detection_out") == 0) {
		cv::Mat cvOutputData(
				mOutputBlobs[0].size[2], mOutputBlobs[0].size[3], CV_32F,
				reinterpret_cast<float *>(mOutputBlobs[0].ptr<float *>(0)));
		cvOutputData.at<float>(0, 0) = mOutputBlobs[0].size[2];
	}

	for (unsigned int k = 0; k < output_buffers.size(); ++k)
		output_buffers[k].buffer = mOutputBlobs[k].ptr<void>(0);

	return INFERENCE_ENGINE_ERROR_NONE;
}
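/*
 * Background for the detection_out fix-up above (the general OpenCV DNN /
 * SSD DetectionOutput convention, not specific to this backend): each of
 * the N rows of the 1x1xNx7 blob is laid out as
 *
 *   [image_id, class_label, confidence, left, top, right, bottom]
 *
 * so overwriting element (0, 0) repurposes the first image_id slot as a
 * detection count for single-image (non-batched) callers.
 */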
extern "C"
{
	class IInferenceEngineCommon *EngineCommonInit(void)
	{
		InferenceOpenCV *engine = new InferenceOpenCV();
		return engine;
	}

	void EngineCommonDestroy(class IInferenceEngineCommon *engine)
	{
		delete engine;
	}
}
} /* InferenceEngineImpl */
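/*
 * A minimal, hypothetical sketch of the lifecycle the engine-common layer
 * drives through the two entry points above (buffer handling omitted, file
 * names illustrative):
 *
 *   IInferenceEngineCommon *engine = EngineCommonInit();
 *   engine->SetTargetDevices(INFERENCE_TARGET_CPU);
 *   engine->Load({ "/path/to/model.prototxt", "/path/to/model.caffemodel" },
 *                INFERENCE_MODEL_CAFFE);
 *   // ... GetInputTensorBuffers() / GetOutputTensorBuffers() / Run() ...
 *   EngineCommonDestroy(engine);
 */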