Fix coding style based on Tizen SE C++ Coding Rule 56/235356/1
author Inki Dae <inki.dae@samsung.com>
Thu, 4 Jun 2020 05:36:41 +0000 (14:36 +0900)
committer Inki Dae <inki.dae@samsung.com>
Thu, 4 Jun 2020 05:36:41 +0000 (14:36 +0900)
Tizen SE C++ Coding Rule:
https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=160925159

Change-Id: Ia01c4ef55138bdbccb8fdaebf5b3d86494bde001
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_opencv.cpp
src/inference_engine_opencv_private.h

index e3971e95d9159805745767ae3c499fc64b01bc6d..da5942aa60eb6d754529c668894059f220fff081 100644 (file)
 #include <unistd.h>
 #include <time.h>
 
-namespace InferenceEngineImpl {
-namespace OpenCVImpl {
-
-InferenceOpenCV::InferenceOpenCV(void) :
-    mNet()
-{
-    LOGE("ENTER");
-    LOGE("LEAVE");
-}
-
-InferenceOpenCV::~InferenceOpenCV()
-{
-    ;
-}
-
-int InferenceOpenCV::SetPrivateData(void *data)
-{
-       // Nothing to do yet.
-
-       return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceOpenCV::SetTargetDevices(int types)
-{
-    LOGI("ENTER");
-
-    LOGI("Inferece targets are: ");
-    switch (types) {
-    case INFERENCE_TARGET_CPU :
-        mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
-        break;
-    case INFERENCE_TARGET_GPU :
-        mNet.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);
-        break;
-    case INFERENCE_TARGET_CUSTOM:
-    case INFERENCE_TARGET_NONE:
-    default:
-        LOGE("Not supported device type [%d], Set CPU mode", (int)types);
-        mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
-    }
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceOpenCV::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
-{
-    LOGI("ENTER");
-
-    int ret = INFERENCE_ENGINE_ERROR_NONE;
-
-    std::string fileExt;
-    for (std::vector<std::string>::iterator iter = model_paths.begin();
-        iter != model_paths.end(); ++iter) {
-        if (access((*iter).c_str(), F_OK)) {
-            LOGE("model path in [%s] not exist", (*iter).c_str());
-                   return INFERENCE_ENGINE_ERROR_INVALID_PATH;
-        }
-        fileExt = (*iter).substr(((*iter).find_last_of("."))+1);
-
-        if (fileExt.compare("caffemodel") == 0 ||
-            fileExt.compare("pb") == 0) {
-            mWeightFile = (*iter);
-        } else {
-            mConfigFile = (*iter);
-        }
-    }
-
-    // This call may be changed if OpenCV version would be upgraded
-    if (model_format == INFERENCE_MODEL_CAFFE) {
-        mNet = cv::dnn::readNetFromCaffe(mConfigFile, mWeightFile);
-    } else if (model_format == INFERENCE_MODEL_TF) {
-        mNet = cv::dnn::readNetFromTensorflow(mWeightFile, mConfigFile);
-    } else {
-        LOGE("Not supported model file!");
-    }
-
-    if (mNet.empty()) {
-        LOGE("Net is empty");
-        return INFERENCE_ENGINE_ERROR_INVALID_DATA;
-    }
-
-    LOGI("LEAVE");
-
-    return ret;
-}
-
-int InferenceOpenCV::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
-{
-    LOGI("ENTER");
-
-    mInputData.clear();
-
-    void * pBuff = NULL;
-    std::vector<inference_engine_tensor_info>::iterator info_iter;
-    for (info_iter = mInputTensorInfo.begin();
-        info_iter != mInputTensorInfo.end(); ++info_iter) {
-        cv::Mat inputBlob(cv::Size((*info_iter).shape[3], (*info_iter).shape[2]), CV_32FC3);
-        mInputData.push_back(inputBlob);
-
-        pBuff = mInputData.back().ptr<void*>(0);
-        size_t sizeBuff = mInputData.back().elemSize() * mInputData.back().rows * mInputData.back().cols;
-        LOGI("elemSize: %zd, rows: %d, cols: %d", mInputData.back().elemSize(), mInputData.back().rows, mInputData.back().cols );
-        inference_engine_tensor_buffer buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1 };
-        buffers.push_back(buffer);
-    }
-
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceOpenCV::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
+namespace InferenceEngineImpl
 {
-    LOGI("ENTER");
-
-
-    mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(), cv::Scalar(), false, false);
-
-    mNet.setInput(mInputBlobs, mInputLayers.front());
-
-    std::vector<cv::String> ouputLayers(mOutputLayers.begin(), mOutputLayers.end());
-    mNet.forward(mOutputBlobs, ouputLayers);
-
-    void *pBuff = NULL;
-    std::vector<cv::Mat>::iterator iter;
-    for (iter = mOutputBlobs.begin(); iter != mOutputBlobs.end(); ++iter) {
-        pBuff = (*iter).ptr<void*>(0);
-        size_t sizeBuff = (*iter).total() * (*iter).elemSize();
-        inference_engine_tensor_buffer buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1};
-        buffers.push_back(buffer);
-    }
-
-    if (buffers.empty()) {
-        LOGI("buff empty");
-        inference_engine_tensor_buffer buffer = { nullptr, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 1};
-        buffers.push_back(buffer);
-    }
-
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceOpenCV::GetInputLayerProperty(inference_engine_layer_property &property)
+namespace OpenCVImpl
 {
-    LOGI("ENTER");
-
-    if (mInputLayers.empty()) {
-        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-    }
-
-    property.layer_names = mInputLayers;
-    property.tensor_infos = mInputTensorInfo;
-
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceOpenCV::GetOutputLayerProperty(inference_engine_layer_property &property)
-{
-    LOGI("ENTER");
-
-    if (mOutputLayers.empty()) {
-        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-    }
-
-    int lid = -1;
-    int idx = 0;
-    std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
-    for (std::vector<std::string>::iterator iter = mOutputLayers.begin();
-        iter != mOutputLayers.end(); ++iter, ++idx) {
-        LOGI("output layer: %s", (*iter).c_str());
-        lid = mNet.getLayerId((*iter));
-        LOGI("output layer Id: %d", lid);
-        if(lid < 0) {
-            LOGE("Invalid output %s layer", (*iter).c_str());
-            return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-        }
-
-        std::vector<cv::dnn::MatShape> lInputShape, lOutputShape;
-        LOGI("%zu, %zu, %zu, %zu", mInputTensorInfo[idx].shape[0],
-                                mInputTensorInfo[idx].shape[1],
-                                mInputTensorInfo[idx].shape[2],
-                                mInputTensorInfo[idx].shape[3]);
-
-               std::vector<int> cvInputTensorShape(mInputTensorInfo[idx].shape.begin(), mInputTensorInfo[idx].shape.end());
-        mNet.getLayerShapes(cvInputTensorShape,
-                            lid,
-                            lInputShape,
-                            lOutputShape);
-        inference_engine_tensor_info tensor_info;
-        tensor_info.data_type =INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-        tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
-        // lOutputShape may have multiple tensors
-        // even though the output layer's name is only one
-        LOGI("size of OutputShape: %zu", lOutputShape.size());
-               std::vector<size_t> ieInputTensorShape(lOutputShape[0].begin(), lOutputShape[0].end());
-        tensor_info.shape = ieInputTensorShape;
-
-        tensor_info.size = 1;
-        LOGE("tensor_info");
-        for (std::vector<size_t>::iterator iter2 = tensor_info.shape.begin();
-            iter2 != tensor_info.shape.end(); ++iter2) {
-            LOGI("%zu", (*iter2));
-            tensor_info.size *= (*iter2);
-        }
-        mOutputTensorInfo.push_back(tensor_info);
-    }
-
-    property.layer_names = mOutputLayers;
-    property.tensor_infos = mOutputTensorInfo;
-
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceOpenCV::SetInputLayerProperty(inference_engine_layer_property &property)
-{
-    LOGI("ENTER");
-
-    std::vector<std::string>::iterator iter;
-    for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
-        std::string name = *iter;
-        LOGI("input layer name = %s", name.c_str());
-    }
+       InferenceOpenCV::InferenceOpenCV(void) : mNet()
+       {
+               LOGE("ENTER");
+               LOGE("LEAVE");
+       }
+
+       InferenceOpenCV::~InferenceOpenCV()
+       {
+               ;
+       }
+
+       int InferenceOpenCV::SetPrivateData(void *data)
+       {
+               // Nothing to do yet.
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int InferenceOpenCV::SetTargetDevices(int types)
+       {
+               LOGI("ENTER");
+
+               LOGI("Inference targets are: ");
+               switch (types) {
+               case INFERENCE_TARGET_CPU:
+                       mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
+                       break;
+               case INFERENCE_TARGET_GPU:
+                       mNet.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);
+                       break;
+               case INFERENCE_TARGET_CUSTOM:
+               case INFERENCE_TARGET_NONE:
+               default:
+                       LOGE("Unsupported device type [%d], setting CPU mode", (int) types);
+                       mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
+               }
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int InferenceOpenCV::Load(std::vector<std::string> model_paths,
+                                                         inference_model_format_e model_format)
+       {
+               LOGI("ENTER");
+
+               int ret = INFERENCE_ENGINE_ERROR_NONE;
+
+               std::string fileExt;
+               for (std::vector<std::string>::iterator iter = model_paths.begin();
+                        iter != model_paths.end(); ++iter) {
+                       if (access((*iter).c_str(), F_OK)) {
+                               LOGE("model path [%s] does not exist", (*iter).c_str());
+                               return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+                       }
+                       fileExt = (*iter).substr(((*iter).find_last_of(".")) + 1);
+
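+                       // Weight files (.caffemodel, .pb) and config files are
+                       // distinguished by extension; anything else is treated as config.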
+                       if (fileExt.compare("caffemodel") == 0 ||
+                               fileExt.compare("pb") == 0) {
+                               mWeightFile = (*iter);
+                       } else {
+                               mConfigFile = (*iter);
+                       }
+               }
+
+               // This call may need to change if the OpenCV version is upgraded.
+               if (model_format == INFERENCE_MODEL_CAFFE) {
+                       mNet = cv::dnn::readNetFromCaffe(mConfigFile, mWeightFile);
+               } else if (model_format == INFERENCE_MODEL_TF) {
+                       mNet = cv::dnn::readNetFromTensorflow(mWeightFile, mConfigFile);
+               } else {
+                       LOGE("Unsupported model file format!");
+               }
+
+               if (mNet.empty()) {
+                       LOGE("Net is empty");
+                       return INFERENCE_ENGINE_ERROR_INVALID_DATA;
+               }
+
+               LOGI("LEAVE");
+
+               return ret;
+       }
+
+       int InferenceOpenCV::GetInputTensorBuffers(
+                       std::vector<inference_engine_tensor_buffer> &buffers)
+       {
+               LOGI("ENTER");
+
+               mInputData.clear();
+
+               void *pBuff = NULL;
+               std::vector<inference_engine_tensor_info>::iterator info_iter;
+               for (info_iter = mInputTensorInfo.begin();
+                        info_iter != mInputTensorInfo.end(); ++info_iter) {
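+                       // The tensor shape is NCHW, so shape[2] is height and shape[3] is width.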
+                       cv::Mat inputBlob(cv::Size((*info_iter).shape[3],
+                                                                          (*info_iter).shape[2]),
+                                                         CV_32FC3);
+                       mInputData.push_back(inputBlob);
+
+                       pBuff = mInputData.back().ptr<void *>(0);
+                       size_t sizeBuff = mInputData.back().elemSize() *
+                                                         mInputData.back().rows * mInputData.back().cols;
+                       LOGI("elemSize: %zd, rows: %d, cols: %d",
+                                mInputData.back().elemSize(), mInputData.back().rows,
+                                mInputData.back().cols);
+                       inference_engine_tensor_buffer buffer = {
+                               pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1
+                       };
+                       buffers.push_back(buffer);
+               }
+
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int InferenceOpenCV::GetOutputTensorBuffers(
+                       std::vector<inference_engine_tensor_buffer> &buffers)
+       {
+               LOGI("ENTER");
+
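+               // A forward pass is run here so the output blob shapes are known
+               // and the returned buffers can point directly into them.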
+               mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(),
+                                                                                         cv::Scalar(), false, false);
+
+               mNet.setInput(mInputBlobs, mInputLayers.front());
+
+               std::vector<cv::String> outputLayers(mOutputLayers.begin(),
+                                                                                        mOutputLayers.end());
+               mNet.forward(mOutputBlobs, outputLayers);
+
+               void *pBuff = NULL;
+               std::vector<cv::Mat>::iterator iter;
+               for (iter = mOutputBlobs.begin(); iter != mOutputBlobs.end(); ++iter) {
+                       pBuff = (*iter).ptr<void *>(0);
+                       size_t sizeBuff = (*iter).total() * (*iter).elemSize();
+                       inference_engine_tensor_buffer buffer = {
+                               pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1
+                       };
+                       buffers.push_back(buffer);
+               }
+
+               if (buffers.empty()) {
+                       LOGI("buff empty");
+                       inference_engine_tensor_buffer buffer = {
+                               nullptr, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 1
+                       };
+                       buffers.push_back(buffer);
+               }
+
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int InferenceOpenCV::GetInputLayerProperty(
+                       inference_engine_layer_property &property)
+       {
+               LOGI("ENTER");
+
+               if (mInputLayers.empty()) {
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               property.layer_names = mInputLayers;
+               property.tensor_infos = mInputTensorInfo;
+
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int InferenceOpenCV::GetOutputLayerProperty(
+                       inference_engine_layer_property &property)
+       {
+               LOGI("ENTER");
+
+               if (mOutputLayers.empty()) {
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               int lid = -1;
+               int idx = 0;
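+               // Swap with an empty vector to drop both elements and capacity.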
+               std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
+               for (std::vector<std::string>::iterator iter = mOutputLayers.begin();
+                        iter != mOutputLayers.end(); ++iter, ++idx) {
+                       LOGI("output layer: %s", (*iter).c_str());
+                       lid = mNet.getLayerId((*iter));
+                       LOGI("output layer Id: %d", lid);
+                       if (lid < 0) {
+                               LOGE("Invalid output %s layer", (*iter).c_str());
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       std::vector<cv::dnn::MatShape> lInputShape, lOutputShape;
+                       LOGI("%zu, %zu, %zu, %zu", mInputTensorInfo[idx].shape[0],
+                                mInputTensorInfo[idx].shape[1], mInputTensorInfo[idx].shape[2],
+                                mInputTensorInfo[idx].shape[3]);
+
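+                       // cv::dnn expects the shape as std::vector<int>, so convert from size_t.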
+                       std::vector<int> cvInputTensorShape(
+                                       mInputTensorInfo[idx].shape.begin(),
+                                       mInputTensorInfo[idx].shape.end());
+                       mNet.getLayerShapes(cvInputTensorShape, lid, lInputShape,
+                                                               lOutputShape);
+                       inference_engine_tensor_info tensor_info;
+                       tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+                       tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
+                       // lOutputShape may have multiple tensors
+                       // even though the output layer's name is only one
+                       LOGI("size of OutputShape: %zu", lOutputShape.size());
+                       std::vector<size_t> ieInputTensorShape(lOutputShape[0].begin(),
+                                                                                                  lOutputShape[0].end());
+                       tensor_info.shape = ieInputTensorShape;
+
+                       tensor_info.size = 1;
+                       LOGE("tensor_info");
+                       for (std::vector<size_t>::iterator iter2 =
+                                                tensor_info.shape.begin();
+                                iter2 != tensor_info.shape.end(); ++iter2) {
+                               LOGI("%zu", (*iter2));
+                               tensor_info.size *= (*iter2);
+                       }
+                       mOutputTensorInfo.push_back(tensor_info);
+               }
+
+               property.layer_names = mOutputLayers;
+               property.tensor_infos = mOutputTensorInfo;
+
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int InferenceOpenCV::SetInputLayerProperty(
+                       inference_engine_layer_property &property)
+       {
+               LOGI("ENTER");
+
+               std::vector<std::string>::iterator iter;
+               for (iter = property.layer_names.begin();
+                        iter != property.layer_names.end(); iter++) {
+                       std::string name = *iter;
+                       LOGI("input layer name = %s", name.c_str());
+               }
+
+               mInputLayers.clear();
+               std::vector<std::string>().swap(mInputLayers);
+
+               mInputTensorInfo.clear();
+               std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
+
+               mInputLayers = property.layer_names;
+               mInputTensorInfo = property.tensor_infos;
+
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int InferenceOpenCV::SetOutputLayerProperty(
+                       inference_engine_layer_property &property)
+       {
+               std::vector<std::string>::iterator iter;
+               for (iter = property.layer_names.begin();
+                        iter != property.layer_names.end(); iter++) {
+                       std::string name = *iter;
+                       LOGI("output layer name = %s", name.c_str());
+               }
+
+               mOutputLayers.clear();
+               std::vector<std::string>().swap(mOutputLayers);
+
+               mOutputLayers = property.layer_names;
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int InferenceOpenCV::GetBackendCapacity(inference_engine_capacity *capacity)
+       {
+               LOGI("ENTER");
+
+               if (capacity == NULL) {
+                       LOGE("Bad pointer.");
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
+
+               capacity->supported_accel_devices = INFERENCE_TARGET_CPU;
+
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int InferenceOpenCV::Run(
+                       std::vector<inference_engine_tensor_buffer> &input_buffers,
+                       std::vector<inference_engine_tensor_buffer> &output_buffers)
+       {
+               LOGI("ENTER");
+
+               // need to check memory
+               mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(),
+                                                                                         cv::Scalar(), false, false);
+
+               // Currently, a single input layer with multiple input tensors is
+               // supported; multiple input layers with multiple input tensors are not.
+               // To support that, setInput would have to be called per layer while
+               // matching each input blob to its corresponding input layer.
+               // Suppose a case where one input layer and multiple input tensors are given.
+               mNet.setInput(mInputBlobs, mInputLayers.front());
+
+               int idx = 0;
+
+               if (mOutputBlobs.size() != output_buffers.size()) {
+                       LOGE("output_buffers size is %zu but outputBlobs %zu",
+                                output_buffers.size(), mOutputBlobs.size());
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
+
+               std::vector<cv::String> outputLayers(mOutputLayers.begin(),
+                                                                                        mOutputLayers.end());
+
+               mNet.forward(mOutputBlobs, outputLayers);
 
-    mInputLayers.clear();
-    std::vector<std::string>().swap(mInputLayers);
+               // mOutputBlobs[0] has shape 1x1xNx7, where the 1st of the 7 values
+               // indicates the image id. Batch mode isn't supported, so overwrite
+               // it with the number of detections (N).
+               if (outputLayers[0].compare("detection_out") == 0) {
+                       cv::Mat cvOutputData(
+                                       mOutputBlobs[0].size[2], mOutputBlobs[0].size[3], CV_32F,
+                                       reinterpret_cast<float *>(mOutputBlobs[0].ptr<float *>(0)));
+                       cvOutputData.at<float>(0, 0) = mOutputBlobs[0].size[2];
+               }
+
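+               // Rebind the output buffers to the blobs just produced by forward(),
+               // which may have been reallocated.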
+               for (size_t k = 0; k < output_buffers.size(); ++k)
+                       output_buffers[k].buffer = mOutputBlobs[k].ptr<void>(0);
+
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       extern "C"
+       {
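+               // Plugin entry points used by the inference engine common layer
+               // to create and destroy this backend.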
+               class IInferenceEngineCommon *EngineCommonInit(void)
+               {
+                       InferenceOpenCV *engine = new InferenceOpenCV();
+                       return engine;
+               }
 
-    mInputTensorInfo.clear();
-    std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
-
-    mInputLayers = property.layer_names;
-    mInputTensorInfo = property.tensor_infos;
-
-
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceOpenCV::SetOutputLayerProperty(inference_engine_layer_property &property)
-{
-    std::vector<std::string>::iterator iter;
-    for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
-        std::string name = *iter;
-        LOGI("output layer name = %s", name.c_str());
-    }
-
-    mOutputLayers.clear();
-    std::vector<std::string>().swap(mOutputLayers);
-
-    mOutputLayers = property.layer_names;
-
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceOpenCV::GetBackendCapacity(inference_engine_capacity *capacity)
-{
-    LOGI("ENTER");
-
-    if (capacity == NULL) {
-        LOGE("Bad pointer.");
-        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-    }
-
-    capacity->supported_accel_devices = INFERENCE_TARGET_CPU;
-
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceOpenCV::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                        std::vector<inference_engine_tensor_buffer> &output_buffers)
-{
-    LOGI("ENTER");
-
-    // need to check memoery
-    mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(), cv::Scalar(), false, false);
-
-    // Currently it supports that one input layer with multiple input tensors.
-    // it doesn't support that mulitple input layer with multiple input tensors.
-    // To suppor that, setInput is called manually while we matching inputblobs
-    // and their corresponding input layer.
-    // Suppose a case that an input layer and mulitple input tensors are given.
-    mNet.setInput(mInputBlobs, mInputLayers.front());
-
-    int idx = 0;
-
-    if (mOutputBlobs.size() != output_buffers.size()) {
-        LOGE("output_buffers size is %zu but outputBlobs %zu", output_buffers.size(), mOutputBlobs.size());
-        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-    }
-
-    std::vector<cv::String> outputLayers(mOutputLayers.begin(), mOutputLayers.end());
-
-    mNet.forward(mOutputBlobs, outputLayers);
-
-    // mOutputBlobs[0] (the shape is 1x1xNx7 and the 1st of 7
-    // indicats the image id). use the 1st of 7 as the number of detections if a batch mode isn't supported.
-    if (outputLayers[0].compare("detection_out") == 0) {
-        cv::Mat cvOutputData(mOutputBlobs[0].size[2], mOutputBlobs[0].size[3], CV_32F, reinterpret_cast<float*>(mOutputBlobs[0].ptr<float*>(0)));
-        cvOutputData.at<float>(0,0) = mOutputBlobs[0].size[2];
-    }
-
-    for (int k = 0; k < output_buffers.size(); ++k)
-        output_buffers[k].buffer = mOutputBlobs[k].ptr<void>(0);
-
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-extern "C"
-{
-class IInferenceEngineCommon* EngineCommonInit(void)
-{
-    InferenceOpenCV *engine = new InferenceOpenCV();
-    return engine;
-}
-
-void EngineCommonDestroy(class IInferenceEngineCommon *engine)
-{
-    delete engine;
-}
-}
+               void EngineCommonDestroy(class IInferenceEngineCommon *engine)
+               {
+                       delete engine;
+               }
+       }
 } /* OpenCVImpl */
 } /* InferenceEngineImpl */
index db5d087719bd59fe00dfdd1d6bd902f4d6942f65..cd2c7409a627653281a3a3835e2e2d6881aabd5f 100644 (file)
 
 using namespace InferenceEngineInterface::Common;
 
-namespace InferenceEngineImpl {
-namespace OpenCVImpl {
+namespace InferenceEngineImpl
+{
+namespace OpenCVImpl
+{
+       class InferenceOpenCV : public IInferenceEngineCommon
+       {
+       public:
+               InferenceOpenCV();
+               ~InferenceOpenCV();
 
-class InferenceOpenCV : public IInferenceEngineCommon {
-public:
-    InferenceOpenCV();
-    ~InferenceOpenCV();
+               int SetPrivateData(void *data) override;
 
-    int SetPrivateData(void *data) override;
+               int SetTargetDevices(int types) override;
 
-    int SetTargetDevices(int types) override;
+               int Load(std::vector<std::string> model_paths,
+                                inference_model_format_e model_format) override;
 
-    int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) override;
+               int GetInputTensorBuffers(
+                               std::vector<inference_engine_tensor_buffer> &buffers) override;
 
-    int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
+               int GetOutputTensorBuffers(
+                               std::vector<inference_engine_tensor_buffer> &buffers) override;
 
-    int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
+               int GetInputLayerProperty(
+                               inference_engine_layer_property &property) override;
 
-    int GetInputLayerProperty(inference_engine_layer_property &property) override;
+               int GetOutputLayerProperty(
+                               inference_engine_layer_property &property) override;
 
-    int GetOutputLayerProperty(inference_engine_layer_property &property) override;
+               int SetInputLayerProperty(
+                               inference_engine_layer_property &property) override;
 
-    int SetInputLayerProperty(inference_engine_layer_property &property) override;
+               int SetOutputLayerProperty(
+                               inference_engine_layer_property &property) override;
 
-    int SetOutputLayerProperty(inference_engine_layer_property &property) override;
+               int GetBackendCapacity(inference_engine_capacity *capacity) override;
 
-    int GetBackendCapacity(inference_engine_capacity *capacity) override;
+               int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+                               std::vector<inference_engine_tensor_buffer> &output_buffers)
+                               override;
 
-    int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-            std::vector<inference_engine_tensor_buffer> &output_buffers) override;
+       private:
+               std::vector<cv::Mat> mInputData;
+               cv::Mat mInputBlobs;
 
-private:
-    std::vector<cv::Mat> mInputData;
-    cv::Mat mInputBlobs;
+               std::vector<inference_engine_tensor_info> mInputTensorInfo;
+               std::vector<inference_engine_tensor_info> mOutputTensorInfo;
+               std::vector<cv::Mat> mOutputBlobs;
+               cv::dnn::Net mNet; /**< Network associated with a network model */
 
-    std::vector<inference_engine_tensor_info> mInputTensorInfo;
-    std::vector<inference_engine_tensor_info> mOutputTensorInfo;
-    std::vector<cv::Mat> mOutputBlobs;
-    cv::dnn::Net mNet; /**< Network associated with a network model */
+               std::vector<std::string> mInputLayers;
+               std::vector<std::string> mOutputLayers;
 
-    std::vector<std::string> mInputLayers;
-    std::vector<std::string> mOutputLayers;
-
-    std::string mConfigFile;
-    std::string mWeightFile;
-};
+               std::string mConfigFile;
+               std::string mWeightFile;
+       };
 
 } /* OpenCVImpl */
 } /* InferenceEngineImpl */