namespace InferenceEngineImpl {
namespace OpenCVImpl {
-InferenceOpenCV::InferenceOpenCV(std::string protoFile, std::string weightFile) :
- mNet(),
- mConfigFile(protoFile),
- mWeightFile(weightFile),
- mInputData(cv::Mat())
+InferenceOpenCV::InferenceOpenCV(void) :
+ mNet()
{
LOGE("ENTER");
LOGE("LEAVE");
}
-int InferenceOpenCV::SetInputTensorParam()
+int InferenceOpenCV::SetTargetDevices(int types)
{
- LOGE("Not supported");
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
-}
-
-int InferenceOpenCV::SetInputTensorParamNode(std::string node)
-{
- mInputLayer = cv::String(node);
- return INFERENCE_ENGINE_ERROR_NONE;
-}
+ LOGI("ENTER");
-int InferenceOpenCV::SetOutputTensorParam()
-{
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
-}
-
-int InferenceOpenCV::SetOutputTensorParamNodes(std::vector<std::string> nodes)
-{
- mOutputLayer.clear();
- for (std::vector<std::string>::iterator iter = nodes.begin();
- iter != nodes.end(); ++iter) {
- mOutputLayer.push_back(cv::String(*iter));
- }
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceOpenCV::SetTargetDevice(inference_target_type_e type)
-{
- switch (type) {
+ LOGI("Inferece targets are: ");
+ switch (types) {
case INFERENCE_TARGET_CPU :
mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
break;
case INFERENCE_TARGET_CUSTOM:
case INFERENCE_TARGET_NONE:
default:
- LOGE("Not supported device type [%d], Set CPU mode", (int)type);
+ LOGE("Not supported device type [%d], Set CPU mode", (int)types);
mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
}
+ LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
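+/* A minimal caller-side sketch for SetTargetDevices() above (the engine
+ * pointer is hypothetical). Only INFERENCE_TARGET_CPU maps to an OpenCV
+ * target here; any other flag falls back to CPU:
+ *
+ *   engine->SetTargetDevices(INFERENCE_TARGET_CPU);
+ */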
-int InferenceOpenCV::Load()
+int InferenceOpenCV::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
{
+ LOGI("ENTER");
+
int ret = INFERENCE_ENGINE_ERROR_NONE;
- if (access(mConfigFile.c_str(), F_OK) ||
- access(mWeightFile.c_str(), F_OK)) {
- LOGE("protofile in [%s] ", mConfigFile.c_str());
- LOGE("weightFilePath in [%s] ", mWeightFile.c_str());
- return INFERENCE_ENGINE_ERROR_INVALID_PATH;
- }
+ std::string fileExt;
+ for (std::vector<std::string>::iterator iter = model_paths.begin();
+ iter != model_paths.end(); ++iter) {
+ if (access((*iter).c_str(), F_OK)) {
+ LOGE("model path in [%s] not exist", (*iter).c_str());
+ return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+ }
+ fileExt = (*iter).substr(((*iter).find_last_of("."))+1);
+
+ if (fileExt.compare("caffemodel") == 0 ||
+ fileExt.compare("pb") == 0) {
+ mWeightFile = (*iter);
+ } else {
+ mConfigFile = (*iter);
+ }
+ }
 	// This call may need to change if the OpenCV version is upgraded
- int nPos = mWeightFile.find_last_of(".");
- std::string weightFileExt = mWeightFile.substr(nPos+1);
- LOGI("%s", weightFileExt.c_str());
- if (weightFileExt.compare("caffemodel") == 0) {
+ if (model_format == INFERENCE_MODEL_CAFFE) {
mNet = cv::dnn::readNetFromCaffe(mConfigFile, mWeightFile);
- } else if (weightFileExt.compare("pb") == 0) {
+ } else if (model_format == INFERENCE_MODEL_TF) {
mNet = cv::dnn::readNetFromTensorflow(mWeightFile, mConfigFile);
} else {
LOGE("Not supported model file!");
return INFERENCE_ENGINE_ERROR_INVALID_DATA;
}
+ LOGI("LEAVE");
+
return ret;
}
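+/* A minimal loading sketch for Load() above, assuming a Caffe model; the file
+ * names are hypothetical. The extension check routes .caffemodel/.pb files to
+ * mWeightFile and everything else (e.g. .prototxt) to mConfigFile, so the
+ * order of the paths does not matter:
+ *
+ *   std::vector<std::string> paths = { "squeezenet.prototxt", "squeezenet.caffemodel" };
+ *   engine->Load(paths, INFERENCE_MODEL_CAFFE);
+ */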
-int InferenceOpenCV::CreateInputLayerPassage()
+int InferenceOpenCV::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
{
- /* Do Nothing
- * In OpenCV, don't need to call this CreateInputLayerPassage()
- * because that it can uses cv::Mat directly
- */
+ LOGI("ENTER");
+
+ mInputData.clear();
+
+ void * pBuff = NULL;
+ std::vector<inference_engine_tensor_info>::iterator info_iter;
+ for (info_iter = mInputTensorInfo.begin();
+ info_iter != mInputTensorInfo.end(); ++info_iter) {
+ cv::Mat inputBlob(cv::Size((*info_iter).shape[3], (*info_iter).shape[2]), CV_32FC3);
+ mInputData.push_back(inputBlob);
+
+ pBuff = mInputData.back().ptr<void*>(0);
+ inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32 };
+ buffers.push_back(buffer);
+ }
+
+ LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
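+/* Each buffer returned by GetInputTensorBuffers() above aliases the cv::Mat
+ * just pushed into mInputData, so a caller fills it with preprocessed float
+ * data before running inference. A rough sketch (hypothetical caller code;
+ * the buffer pointer member name and the HxWx3 float layout are assumptions
+ * based on the CV_32FC3 Mat created above):
+ *
+ *   std::vector<inference_engine_tensor_buffer> inputs;
+ *   engine->GetInputTensorBuffers(inputs);
+ *   memcpy(inputs[0].buffer, preprocessed, h * w * 3 * sizeof(float));
+ */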
-
-int InferenceOpenCV::GetInputLayerAttrType()
+int InferenceOpenCV::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
{
- return 1;
+ LOGI("ENTER");
+
+
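+	// OpenCV only produces the output Mats during a forward pass, so build the
+	// input blob and run the network once here; the buffers returned below point
+	// into the resulting mOutputBlobs.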
+ mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(), cv::Scalar(), false, false);
+
+ mNet.setInput(mInputBlobs, mInputLayers.front());
+
+ std::vector<cv::String> ouputLayers(mOutputLayers.begin(), mOutputLayers.end());
+ mNet.forward(mOutputBlobs, ouputLayers);
+
+ void *pBuff = NULL;
+ std::vector<cv::Mat>::iterator iter;
+ for (iter = mOutputBlobs.begin(); iter != mOutputBlobs.end(); ++iter) {
+ pBuff = (*iter).ptr<void*>(0);
+ inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32 };
+ buffers.push_back(buffer);
+ }
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
}
-void * InferenceOpenCV::GetInputDataPtr()
+int InferenceOpenCV::GetInputLayerProperty(inference_engine_layer_property &property)
{
- return static_cast<void*>(mInputData.data);
+ LOGI("ENTER");
+
+ if (mInputLayers.empty()) {
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ property.layer_names = mInputLayers;
+ property.tensor_infos = mInputTensorInfo;
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceOpenCV::SetInputDataBuffer(tensor_t data)
+int InferenceOpenCV::GetOutputLayerProperty(inference_engine_layer_property &property)
{
- mInputData = cv::Mat(data.dimInfo[0][2], data.dimInfo[0][3], data.dimInfo[0][1]);
+ LOGI("ENTER");
+
+ if (mOutputLayers.empty()) {
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ int lid = -1;
+ int idx = 0;
+ std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
+ for (std::vector<std::string>::iterator iter = mOutputLayers.begin();
+ iter != mOutputLayers.end(); ++iter, ++idx) {
+ LOGI("output layer: %s", (*iter).c_str());
+ lid = mNet.getLayerId((*iter));
+ LOGI("output layer Id: %d", lid);
+ if(lid < 0) {
+ LOGE("Invalid output %s layer", (*iter).c_str());
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ std::vector<cv::dnn::MatShape> lInputShape, lOutputShape;
+ LOGI("%d, %d, %d, %d", mInputTensorInfo[idx].shape[0],
+ mInputTensorInfo[idx].shape[1],
+ mInputTensorInfo[idx].shape[2],
+ mInputTensorInfo[idx].shape[3]);
+
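+		// Ask OpenCV to infer this layer's input/output shapes from the network
+		// input shape; lOutputShape then describes the layer's output tensor(s).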
+ mNet.getLayerShapes(mInputTensorInfo[idx].shape,
+ lid,
+ lInputShape,
+ lOutputShape);
+ inference_engine_tensor_info tensor_info;
+		tensor_info.data_type = TENSOR_DATA_TYPE_FLOAT32;
+ tensor_info.shape_type = TENSOR_SHAPE_NCHW;
+		// lOutputShape may contain multiple tensors
+		// even though only one output layer name is given
+		LOGI("size of OutputShape: %zu", lOutputShape.size());
+ tensor_info.shape = lOutputShape[0];
+
+ tensor_info.size = 1;
+ LOGE("tensor_info");
+ for (std::vector<int>::iterator iter2 = tensor_info.shape.begin();
+ iter2 != tensor_info.shape.end(); ++iter2) {
+ LOGI("%d", (*iter2));
+ tensor_info.size *= (*iter2);
+ }
+ mOutputTensorInfo.push_back(tensor_info);
+ }
+
+ property.layer_names = mOutputLayers;
+ property.tensor_infos = mOutputTensorInfo;
+
+ LOGI("LEAVE");
+
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceOpenCV::Run()
+int InferenceOpenCV::SetInputLayerProperty(inference_engine_layer_property &property)
{
- mInputBlob = cv::dnn::blobFromImage(mInputData, 1.0, cv::Size(), cv::Scalar(), false, false);
+ LOGI("ENTER");
+
+ std::vector<std::string>::iterator iter;
+ for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
+ std::string name = *iter;
+ LOGI("input layer name = %s", name.c_str());
+ }
+
+ mInputLayers.clear();
+ std::vector<std::string>().swap(mInputLayers);
+
+ mInputTensorInfo.clear();
+ std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
+
+ mInputLayers = property.layer_names;
+ mInputTensorInfo = property.tensor_infos;
- mNet.setInput(mInputBlob, mInputLayer);
- mNet.forward(mOutputProb, mOutputLayer);
+ LOGI("LEAVE");
- if (mOutputProb.empty()) {
- LOGE("OutputProb is empty");
- return INFERENCE_ENGINE_ERROR_INTERNAL;
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceOpenCV::SetOutputLayerProperty(inference_engine_layer_property &property)
+{
+ std::vector<std::string>::iterator iter;
+ for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
+ std::string name = *iter;
+ LOGI("output layer name = %s", name.c_str());
}
+ mOutputLayers.clear();
+ std::vector<std::string>().swap(mOutputLayers);
+
+ mOutputLayers = property.layer_names;
+
+
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceOpenCV::Run(std::vector<float> tensor)
+int InferenceOpenCV::GetBackendCapacity(inference_engine_capacity *capacity)
{
+ LOGI("ENTER");
+
+ if (capacity == NULL) {
+ LOGE("Bad pointer.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ capacity->supported_accel_devices = INFERENCE_TARGET_CPU;
+
+ LOGI("LEAVE");
+
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceOpenCV::GetInferenceResult(tensor_t& results)
+int InferenceOpenCV::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers)
{
- std::vector<int> tmpDimInfo;
- LOGE("outputProb size: %d", mOutputProb.size());
- for (std::vector<cv::Mat>::iterator iter = mOutputProb.begin();
- iter != mOutputProb.end(); ++iter) {
- tmpDimInfo.clear();
- for (int d = 0; d < (*iter).dims; ++d) {
- tmpDimInfo.push_back((*iter).size[d]);
- }
+ LOGI("ENTER");
+
+	// need to check memory
+ mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(), cv::Scalar(), false, false);
- results.dimInfo.push_back(tmpDimInfo);
- results.data.push_back((void*)(*iter).ptr<float>());
+	// Currently, a single input layer with multiple input tensors is supported.
+	// Multiple input layers, each with its own input tensors, are not supported yet.
+	// To support that, setInput() would have to be called per input layer while
+	// matching each input blob to its corresponding layer.
+	// For now, assume one input layer with multiple input tensors.
+ mNet.setInput(mInputBlobs, mInputLayers.front());
+
+ int idx = 0;
+
+ if (mOutputBlobs.size() != output_buffers.size()) {
+ LOGE("output_buffers size is %d but outputBlobs %d", output_buffers.size(), mOutputBlobs.size());
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
+ std::vector<cv::String> ouputLayers(mOutputLayers.begin(), mOutputLayers.end());
+
+ mNet.forward(mOutputBlobs, ouputLayers);
+ LOGI("LEAVE");
+
return INFERENCE_ENGINE_ERROR_NONE;
}
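+/* An end-to-end usage sketch of this backend (hypothetical caller code; the
+ * property setup and buffer filling are abbreviated). The order mirrors the
+ * implementation above: output blobs are first materialized by
+ * GetOutputTensorBuffers() and then refreshed by Run():
+ *
+ *   engine->SetTargetDevices(INFERENCE_TARGET_CPU);
+ *   engine->Load(paths, INFERENCE_MODEL_CAFFE);
+ *   engine->SetInputLayerProperty(input_property);   // input layer names + tensor infos
+ *   engine->SetOutputLayerProperty(output_property); // output layer names
+ *   engine->GetInputTensorBuffers(inputs);           // copy preprocessed data into these
+ *   engine->GetOutputTensorBuffers(outputs);         // runs a first forward pass
+ *   engine->Run(inputs, outputs);
+ */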
extern "C"
{
-class IInferenceEngineCommon* EngineCommonInit(std::string protoFile, std::string weightFile)
+class IInferenceEngineCommon* EngineCommonInit(void)
{
- InferenceOpenCV *engine = new InferenceOpenCV(protoFile, weightFile);
+ InferenceOpenCV *engine = new InferenceOpenCV();
return engine;
}
#include <inference_engine_common.h>
+#include <dlog.h>
+
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
-
-#include <dlog/dlog.h>
/**
* @file inference_engine_opencv_private.h
* @brief This file contains the InferenceOpenCV class which
class InferenceOpenCV : public IInferenceEngineCommon {
public:
- InferenceOpenCV(std::string protoFile,
- std::string weightFile);
-
+ InferenceOpenCV();
~InferenceOpenCV();
- // Input Tensor Params
-
- int SetInputTensorParam() override;
-
- int SetInputTensorParamNode(std::string node = "input") override;
+ int SetTargetDevices(int types) override;
- // Output Tensor Params
- int SetOutputTensorParam() override;
+ int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) override;
- int SetOutputTensorParamNodes(std::vector<std::string> nodes) override;
+ int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
- int SetTargetDevice(inference_target_type_e type) override;
+ int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
- int Load() override;
+ int GetInputLayerProperty(inference_engine_layer_property &property) override;
- int CreateInputLayerPassage() override;
+ int GetOutputLayerProperty(inference_engine_layer_property &property) override;
- int GetInputLayerAttrType() override;
+ int SetInputLayerProperty(inference_engine_layer_property &property) override;
- void * GetInputDataPtr() override;
+ int SetOutputLayerProperty(inference_engine_layer_property &property) override;
- int SetInputDataBuffer(tensor_t data) override;
+ int GetBackendCapacity(inference_engine_capacity *capacity) override;
- int Run() override;
-
- int Run(std::vector<float> tensor) override;
-
- int GetInferenceResult(tensor_t& data) override;
+ int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers) override;
private:
- cv::Mat mInputBlob;
- std::vector<cv::Mat> mOutputProb;
+ std::vector<cv::Mat> mInputData;
+ cv::Mat mInputBlobs;
+
+ std::vector<inference_engine_tensor_info> mInputTensorInfo;
+ std::vector<inference_engine_tensor_info> mOutputTensorInfo;
+ std::vector<cv::Mat> mOutputBlobs;
cv::dnn::Net mNet; /**< Network associated with a network model */
- cv::String mInputLayer;
- std::vector<cv::String> mOutputLayer;
+ std::vector<std::string> mInputLayers;
+ std::vector<std::string> mOutputLayers;
std::string mConfigFile;
std::string mWeightFile;
-
- cv::Mat mInputData;
};
} /* InferenceEngineImpl */