From 358d67e3357c4e9a40eca9873767512c7b749d03 Mon Sep 17 00:00:00 2001
From: Tae-Young Chung
Date: Thu, 11 Mar 2021 10:17:01 +0900
Subject: [PATCH] Change members of inference_engine_layer_property structure,
 and change vector to map

This is based on
https://review.tizen.org/gerrit/#/c/platform/core/multimedia/inference-engine-interface/+/254892/
https://review.tizen.org/gerrit/#/c/platform/core/api/mediavision/+/254953/

Change-Id: I9316e46cade0e51aa18b9d339d2f618949489e0c
Signed-off-by: Tae-Young Chung
---
 packaging/inference-engine-opencv.spec |   2 +-
 src/inference_engine_opencv.cpp        | 143 ++++++++++++++++-----------------
 src/inference_engine_opencv_private.h  |  16 ++--
 3 files changed, 77 insertions(+), 84 deletions(-)

diff --git a/packaging/inference-engine-opencv.spec b/packaging/inference-engine-opencv.spec
index b9860ea..513e638 100644
--- a/packaging/inference-engine-opencv.spec
+++ b/packaging/inference-engine-opencv.spec
@@ -1,7 +1,7 @@
 Name:        inference-engine-opencv
 Summary:     OpenCV based implementation of inference-engine-interface
 Version:     0.0.2
-Release:     6
+Release:     7
 Group:       Multimedia/Libraries
 License:     Apache-2.0
 Source0:     %{name}-%{version}.tar.gz
diff --git a/src/inference_engine_opencv.cpp b/src/inference_engine_opencv.cpp
index 3d12674..c85eebe 100644
--- a/src/inference_engine_opencv.cpp
+++ b/src/inference_engine_opencv.cpp
@@ -50,7 +50,7 @@ namespace OpenCVImpl
 	{
 		LOGI("ENTER");
 
-		LOGI("Inferece targets are: ");
+		LOGI("Inference target is [%d]", types);
 		switch (types) {
 		case INFERENCE_TARGET_CPU:
 			mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
@@ -61,7 +61,7 @@ namespace OpenCVImpl
 		case INFERENCE_TARGET_CUSTOM:
 		case INFERENCE_TARGET_NONE:
 		default:
-			LOGE("Not supported device type [%d], Set CPU mode", (int) types);
+			LOGE("Not supported device type [%d], Set CPU mode", types);
 			mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
 		}
 		LOGI("LEAVE");
@@ -124,19 +124,16 @@ namespace OpenCVImpl
 	}
 
 	int InferenceOpenCV::GetInputTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &buffers)
+			std::map<std::string, inference_engine_tensor_buffer> &buffers)
 	{
 		LOGI("ENTER");
 
 		mInputData.clear();
 
 		void *pBuff = NULL;
-		std::vector<inference_engine_tensor_info>::iterator info_iter;
-		for (info_iter = mInputTensorInfo.begin();
-			 info_iter != mInputTensorInfo.end(); ++info_iter) {
-			cv::Mat inputBlob(cv::Size((*info_iter).shape[3],
-									   (*info_iter).shape[2]),
-							  CV_32FC3);
+		for (auto& layer : mInputLayers) {
+			std::vector<size_t>& shape = layer.second.shape;
+			cv::Mat inputBlob(cv::Size(shape[3], shape[2]), CV_32FC3);
 			mInputData.push_back(inputBlob);
 
 			pBuff = mInputData.back().ptr<void>(0);
@@ -148,7 +145,7 @@ namespace OpenCVImpl
 			inference_engine_tensor_buffer buffer = {
 				pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1
 			};
-			buffers.push_back(buffer);
+			buffers.insert(std::make_pair(layer.first, buffer));
 		}
 
 		LOGI("LEAVE");
@@ -157,36 +154,39 @@ namespace OpenCVImpl
 	}
 
 	int InferenceOpenCV::GetOutputTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &buffers)
+			std::map<std::string, inference_engine_tensor_buffer> &buffers)
 	{
 		LOGI("ENTER");
 
 		mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(),
 											  cv::Scalar(), false, false);
 
-		mNet.setInput(mInputBlobs, mInputLayers.front());
+		mNet.setInput(mInputBlobs, mInputLayers.begin()->first);
 
-		std::vector<cv::String> ouputLayers(mOutputLayers.begin(),
-											mOutputLayers.end());
-		mNet.forward(mOutputBlobs, ouputLayers);
+		std::vector<cv::String> outputLayers;
+		for (auto& layer : mOutputLayers) {
+			LOGI("output layer: %s", layer.first.c_str());
+			outputLayers.push_back(layer.first);
+		}
+
+		mNet.forward(mOutputBlobs, outputLayers);
 
 		void *pBuff = NULL;
 
-		std::vector<cv::Mat>::iterator iter;
-		for (iter = mOutputBlobs.begin(); iter != mOutputBlobs.end(); ++iter) {
-			pBuff = (*iter).ptr<void>(0);
-			size_t sizeBuff = (*iter).total() * (*iter).elemSize();
+		std::map<std::string, int>().swap(mOutputLayerId);
+		for (size_t idx = 0; idx < mOutputBlobs.size(); ++idx) {
+			pBuff = mOutputBlobs[idx].ptr<void>(0);
+			size_t sizeBuff = mOutputBlobs[idx].total() * mOutputBlobs[idx].elemSize();
 			inference_engine_tensor_buffer buffer = {
 				pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1
 			};
-			buffers.push_back(buffer);
+
+			LOGI("output layer: %s with %p", outputLayers[idx].c_str(), buffer.buffer);
+			buffers.insert(std::make_pair(outputLayers[idx], buffer));
+			mOutputLayerId.insert(std::make_pair(outputLayers[idx], mNet.getLayerId(outputLayers[idx])));
 		}
 
 		if (buffers.empty()) {
 			LOGI("buff empty");
-			inference_engine_tensor_buffer buffer = {
-				nullptr, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 1
-			};
-			buffers.push_back(buffer);
 		}
 
 		LOGI("LEAVE");
@@ -203,8 +203,7 @@ namespace OpenCVImpl
 			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 		}
 
-		property.layer_names = mInputLayers;
-		property.tensor_infos = mInputTensorInfo;
+		property.layers = mInputLayers;
 
 		LOGI("LEAVE");
 
@@ -221,27 +220,29 @@ namespace OpenCVImpl
 		}
 
 		int lid = -1;
-		int idx = 0;
-		std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
-		for (std::vector<std::string>::iterator iter = mOutputLayers.begin();
-			 iter != mOutputLayers.end(); ++iter, ++idx) {
-			LOGI("output layer: %s", (*iter).c_str());
-			lid = mNet.getLayerId((*iter));
+		std::vector<std::vector<int>> inputTensorShapes;
+		for (auto& layer : mInputLayers) {
+			inputTensorShapes.push_back(std::vector<int>(
+					layer.second.shape.begin(),
+					layer.second.shape.end()));
+			LOGI("input layer[%s]: %zu, %zu, %zu, %zu",layer.first.c_str(),
+				 layer.second.shape[0], layer.second.shape[1],
+				 layer.second.shape[2], layer.second.shape[3]);
+		}
+
+		std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
+		for (auto& layer : mOutputLayerId) {
+			LOGI("output layer: %s", layer.first.c_str());
+			lid = layer.second;
 			LOGI("output layer Id: %d", lid);
 			if (lid < 0) {
-				LOGE("Invalid output %s layer", (*iter).c_str());
+				LOGE("Invalid output %s layer", layer.first.c_str());
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 			}
 
 			std::vector<cv::dnn::MatShape> lInputShape, lOutputShape;
-			LOGI("%zu, %zu, %zu, %zu", mInputTensorInfo[idx].shape[0],
-				 mInputTensorInfo[idx].shape[1], mInputTensorInfo[idx].shape[2],
-				 mInputTensorInfo[idx].shape[3]);
-
-			std::vector<int> cvInputTensorShape(
-					mInputTensorInfo[idx].shape.begin(),
-					mInputTensorInfo[idx].shape.end());
-			mNet.getLayerShapes(cvInputTensorShape, lid, lInputShape,
+
+			mNet.getLayerShapes(inputTensorShapes, lid, lInputShape,
 								lOutputShape);
 			inference_engine_tensor_info tensor_info;
 			tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
@@ -255,17 +256,14 @@ namespace OpenCVImpl
 
 			tensor_info.size = 1;
 			LOGE("tensor_info");
-			for (std::vector<size_t>::iterator iter2 =
-						 tensor_info.shape.begin();
-				 iter2 != tensor_info.shape.end(); ++iter2) {
-				LOGI("%zu", (*iter2));
-				tensor_info.size *= (*iter2);
+			for (auto& dim : tensor_info.shape) {
+				LOGI("%zu", dim);
+				tensor_info.size *= dim;
 			}
-			mOutputTensorInfo.push_back(tensor_info);
+			mOutputLayers.insert(std::make_pair(layer.first, tensor_info));
 		}
 
-		property.layer_names = mOutputLayers;
-		property.tensor_infos = mOutputTensorInfo;
+		property.layers = mOutputLayers;
 
 		LOGI("LEAVE");
 
@@ -277,21 +275,14 @@ namespace OpenCVImpl
 	{
 		LOGI("ENTER");
 
-		std::vector<std::string>::iterator iter;
-		for (iter = property.layer_names.begin();
-			 iter != property.layer_names.end(); iter++) {
-			std::string name = *iter;
-			LOGI("input layer name = %s", name.c_str());
+		for (auto& layer : property.layers) {
+			LOGI("input layer name = %s", layer.first.c_str());
 		}
 
 		mInputLayers.clear();
-		std::vector<std::string>().swap(mInputLayers);
+		std::map<std::string, inference_engine_tensor_info>().swap(mInputLayers);
 
-		mInputTensorInfo.clear();
-		std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
-
-		mInputLayers = property.layer_names;
-		mInputTensorInfo = property.tensor_infos;
+		mInputLayers = property.layers;
 
 		LOGI("LEAVE");
 
@@ -301,17 +292,14 @@ namespace OpenCVImpl
 	int InferenceOpenCV::SetOutputLayerProperty(
 			inference_engine_layer_property &property)
 	{
-		std::vector<std::string>::iterator iter;
-		for (iter = property.layer_names.begin();
-			 iter != property.layer_names.end(); iter++) {
-			std::string name = *iter;
-			LOGI("output layer name = %s", name.c_str());
+		for (auto& layer : property.layers) {
+			LOGI("output layer name = %s", layer.first.c_str());
 		}
 
 		mOutputLayers.clear();
-		std::vector<std::string>().swap(mOutputLayers);
+		std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
 
-		mOutputLayers = property.layer_names;
+		mOutputLayers = property.layers;
 
 		return INFERENCE_ENGINE_ERROR_NONE;
 	}
@@ -333,21 +321,21 @@ namespace OpenCVImpl
 	}
 
 	int InferenceOpenCV::Run(
-			std::vector<inference_engine_tensor_buffer> &input_buffers,
-			std::vector<inference_engine_tensor_buffer> &output_buffers)
+			std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+			std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
 	{
 		LOGI("ENTER");
 
 		// need to check memoery
 		mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(),
-											  cv::Scalar(), false, false);
+											  cv::Scalar(), true, false);
 
 		// Currently it supports that one input layer with multiple input tensors.
 		// it doesn't support that mulitple input layer with multiple input tensors.
 		// To suppor that, setInput is called manually while we matching inputblobs
 		// and their corresponding input layer.
 		// Suppose a case that an input layer and mulitple input tensors are given.
-		mNet.setInput(mInputBlobs, mInputLayers.front());
+		mNet.setInput(mInputBlobs, input_buffers.begin()->first);
 
 		if (mOutputBlobs.size() != output_buffers.size()) {
 			LOGE("output_buffers size is %zu but outputBlobs %zu",
@@ -355,8 +343,10 @@ namespace OpenCVImpl
 			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
 		}
 
-		std::vector<cv::String> outputLayers(mOutputLayers.begin(),
-											 mOutputLayers.end());
+		std::vector<cv::String> outputLayers;
+		for (auto& layer : mOutputLayers) {
+			outputLayers.push_back(layer.first);
+		}
 
 		mNet.forward(mOutputBlobs, outputLayers);
 
@@ -369,8 +359,11 @@ namespace OpenCVImpl
 			cvOutputData.at<float>(0, 0) = mOutputBlobs[0].size[2];
 		}
 
-		for (unsigned int k = 0; k < output_buffers.size(); ++k)
-			output_buffers[k].buffer = mOutputBlobs[k].ptr<void>(0);
+		int idx = 0;
+		for (auto iter = output_buffers.begin(); iter != output_buffers.end(); ++iter, ++idx) {
+			LOGI("output: %s", iter->first.c_str());
+			iter->second.buffer = mOutputBlobs[idx].ptr<void>(0);
+		}
 
 		LOGI("LEAVE");
 
diff --git a/src/inference_engine_opencv_private.h b/src/inference_engine_opencv_private.h
index ad7cc46..a3eb84a 100644
--- a/src/inference_engine_opencv_private.h
+++ b/src/inference_engine_opencv_private.h
@@ -57,10 +57,10 @@ namespace OpenCVImpl
 				inference_model_format_e model_format) override;
 
 		int GetInputTensorBuffers(
-				std::vector<inference_engine_tensor_buffer> &buffers) override;
+				std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
 		int GetOutputTensorBuffers(
-				std::vector<inference_engine_tensor_buffer> &buffers) override;
+				std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
 		int GetInputLayerProperty(
 				inference_engine_layer_property &property) override;
@@ -76,21 +76,21 @@ namespace OpenCVImpl
 
 		int GetBackendCapacity(inference_engine_capacity *capacity) override;
 
-		int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-				std::vector<inference_engine_tensor_buffer> &output_buffers)
+		int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+				std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
 				override;
 
 	private:
 		std::vector<cv::Mat> mInputData;
 		cv::Mat mInputBlobs;
-		std::vector<inference_engine_tensor_info> mInputTensorInfo;
-		std::vector<inference_engine_tensor_info> mOutputTensorInfo;
+		std::map<std::string, inference_engine_tensor_info> mInputLayers;
+		std::map<std::string, inference_engine_tensor_info> mOutputLayers;
 		std::vector<cv::Mat> mOutputBlobs;
 		cv::dnn::Net mNet; /**< Network associated with a network model */
 
-		std::vector<std::string> mInputLayers;
-		std::vector<std::string> mOutputLayers;
+		std::map<std::string, int> mInputLayerId;
+		std::map<std::string, int> mOutputLayerId;
 
 		std::string mConfigFile;
 		std::string mWeightFile;
-- 
2.7.4
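
Reviewer note (not part of the patch): a minimal caller-side sketch of the reworked, name-keyed API is below. It assumes the inference_engine_layer_property and tensor structure definitions introduced by the inference-engine-interface change linked in the commit message; the layer name "prob", the shape values, and the way the backend object is obtained are hypothetical placeholders rather than names taken from this patch, and the enclosing namespaces and interface headers are elided.

// Sketch only: layers are described and buffers are returned as maps keyed by
// layer name, replacing the old parallel layer_names / tensor_infos vectors.
int DescribeAndFetchOutputs(OpenCVImpl::InferenceOpenCV &backend)
{
	// Describe one output layer by name ("prob" is a hypothetical example).
	inference_engine_tensor_info info;
	info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
	info.shape = { 1, 1001, 1, 1 };          // hypothetical classifier output shape
	info.size = 1 * 1001 * 1 * 1;            // element count, as computed in this patch

	inference_engine_layer_property property;
	property.layers.insert(std::make_pair("prob", info));
	backend.SetOutputLayerProperty(property);

	// Tensor buffers now come back keyed by the same layer names.
	std::map<std::string, inference_engine_tensor_buffer> outputs;
	int ret = backend.GetOutputTensorBuffers(outputs);
	if (ret != INFERENCE_ENGINE_ERROR_NONE)
		return ret;

	for (auto &out : outputs)
		LOGI("layer %s -> buffer %p", out.first.c_str(), out.second.buffer);

	return INFERENCE_ENGINE_ERROR_NONE;
}

The same keying applies to Run(): input_buffers and output_buffers are matched to layers by name rather than by position.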