Change members of inference_engine_layer_property structure,
and change vector<inference_engine_tensor_buffer> to map<string, inference_engine_tensor_buffer>

author    Tae-Young Chung <ty83.chung@samsung.com>  Thu, 11 Mar 2021 01:17:01 +0000 (10:17 +0900)
committer Tae-Young Chung <ty83.chung@samsung.com>  Tue, 16 Mar 2021 07:36:50 +0000 (16:36 +0900)

This is based on
https://review.tizen.org/gerrit/#/c/platform/core/multimedia/inference-engine-interface/+/254892/
https://review.tizen.org/gerrit/#/c/platform/core/api/mediavision/+/254953/

Change-Id: I9316e46cade0e51aa18b9d339d2f618949489e0c
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
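
For reference, the interface change this backend adapts to can be sketched as follows. This is only a reconstruction from the removed/added lines below; the authoritative definitions live in inference-engine-interface (see the reviews linked above), and any enum or type spelling not visible in the diff is an assumption.

// Sketch only: reconstructed from this diff, not the authoritative header.
#include <cstddef>
#include <map>
#include <string>
#include <vector>

// Placeholder for the data-type enum; only the value used in this diff is listed.
enum inference_tensor_data_type_e { INFERENCE_TENSOR_DATA_TYPE_FLOAT32 };

// Fields of inference_engine_tensor_info that this backend touches.
struct inference_engine_tensor_info {
	std::vector<size_t> shape;
	inference_tensor_data_type_e data_type;
	size_t size;
};

// Before: layer names and tensor infos were parallel vectors, matched by index.
struct inference_engine_layer_property_before {
	std::vector<std::string> layer_names;
	std::vector<inference_engine_tensor_info> tensor_infos;
};

// After: a single map keyed by layer name, so a name and its tensor info cannot
// drift apart. Tensor buffers change the same way:
//   std::vector<inference_engine_tensor_buffer>
//     -> std::map<std::string, inference_engine_tensor_buffer>
struct inference_engine_layer_property {
	std::map<std::string, inference_engine_tensor_info> layers;
};
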
packaging/inference-engine-opencv.spec
src/inference_engine_opencv.cpp
src/inference_engine_opencv_private.h

diff --git a/packaging/inference-engine-opencv.spec b/packaging/inference-engine-opencv.spec
index b9860ea..513e638 100644
@@ -1,7 +1,7 @@
 Name:       inference-engine-opencv
 Summary:    OpenCV based implementation of inference-engine-interface
 Version:    0.0.2
-Release:    6
+Release:    7
 Group:      Multimedia/Libraries
 License:    Apache-2.0
 Source0:    %{name}-%{version}.tar.gz
diff --git a/src/inference_engine_opencv.cpp b/src/inference_engine_opencv.cpp
index 3d12674..c85eebe 100644
@@ -50,7 +50,7 @@ namespace OpenCVImpl
        {
                LOGI("ENTER");
 
-               LOGI("Inferece targets are: ");
+               LOGI("Inferece target is [%d]", types);
                switch (types) {
                case INFERENCE_TARGET_CPU:
                        mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
@@ -61,7 +61,7 @@ namespace OpenCVImpl
                case INFERENCE_TARGET_CUSTOM:
                case INFERENCE_TARGET_NONE:
                default:
-                       LOGE("Not supported device type [%d], Set CPU mode", (int) types);
+                       LOGE("Not supported device type [%d], Set CPU mode", types);
                        mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
                }
                LOGI("LEAVE");
@@ -124,19 +124,16 @@ namespace OpenCVImpl
        }
 
        int InferenceOpenCV::GetInputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
                LOGI("ENTER");
 
                mInputData.clear();
 
                void *pBuff = NULL;
-               std::vector<inference_engine_tensor_info>::iterator info_iter;
-               for (info_iter = mInputTensorInfo.begin();
-                        info_iter != mInputTensorInfo.end(); ++info_iter) {
-                       cv::Mat inputBlob(cv::Size((*info_iter).shape[3],
-                                                                          (*info_iter).shape[2]),
-                                                         CV_32FC3);
+               for (auto& layer : mInputLayers) {
+                       std::vector<size_t>& shape = layer.second.shape;
+                       cv::Mat inputBlob(cv::Size(shape[3], shape[2]), CV_32FC3);
                        mInputData.push_back(inputBlob);
 
                        pBuff = mInputData.back().ptr<void *>(0);
@@ -148,7 +145,7 @@ namespace OpenCVImpl
                        inference_engine_tensor_buffer buffer = {
                                pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1
                        };
-                       buffers.push_back(buffer);
+                       buffers.insert(std::make_pair(layer.first, buffer));
                }
 
                LOGI("LEAVE");
@@ -157,36 +154,39 @@ namespace OpenCVImpl
        }
 
        int InferenceOpenCV::GetOutputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
                LOGI("ENTER");
 
                mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(),
                                                                                          cv::Scalar(), false, false);
 
-               mNet.setInput(mInputBlobs, mInputLayers.front());
+               mNet.setInput(mInputBlobs, mInputLayers.begin()->first);
 
-               std::vector<cv::String> ouputLayers(mOutputLayers.begin(),
-                                                                                       mOutputLayers.end());
-               mNet.forward(mOutputBlobs, ouputLayers);
+               std::vector<cv::String> outputLayers;
+               for (auto& layer : mOutputLayers) {
+                       LOGI("output layer: %s", layer.first.c_str());
+                       outputLayers.push_back(layer.first);
+               }
+
+               mNet.forward(mOutputBlobs, outputLayers);
 
                void *pBuff = NULL;
-               std::vector<cv::Mat>::iterator iter;
-               for (iter = mOutputBlobs.begin(); iter != mOutputBlobs.end(); ++iter) {
-                       pBuff = (*iter).ptr<void *>(0);
-                       size_t sizeBuff = (*iter).total() * (*iter).elemSize();
+               std::map<std::string, int>().swap(mOutputLayerId);
+               for (size_t idx = 0; idx < mOutputBlobs.size(); ++idx) {
+                       pBuff = mOutputBlobs[idx].ptr<void *>(0);
+                       size_t sizeBuff = mOutputBlobs[idx].total() * mOutputBlobs[idx].elemSize();
                        inference_engine_tensor_buffer buffer = {
                                pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1
                        };
-                       buffers.push_back(buffer);
+
+                       LOGI("output layer: %s with %p", outputLayers[idx].c_str(), buffer.buffer);
+                       buffers.insert(std::make_pair(outputLayers[idx], buffer));
+                       mOutputLayerId.insert(std::make_pair(outputLayers[idx], mNet.getLayerId(outputLayers[idx])));
                }
 
                if (buffers.empty()) {
                        LOGI("buff empty");
-                       inference_engine_tensor_buffer buffer = {
-                               nullptr, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 1
-                       };
-                       buffers.push_back(buffer);
                }
 
                LOGI("LEAVE");
@@ -203,8 +203,7 @@ namespace OpenCVImpl
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
-               property.layer_names = mInputLayers;
-               property.tensor_infos = mInputTensorInfo;
+               property.layers = mInputLayers;
 
                LOGI("LEAVE");
 
@@ -221,27 +220,29 @@ namespace OpenCVImpl
                }
 
                int lid = -1;
-               int idx = 0;
-               std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
-               for (std::vector<std::string>::iterator iter = mOutputLayers.begin();
-                        iter != mOutputLayers.end(); ++iter, ++idx) {
-                       LOGI("output layer: %s", (*iter).c_str());
-                       lid = mNet.getLayerId((*iter));
+               std::vector<std::vector<int>> inputTensorShapes;
+               for (auto& layer : mInputLayers) {
+                       inputTensorShapes.push_back(std::vector<int>(
+                                               layer.second.shape.begin(),
+                                               layer.second.shape.end()));
+                       LOGI("input layer[%s]: %zu, %zu, %zu, %zu",layer.first.c_str(),
+                                layer.second.shape[0], layer.second.shape[1],
+                                layer.second.shape[2], layer.second.shape[3]);
+               }
+
+               std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
+               for (auto& layer : mOutputLayerId) {
+                       LOGI("output layer: %s", layer.first.c_str());
+                       lid = layer.second;
                        LOGI("output layer Id: %d", lid);
                        if (lid < 0) {
-                               LOGE("Invalid output %s layer", (*iter).c_str());
+                               LOGE("Invalid output %s layer", layer.first.c_str());
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                        }
 
                        std::vector<cv::dnn::MatShape> lInputShape, lOutputShape;
-                       LOGI("%zu, %zu, %zu, %zu", mInputTensorInfo[idx].shape[0],
-                                mInputTensorInfo[idx].shape[1], mInputTensorInfo[idx].shape[2],
-                                mInputTensorInfo[idx].shape[3]);
-
-                       std::vector<int> cvInputTensorShape(
-                                       mInputTensorInfo[idx].shape.begin(),
-                                       mInputTensorInfo[idx].shape.end());
-                       mNet.getLayerShapes(cvInputTensorShape, lid, lInputShape,
+
+                       mNet.getLayerShapes(inputTensorShapes, lid, lInputShape,
                                                                lOutputShape);
                        inference_engine_tensor_info tensor_info;
                        tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
@@ -255,17 +256,14 @@ namespace OpenCVImpl
 
                        tensor_info.size = 1;
                        LOGE("tensor_info");
-                       for (std::vector<size_t>::iterator iter2 =
-                                                tensor_info.shape.begin();
-                                iter2 != tensor_info.shape.end(); ++iter2) {
-                               LOGI("%zu", (*iter2));
-                               tensor_info.size *= (*iter2);
+                       for (auto& dim : tensor_info.shape) {
+                               LOGI("%zu", dim);
+                               tensor_info.size *= dim;
                        }
-                       mOutputTensorInfo.push_back(tensor_info);
+                       mOutputLayers.insert(std::make_pair(layer.first, tensor_info));
                }
 
-               property.layer_names = mOutputLayers;
-               property.tensor_infos = mOutputTensorInfo;
+               property.layers = mOutputLayers;
 
                LOGI("LEAVE");
 
@@ -277,21 +275,14 @@ namespace OpenCVImpl
        {
                LOGI("ENTER");
 
-               std::vector<std::string>::iterator iter;
-               for (iter = property.layer_names.begin();
-                        iter != property.layer_names.end(); iter++) {
-                       std::string name = *iter;
-                       LOGI("input layer name = %s", name.c_str());
+               for (auto& layer : property.layers) {
+                       LOGI("input layer name = %s", layer.first.c_str());
                }
 
                mInputLayers.clear();
-               std::vector<std::string>().swap(mInputLayers);
+               std::map<std::string, inference_engine_tensor_info>().swap(mInputLayers);
 
-               mInputTensorInfo.clear();
-               std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
-
-               mInputLayers = property.layer_names;
-               mInputTensorInfo = property.tensor_infos;
+               mInputLayers = property.layers;
 
                LOGI("LEAVE");
 
@@ -301,17 +292,14 @@ namespace OpenCVImpl
        int InferenceOpenCV::SetOutputLayerProperty(
                        inference_engine_layer_property &property)
        {
-               std::vector<std::string>::iterator iter;
-               for (iter = property.layer_names.begin();
-                        iter != property.layer_names.end(); iter++) {
-                       std::string name = *iter;
-                       LOGI("output layer name = %s", name.c_str());
+               for (auto& layer : property.layers) {
+                       LOGI("output layer name = %s", layer.first.c_str());
                }
 
                mOutputLayers.clear();
-               std::vector<std::string>().swap(mOutputLayers);
+               std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
 
-               mOutputLayers = property.layer_names;
+               mOutputLayers = property.layers;
 
                return INFERENCE_ENGINE_ERROR_NONE;
        }
@@ -333,21 +321,21 @@ namespace OpenCVImpl
        }
 
        int InferenceOpenCV::Run(
-                       std::vector<inference_engine_tensor_buffer> &input_buffers,
-                       std::vector<inference_engine_tensor_buffer> &output_buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                       std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
        {
                LOGI("ENTER");
 
                // need to check memoery
                mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(),
-                                                                                         cv::Scalar(), false, false);
+                                                                                         cv::Scalar(), true, false);
 
                // Currently it supports that one input layer with multiple input tensors.
                // it doesn't support that mulitple input layer with multiple input tensors.
                // To suppor that, setInput is called manually while we matching inputblobs
                // and their corresponding input layer.
                // Suppose a case that an input layer and mulitple input tensors are given.
-               mNet.setInput(mInputBlobs, mInputLayers.front());
+               mNet.setInput(mInputBlobs, input_buffers.begin()->first);
 
                if (mOutputBlobs.size() != output_buffers.size()) {
                        LOGE("output_buffers size is %zu but outputBlobs %zu",
@@ -355,8 +343,10 @@ namespace OpenCVImpl
                        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                }
 
-               std::vector<cv::String> outputLayers(mOutputLayers.begin(),
-                                                                                        mOutputLayers.end());
+               std::vector<cv::String> outputLayers;
+               for (auto& layer : mOutputLayers) {
+                       outputLayers.push_back(layer.first);
+               }
 
                mNet.forward(mOutputBlobs, outputLayers);
 
@@ -369,8 +359,11 @@ namespace OpenCVImpl
                        cvOutputData.at<float>(0, 0) = mOutputBlobs[0].size[2];
                }
 
-               for (unsigned int k = 0; k < output_buffers.size(); ++k)
-                       output_buffers[k].buffer = mOutputBlobs[k].ptr<void>(0);
+               int idx = 0;
+               for (auto iter = output_buffers.begin(); iter != output_buffers.end(); ++iter, ++idx) {
+                       LOGI("output: %s", iter->first.c_str());
+                       iter->second.buffer = mOutputBlobs[idx].ptr<void *>(0);
+               }
 
                LOGI("LEAVE");
 
diff --git a/src/inference_engine_opencv_private.h b/src/inference_engine_opencv_private.h
index ad7cc46..a3eb84a 100644
@@ -57,10 +57,10 @@ namespace OpenCVImpl
                                 inference_model_format_e model_format) override;
 
                int GetInputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers) override;
+                               std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
                int GetOutputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers) override;
+                               std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
                int GetInputLayerProperty(
                                inference_engine_layer_property &property) override;
@@ -76,21 +76,21 @@ namespace OpenCVImpl
 
                int GetBackendCapacity(inference_engine_capacity *capacity) override;
 
-               int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                               std::vector<inference_engine_tensor_buffer> &output_buffers)
+               int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                               std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
                                override;
 
        private:
                std::vector<cv::Mat> mInputData;
                cv::Mat mInputBlobs;
 
-               std::vector<inference_engine_tensor_info> mInputTensorInfo;
-               std::vector<inference_engine_tensor_info> mOutputTensorInfo;
+               std::map<std::string, inference_engine_tensor_info> mInputLayers;
+               std::map<std::string, inference_engine_tensor_info> mOutputLayers;
                std::vector<cv::Mat> mOutputBlobs;
                cv::dnn::Net mNet; /**< Network associated with a network model */
 
-               std::vector<std::string> mInputLayers;
-               std::vector<std::string> mOutputLayers;
+               std::map<std::string, int> mInputLayerId;
+               std::map<std::string, int> mOutputLayerId;
 
                std::string mConfigFile;
                std::string mWeightFile;
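
Not part of the commit, but as a usage illustration: with the map-keyed API above, a caller addresses tensor buffers by layer name instead of by positional index. The sketch below assumes an already-configured engine (model loaded, layer properties set); only the member functions declared in inference_engine_opencv_private.h are real, everything else is illustrative.

// Illustrative caller-side sketch (assumes the engine is already set up and
// that this header pulls in the inference-engine-interface types).
#include "inference_engine_opencv_private.h"

#include <map>
#include <string>

int run_once(OpenCVImpl::InferenceOpenCV &engine)
{
	std::map<std::string, inference_engine_tensor_buffer> inputs, outputs;

	// Buffers now come back keyed by layer name rather than by index.
	engine.GetInputTensorBuffers(inputs);
	engine.GetOutputTensorBuffers(outputs);

	// ... fill inputs["<input layer name>"].buffer with preprocessed data ...

	int ret = engine.Run(inputs, outputs);

	for (const auto &out : outputs) {
		// out.first is the output layer name; out.second.buffer points at the
		// FP32 results for that layer.
		(void)out;
	}

	return ret;
}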