Change vector<> type API parameters to map<> type
author Tae-Young Chung <ty83.chung@samsung.com>
Fri, 8 Jan 2021 04:03:52 +0000 (13:03 +0900)
committer Tae-Young Chung <ty83.chung@samsung.com>
Fri, 8 Jan 2021 04:05:28 +0000 (13:05 +0900)
Parameters of GetInputTensorBuffers(), GetOutputTensorBuffers(), and Run() are
changed from the vector<inference_engine_tensor_buffer> type to the
map<string, inference_engine_tensor_buffer> type.

With this patch, tensor buffers can be accessed by the layer names
corresponding to the buffers.
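
A minimal caller-side sketch (not part of this patch) of how the new
map-based signatures might be used; the "engine" instance and the layer
name "input" are illustrative assumptions, not names defined here:

    // Hypothetical usage; real layer names come from the loaded model.
    std::map<std::string, inference_engine_tensor_buffer> inputs, outputs;

    engine->GetInputTensorBuffers(inputs);
    engine->GetOutputTensorBuffers(outputs);

    // Buffers are now looked up by layer name instead of by position.
    auto it = inputs.find("input");
    if (it != inputs.end()) {
        // fill the buffer referenced by it->second with preprocessed data
    }

    engine->Run(inputs, outputs);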

Refer to inference-engine-interface commit
"c80a06a675d7f61addb5385ebc01b5565f3acc66"

Change-Id: I6e483654409198e98c23835f379e408943a4e7ac
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
src/inference_engine_tflite.cpp
src/inference_engine_tflite_private.h

index 78e4f64cc60114a500d0c616e2836809eb8d6c4d..d9d9f50d48215ecf268a2d24c3abd541a93f6a5d 100644
@@ -163,7 +163,7 @@ namespace TFLiteImpl
        }
 
        int InferenceTFLite::GetInputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
                LOGI("ENTER");
 
@@ -198,14 +198,14 @@ namespace TFLiteImpl
                                LOGE("Not supported");
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                        }
-                       buffers.push_back(buffer);
+                       buffers.insert(std::make_pair(mInputLayer[idx], buffer));
                }
 
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
        int InferenceTFLite::GetOutputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
                void *pBuff = NULL;
 
@@ -241,7 +241,7 @@ namespace TFLiteImpl
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
                        }
 
-                       buffers.push_back(buffer);
+                       buffers.insert(std::make_pair(mOutputLayer[idx], buffer));
                }
                return INFERENCE_ENGINE_ERROR_NONE;
        }
@@ -390,8 +390,8 @@ namespace TFLiteImpl
        }
 
        int InferenceTFLite::Run(
-                       std::vector<inference_engine_tensor_buffer> &input_buffers,
-                       std::vector<inference_engine_tensor_buffer> &output_buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                       std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
        {
                LOGI("ENTER");
                TfLiteStatus status = mInterpreter->Invoke();
index 0c665e000d280ad23fac0a140c7c0a4ba0e18179..e184bdf94e76fdfc051882dce1ef29d1d6266854 100644
@@ -58,10 +58,10 @@ namespace TFLiteImpl
                                 inference_model_format_e model_format) override;
 
                int GetInputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers) override;
+                               std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
                int GetOutputTensorBuffers(
-                               std::vector<inference_engine_tensor_buffer> &buffers) override;
+                               std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
                int GetInputLayerProperty(
                                inference_engine_layer_property &property) override;
@@ -77,8 +77,8 @@ namespace TFLiteImpl
 
                int GetBackendCapacity(inference_engine_capacity *capacity) override;
 
-               int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                               std::vector<inference_engine_tensor_buffer> &output_buffers)
+               int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                               std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
                                override;
 
        private: