Add support for user-designated input and output layers
author Inki Dae <inki.dae@samsung.com>
Tue, 18 Feb 2020 01:56:36 +0000 (10:56 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 18 Feb 2020 01:56:36 +0000 (10:56 +0900)
Change-Id: I33216ac409f8087d166154ff57b1cec4b2a80879
Signed-off-by: Inki Dae <inki.dae@samsung.com>
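For reference, the intended call pattern for the new Set*LayerProperty entry points is sketched below. This is illustrative only and not part of the patch: it assumes the inference_engine_layer_property type from the inference-engine-interface headers (only its layer_names member, a std::vector<std::string>, is used here), an already-constructed ARMNN backend object, and placeholder layer names.

    // Minimal usage sketch under the assumptions noted above; not part of this patch.
    #include <string>
    #include <vector>
    #include <inference_engine_type.h>  // assumed header providing inference_engine_layer_property

    template <typename Backend>
    int DesignateLayers(Backend &engine,
                        const std::vector<std::string> &inputs,
                        const std::vector<std::string> &outputs)
    {
        inference_engine_layer_property in_prop;
        in_prop.layer_names = inputs;    // e.g. { "input_tensor" } (placeholder name)

        inference_engine_layer_property out_prop;
        out_prop.layer_names = outputs;  // e.g. { "output_tensor" } (placeholder name)

        // Call these before the model is loaded; CreateTfLiteNetwork() then binds to
        // the designated names instead of the parser's defaults whenever
        // mDesignated_inputs / mDesignated_outputs are non-empty.
        int ret = engine.SetInputLayerProperty(in_prop);
        if (ret != INFERENCE_ENGINE_ERROR_NONE)
            return ret;

        return engine.SetOutputLayerProperty(out_prop);
    }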
src/inference_engine_armnn.cpp
src/inference_engine_armnn_private.h

index c29384dc7f9b7206b26127933451b5812d9f6137..ac0985fc7b73ba440ea5f1172e71e24ecd844f4b 100644 (file)
@@ -49,7 +49,17 @@ InferenceARMNN::InferenceARMNN(void) :
 
 InferenceARMNN::~InferenceARMNN()
 {
-    ;
+    mDesignated_inputs.clear();
+    std::vector<std::string>().swap(mDesignated_inputs);
+
+    mDesignated_outputs.clear();
+    std::vector<std::string>().swap(mDesignated_outputs);
+
+    mInputBindingInfo.clear();
+    std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
+
+    mOutputBindingInfo.clear();
+    std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
 }
 
 inference_tensor_data_type_e InferenceARMNN::ConvertDataType(armnn::DataType type)
@@ -145,14 +155,45 @@ int InferenceARMNN::CreateTfLiteNetwork(std::string model_path)
     if (!mNetwork)
         return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
 
-    std::vector<std::string> in_names = parser->GetSubgraphInputTensorNames(0);
-    for (auto const &name:in_names) {
-        mInputBindingInfo.push_back(parser->GetNetworkInputBindingInfo(0, name));
+    // If the user has designated any input layers, bind to those layers.
+    // Otherwise, bind to the input tensor names reported by the ARMNN parser.
+    if (mDesignated_inputs.empty()) {
+        std::vector<std::string> in_names = parser->GetSubgraphInputTensorNames(0);
+        for (auto const &name:in_names) {
+            mInputBindingInfo.push_back(parser->GetNetworkInputBindingInfo(0, name));
+            LOGI("%s layer has been designated as input.", name.c_str());
+        }
+    } else {
+        mInputBindingInfo.clear();
+        std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
+
+        std::vector<std::string>::iterator iter;
+        for (iter = mDesignated_inputs.begin(); iter != mDesignated_inputs.end();
+                iter++) {
+            std::string name = *iter;
+            mInputBindingInfo.push_back(parser->GetNetworkInputBindingInfo(0, name));
+            LOGI("%s layer has been designated as input.", name.c_str());
+        }
     }
 
-    std::vector<std::string> out_names = parser->GetSubgraphOutputTensorNames(0);
-    for (auto const &name:out_names) {
-        mOutputBindingInfo.push_back(parser->GetNetworkOutputBindingInfo(0, name));
+    // If the user has designated any output layers, bind to those layers.
+    // Otherwise, bind to the output tensor names reported by the ARMNN parser.
+    if (mDesignated_outputs.empty()) {
+        std::vector<std::string> out_names = parser->GetSubgraphOutputTensorNames(0);
+        for (auto const &name:out_names) {
+            mOutputBindingInfo.push_back(parser->GetNetworkOutputBindingInfo(0, name));
+            LOGI("%s layer has been designated as output.", name.c_str());
+        }
+    } else {
+        mOutputBindingInfo.clear();
+        std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
+
+        std::vector<std::string>::iterator iter;
+        for (iter = mDesignated_outputs.begin(); iter != mDesignated_outputs.end(); iter++) {
+            std::string name = *iter;
+            mOutputBindingInfo.push_back(parser->GetNetworkOutputBindingInfo(0, name));
+            LOGI("%s layer has been designated as output.", name.c_str());
+        }
     }
 
     LOGI("Input Tensor count = %d", (int)mInputBindingInfo.size());
@@ -298,6 +339,46 @@ int InferenceARMNN::GetOutputLayerProperty(inference_engine_layer_property &prop
     return INFERENCE_ENGINE_ERROR_NONE;
 }
 
+int InferenceARMNN::SetInputLayerProperty(inference_engine_layer_property &property)
+{
+    LOGI("ENTER");
+
+    std::vector<std::string>::iterator iter;
+    for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
+        std::string name = *iter;
+        LOGI("input layer name = %s", name.c_str());
+    }
+
+    mDesignated_inputs.clear();
+    std::vector<std::string>().swap(mDesignated_inputs);
+
+    mDesignated_inputs = property.layer_names;
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceARMNN::SetOutputLayerProperty(inference_engine_layer_property &property)
+{
+    LOGI("ENTER");
+
+    std::vector<std::string>::iterator iter;
+    for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
+        std::string name = *iter;
+        LOGI("input layer name = %s", name.c_str());
+    }
+
+    mDesignated_outputs.clear();
+    std::vector<std::string>().swap(mDesignated_outputs);
+
+    mDesignated_outputs = property.layer_names;
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
 int InferenceARMNN::GetBackendCapacity(inference_engine_capacity *capacity)
 {
     LOGI("ENTER");
@@ -315,11 +396,42 @@ int InferenceARMNN::GetBackendCapacity(inference_engine_capacity *capacity)
     return INFERENCE_ENGINE_ERROR_NONE;
 }
 
+
+int InferenceARMNN::CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
+                                        std::vector<inference_engine_tensor_buffer> &output_buffers)
+{
+    int ret = INFERENCE_ENGINE_ERROR_NONE;
+
+    LOGI("ENTER");
+
+    if (input_buffers.size() != mInputBindingInfo.size()) {
+        LOGE("input size(%d) is different from input binding info's one(%d).",
+                input_buffers.size(), mInputBindingInfo.size());
+        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+    }
+
+    if (output_buffers.size() != mOutputBindingInfo.size()) {
+        LOGE("output size(%d) is different from output binding info's one(%d).",
+                output_buffers.size(), mOutputBindingInfo.size());
+        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+    }
+
+    LOGI("LEAVE");
+
+    return ret;
+}
+
 int InferenceARMNN::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
                         std::vector<inference_engine_tensor_buffer> &output_buffers)
 {
     LOGI("ENTER");
 
+    // Verify that the given tensor buffer counts match the binding info counts.
+    int err = CheckTensorBuffers(input_buffers, output_buffers);
+    if (err != INFERENCE_ENGINE_ERROR_NONE) {
+        return err;
+    }
+
     armnn::BindingPointInfo outBindingInfo = mOutputBindingInfo.front();
     armnn::TensorInfo outputTensorInfo = outBindingInfo.second;
     armnn::TensorShape shape = outputTensorInfo.GetShape();
index 645b538b6d560780ef743d28a385c64a2f511a49..2fb13d812c620ae6ae2f4e89d7afb6bfc44f52a1 100644 (file)
@@ -59,6 +59,10 @@ public:
 
     int GetOutputLayerProperty(inference_engine_layer_property &property) override;
 
+    int SetInputLayerProperty(inference_engine_layer_property &property) override;
+
+    int SetOutputLayerProperty(inference_engine_layer_property &property) override;
+
     int GetBackendCapacity(inference_engine_capacity *capacity) override;
 
     int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
@@ -72,6 +76,8 @@ private:
     void *AllocateTensorBuffer(armnn::DataType type, int tensor_size);
     inference_tensor_data_type_e ConvertDataType(armnn::DataType type);
     void ReleaseTensorBuffer(armnn::DataType type, void *tensor_buffer);
+    int CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
+                            std::vector<inference_engine_tensor_buffer> &output_buffers);
 
     std::vector<armnn::BackendId> mAccelType;
 
@@ -82,6 +88,8 @@ private:
 
     std::vector<armnn::BindingPointInfo> mInputBindingInfo;
     std::vector<armnn::BindingPointInfo> mOutputBindingInfo;
+    std::vector<std::string> mDesignated_inputs;
+    std::vector<std::string> mDesignated_outputs;
 };
 
 } /* InferenceEngineImpl */