Add support for multiple input and output tensors
authorInki Dae <inki.dae@samsung.com>
Tue, 18 Feb 2020 02:37:41 +0000 (11:37 +0900)
committerInki Dae <inki.dae@samsung.com>
Tue, 18 Feb 2020 02:37:41 +0000 (11:37 +0900)
Until now, Run() handled only the first entry of mInputBindingInfo and
mOutputBindingInfo. Build armnn::InputTensors and armnn::OutputTensors by
iterating over every registered binding point together with its matching
tensor buffer, so that models with multiple inputs and outputs can be run.

Change-Id: Ibde983e23f3ba208b29494ece236675213aa52ef
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_armnn.cpp

index ac0985fc7b73ba440ea5f1172e71e24ecd844f4b..021346da99077cd5d668bb9555802762da9ff234 100644 (file)
@@ -432,37 +432,48 @@ int InferenceARMNN::Run(std::vector<inference_engine_tensor_buffer> &input_buffe
         return err;
     }
 
-    armnn::BindingPointInfo outBindingInfo = mOutputBindingInfo.front();
-    armnn::TensorInfo outputTensorInfo = outBindingInfo.second;
-    armnn::TensorShape shape = outputTensorInfo.GetShape();
+    std::vector<armnn::BindingPointInfo>::iterator binding_iter;
+    std::vector<inference_engine_tensor_buffer>::iterator buffer_iter;
 
-    // input
-    armnn::BindingPointInfo inBindingInfo = mInputBindingInfo.front();
-    armnn::TensorInfo inputTensorInfo = inBindingInfo.second;
+    // Setup input layer.
+    armnn::InputTensors input_tensors;
 
-    int tensor_size = 1;
-    for (int i = 0; i < (int)outputTensorInfo.GetNumDimensions(); i++) {
-        tensor_size *= shape[i];
-    }
+    for (binding_iter = mInputBindingInfo.begin(), buffer_iter = input_buffers.begin();
+                binding_iter != mInputBindingInfo.end(); binding_iter++, buffer_iter++) {
+        armnn::BindingPointInfo inBindingInfo = *binding_iter;
+        armnn::TensorInfo inputTensorInfo = inBindingInfo.second;
+        inference_engine_tensor_buffer tensor_buffer = *buffer_iter;
 
-    LOGI("Output Tensor size = %d", tensor_size);
+        armnn::Tensor input_tensor(inputTensorInfo, tensor_buffer.buffer);
+        input_tensors.push_back({inBindingInfo.first, input_tensor});
 
-    // TODO. consider mutiple input and output.
+        armnn::TensorShape shape = inputTensorInfo.GetShape();
+        int tensor_size = 1;
+        for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
+            tensor_size *= shape[i];
+
+        LOGI("Input Tensor dimension = %d (size = %d)", inputTensorInfo.GetNumDimensions(), tensor_size);
+    }
 
-    armnn::Tensor output_tensor(outputTensorInfo, output_buffers.front().buffer);
-    armnn::Tensor input_tensor(inputTensorInfo, input_buffers.front().buffer);
+    // Setup output layer.
+    armnn::OutputTensors output_tensors;
 
-    std::vector<armnn::Tensor> OutputTensors;
-    OutputTensors.push_back(output_tensor);
+    for (binding_iter = mOutputBindingInfo.begin(), buffer_iter = output_buffers.begin();
+                binding_iter != mOutputBindingInfo.end(); binding_iter++, buffer_iter++) {
+        armnn::BindingPointInfo outBindingInfo = *binding_iter;
+        armnn::TensorInfo outputTensorInfo = outBindingInfo.second;
+        inference_engine_tensor_buffer tensor_buffer = *buffer_iter;
 
-    std::vector<armnn::Tensor> InputTensors;
-    InputTensors.push_back(input_tensor);
+        armnn::Tensor output_tensor(outputTensorInfo, tensor_buffer.buffer);
+        output_tensors.push_back({outBindingInfo.first, output_tensor});
 
-    armnn::InputTensors input_tensors;
-    armnn::OutputTensors output_tensors;
+        armnn::TensorShape shape = outputTensorInfo.GetShape();
+        int tensor_size = 1;
+        for (unsigned int i = 0; i < outputTensorInfo.GetNumDimensions(); i++)
+            tensor_size *= shape[i];
 
-    input_tensors.push_back({mInputBindingInfo[0].first, InputTensors.front()});
-    output_tensors.push_back({mOutputBindingInfo[0].first, OutputTensors.front()});
+        LOGI("Output Tensor dimension = %d (size = %d)", outputTensorInfo.GetNumDimensions(), tensor_size);
+    }
 
     armnn::Status ret = mRuntime->EnqueueWorkload(mNetworkIdentifier,
                                                   input_tensors, output_tensors);
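
For illustration, a minimal caller-side sketch of how the reworked Run() is expected to be driven. The helper name RunWithMultipleTensors, the void* blob vectors, and the assumption that inference_engine_tensor_buffer is an aggregate whose buffer member is a void* are illustrative only; just the Run() signature shown in the hunk header and the buffer member used in the loops above are taken from the patch.

    // Hypothetical caller-side helper (not part of this patch).
    #include <vector>

    int RunWithMultipleTensors(InferenceARMNN &engine,
                               const std::vector<void *> &input_blobs,
                               const std::vector<void *> &output_blobs)
    {
        std::vector<inference_engine_tensor_buffer> input_buffers;
        std::vector<inference_engine_tensor_buffer> output_buffers;

        // One buffer per input binding, in the same order as mInputBindingInfo.
        for (void *blob : input_blobs) {
            inference_engine_tensor_buffer buf = {};
            buf.buffer = blob;            // caller-owned tensor data
            input_buffers.push_back(buf);
        }

        // One buffer per output binding, in the same order as mOutputBindingInfo.
        for (void *blob : output_blobs) {
            inference_engine_tensor_buffer buf = {};
            buf.buffer = blob;            // caller-owned result storage
            output_buffers.push_back(buf);
        }

        // Run() advances the buffer iterators in lockstep with the binding
        // lists and only checks the binding iterator against end(), so there
        // must be exactly one buffer per registered binding point, in order.
        return engine.Run(input_buffers, output_buffers);
    }

Because Run() matches buffers to binding points purely by position, the caller is responsible for supplying the buffers in the same order in which the binding info lists were built.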