{
LOGI("ENTER");
- // Upper layer will allocate input tensor buffer/buffers.
+ // Upper layer will allocate input tensor buffer/buffers.
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
+int InferenceARMNN::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
+{
+ LOGI("ENTER");
+
+ // Upper layer will allocate output tensor buffer/buffers.
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+
int InferenceARMNN::GetInputLayerProperty(inference_engine_layer_property &property)
{
LOGI("ENTER");
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceARMNN::Run(std::vector<inference_engine_tensor_buffer> &input_buffers)
+int InferenceARMNN::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers)
{
LOGI("ENTER");
// TODO. consider multiple input and output.
- void *outputData = AllocateTensorBuffer((armnn::DataType)outputTensorInfo.GetDataType(),
- tensor_size);
- if (outputData == nullptr) {
- LOGE("Fail to allocate tensor buffer.");
- return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
- }
-
- armnn::Tensor output_tensor(outputTensorInfo, outputData);
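+ // Wrap the output buffer allocated by the upper layer; this backend no longer allocates it.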
+ armnn::Tensor output_tensor(outputTensorInfo, output_buffers.front().buffer);
armnn::Tensor input_tensor(inputTensorInfo, input_buffers.front().buffer);
- mOutputTensor.clear();
- mOutputTensor.push_back(output_tensor);
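+ // Output tensors are local to this call; no output state is kept across calls.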
+ std::vector<armnn::Tensor> OutputTensors;
+ OutputTensors.push_back(output_tensor);
std::vector<armnn::Tensor> InputTensors;
InputTensors.push_back(input_tensor);
armnn::OutputTensors output_tensors;
input_tensors.push_back({mInputBindingInfo[0].first, InputTensors.front()});
- output_tensors.push_back({mOutputBindingInfo[0].first, mOutputTensor.front()});
+ output_tensors.push_back({mOutputBindingInfo[0].first, OutputTensors.front()});
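+ // Execute the loaded network with the bound input/output tensors.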
armnn::Status ret = mRuntime->EnqueueWorkload(mNetworkIdentifier,
input_tensors, output_tensors);
{
LOGI("ENTER");
- armnn::Tensor output_tensor = mOutputTensor.front();
- armnn::TensorShape shape = output_tensor.GetShape();
-
- std::vector<int> tmpDimInfo;
- for (int i = 0; i < (int)output_tensor.GetNumDimensions(); i++) {
- tmpDimInfo.push_back(shape[i]);
- }
-
- results.dimInfo.push_back(tmpDimInfo);
- results.data.push_back((void *)output_tensor.GetMemoryArea());
-
- // TODO. when should output_tensor buffer be released?
-
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
+ int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
+
int GetInputLayerProperty(inference_engine_layer_property &property) override;
int GetOutputLayerProperty(inference_engine_layer_property &property) override;
int GetBackendCapacity(inference_engine_capacity *capacity) override;
- int Run(std::vector<inference_engine_tensor_buffer> &input_buffers) override;
+ int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers) override;
int GetInferenceResult(tensor_t& results) override;
std::vector<armnn::BindingPointInfo> mInputBindingInfo;
std::vector<armnn::BindingPointInfo> mOutputBindingInfo;
-
- std::vector<armnn::Tensor> mOutputTensor;
};
} /* InferenceEngineImpl */