#include <armnn/ArmNN.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>
-namespace InferenceEngineImpl {
-namespace ARMNNImpl {
-
-InferenceARMNN::InferenceARMNN(void) :
- mRuntime(nullptr, &armnn::IRuntime::Destroy),
- mNetwork(armnn::INetworkPtr(nullptr, nullptr))
-{
- LOGI("ENTER");
- LOGI("LEAVE");
-}
-
-InferenceARMNN::~InferenceARMNN()
-{
- mDesignated_inputs.clear();
- std::vector<std::string>().swap(mDesignated_inputs);
-
- mDesignated_outputs.clear();
- std::vector<std::string>().swap(mDesignated_outputs);
-
- mInputBindingInfo.clear();
- std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
-
- mOutputBindingInfo.clear();
- std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
-}
-
-int InferenceARMNN::SetPrivateData(void *data)
-{
- // Nothing to do yet.
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-inference_tensor_data_type_e InferenceARMNN::ConvertDataType(armnn::DataType type)
-{
- inference_tensor_data_type_e data_type;
-
- LOGI("ENTER");
-
- LOGI("data type = %d", (int)type);
-
- switch (type) {
- case armnn::DataType::Float32:
- data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- break;
- case armnn::DataType::QuantisedAsymm8:
- data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
- break;
- default:
- LOGE("Invalid Input tensor type so it will use float32 in default.");
- data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- break;
- }
-
- LOGI("LEAVE");
-
- return data_type;
-}
-
-int InferenceARMNN::SetTargetDevices(int types)
-{
- LOGI("ENTER");
-
-
- LOGI("Inference targets are, ");
- if (types & INFERENCE_TARGET_CPU) {
- mAccelType.push_back(armnn::Compute::CpuAcc);
- LOGI("CPU");
- }
-
- if (types & INFERENCE_TARGET_GPU) {
- mAccelType.push_back(armnn::Compute::GpuAcc);
- LOGI("GPU");
- }
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceARMNN::CreateTfLiteNetwork(std::string model_path)
-{
- LOGI("ENTER");
-
- armnnTfLiteParser::ITfLiteParserPtr parser =
- armnnTfLiteParser::ITfLiteParser::Create();
- if (!parser) {
- LOGE("Fail to create a parser.");
- return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
- }
-
- mNetwork = parser->CreateNetworkFromBinaryFile(model_path.c_str());
- if (!mNetwork) {
- LOGE("Fail to create a network.");
- return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
- }
-
- // If there is any input layer designated by user then it is set as input layer.
- // Otherwise, layer from armnn runtime will be set as input.
- if (mDesignated_inputs.empty()) {
- std::vector<std::string> in_names = parser->GetSubgraphInputTensorNames(0);
- for (auto const &name:in_names) {
- mInputBindingInfo.push_back(parser->GetNetworkInputBindingInfo(0, name));
- LOGI("%s layer has been designated as input.", name.c_str());
- }
- } else {
- mInputBindingInfo.clear();
- std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
-
- std::vector<std::string>::iterator iter;
- for (iter = mDesignated_inputs.begin(); iter != mDesignated_inputs.end();
- iter++) {
- std::string name = *iter;
- mInputBindingInfo.push_back(parser->GetNetworkInputBindingInfo(0, name));
- LOGI("%s layer has been designated as input.", name.c_str());
- }
- }
-
- // If there is any output layer designated by user then it is set as output layer.
- // Otherwise, layer from armnn runtime will be set as output.
- if (mDesignated_outputs.empty()) {
- std::vector<std::string> out_names = parser->GetSubgraphOutputTensorNames(0);
- for (auto const &name:out_names) {
- mOutputBindingInfo.push_back(parser->GetNetworkOutputBindingInfo(0, name));
- LOGI("%s layer has been designated as output.", name.c_str());
- }
- } else {
- mOutputBindingInfo.clear();
- std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
-
- std::vector<std::string>::iterator iter;
- for (iter = mDesignated_outputs.begin(); iter != mDesignated_outputs.end(); iter++) {
- std::string name = *iter;
- mOutputBindingInfo.push_back(parser->GetNetworkOutputBindingInfo(0, name));
- LOGI("%s layer has been designated as output.", name.c_str());
- }
- }
-
- LOGI("Input Tensor count = %d", (int)mInputBindingInfo.size());
- LOGI("Output Tensor count = %d", (int)mOutputBindingInfo.size());
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceARMNN::CreateNetwork(std::vector<std::string> model_paths, inference_model_format_e model_format)
-{
- LOGI("ENTER");
-
- // Make sure to check if a given model format is supported or not.
- if (model_format != INFERENCE_MODEL_CAFFE &&
- model_format != INFERENCE_MODEL_TF &&
- model_format != INFERENCE_MODEL_TFLITE &&
- model_format != INFERENCE_MODEL_ONNX) {
- LOGE("Invalid model format.");
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- int ret = INFERENCE_ENGINE_ERROR_NONE;
-
- switch ((int)model_format) {
- case INFERENCE_MODEL_CAFFE:
- case INFERENCE_MODEL_TF:
- case INFERENCE_MODEL_ONNX:
- ret = INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
- // TODO. Call a proper parser.
- break;
- case INFERENCE_MODEL_TFLITE:
- std::string model_path = model_paths[0];
- if (access(model_path.c_str(), F_OK)) {
- LOGE("modelFilePath in [%s] ", model_path.c_str());
- ret = INFERENCE_ENGINE_ERROR_INVALID_PATH;
- break;
- }
-
- LOGI("It will try to load %s model file", model_path.c_str());
- return CreateTfLiteNetwork(model_path);
- }
-
- LOGE("Model format not supported.");
-
- LOGI("LEAVE");
-
- return ret;
-}
-
-int InferenceARMNN::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
-{
- LOGI("ENTER");
-
- int ret = INFERENCE_ENGINE_ERROR_NONE;
-
- armnn::IRuntime::CreationOptions creation_options;
- mRuntime = armnn::IRuntime::Create(creation_options);
-
- LOGI("Created ARMNN runtime");
-
- ret = CreateNetwork(model_paths, model_format);
- if (ret != INFERENCE_ENGINE_ERROR_NONE)
- return ret;
-
- // In default, add CpuRef as fallback.
- mAccelType.push_back(armnn::Compute::CpuRef);
-
- // Optimize the network for a specific runtime compute device, e.g. CpuAcc, GpuAcc
- armnn::IOptimizedNetworkPtr optimizedNet =
- armnn::Optimize(*mNetwork, mAccelType, mRuntime->GetDeviceSpec());
- if (!optimizedNet) {
- LOGE("Fail to optimize network.");
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("Optimized Network.");
-
- // Load the optimized network onto the runtime device
- armnn::Status status = mRuntime->LoadNetwork(mNetworkIdentifier,
- std::move(optimizedNet));
- if (status == armnn::Status::Failure)
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-
- LOGI("Loaded the Network.");
-
- LOGI("LEAVE");
-
- return ret;
-}
-
-int InferenceARMNN::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
+namespace InferenceEngineImpl
+{
- LOGI("ENTER");
-
- // Upper layer will allocate input tensor buffer/buffers.
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceARMNN::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
-{
- LOGI("ENTER");
-
- // Upper layer will allocate output tensor buffer/buffers.
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-
-int InferenceARMNN::GetInputLayerProperty(inference_engine_layer_property &property)
-{
- LOGI("ENTER");
-
- // TODO. Need to check if model file loading is done.
-
- std::vector<armnn::BindingPointInfo>::iterator iter;
- for (iter = mInputBindingInfo.begin(); iter != mInputBindingInfo.end(); iter++) {
- inference_engine_tensor_info out_info;
- armnn::BindingPointInfo bindingInfo = *iter;
- armnn::TensorInfo tensorInfo = bindingInfo.second;
- armnn::TensorShape shape = tensorInfo.GetShape();
- size_t tensor_size = 1;
-
- for (int i = 0; i < (int)tensorInfo.GetNumDimensions(); i++) {
- out_info.shape.push_back(shape[i]);
- tensor_size *= shape[i];
- }
-
- out_info.data_type = ConvertDataType((armnn::DataType)tensorInfo.GetDataType());
- out_info.size = tensor_size;
- property.tensor_infos.push_back(out_info);
- }
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceARMNN::GetOutputLayerProperty(inference_engine_layer_property &property)
-{
- LOGI("ENTER");
-
- // TODO. Need to check if model file loading is done.
-
- std::vector<armnn::BindingPointInfo>::iterator iter;
- for (iter = mOutputBindingInfo.begin(); iter != mOutputBindingInfo.end(); iter++) {
- inference_engine_tensor_info out_info;
- armnn::BindingPointInfo bindingInfo = *iter;
- armnn::TensorInfo tensorInfo = bindingInfo.second;
- armnn::TensorShape shape = tensorInfo.GetShape();
- size_t tensor_size = 1;
-
- for (int i = 0; i < (int)tensorInfo.GetNumDimensions(); i++) {
- out_info.shape.push_back(shape[i]);
- tensor_size *= shape[i];
- }
-
- out_info.data_type = ConvertDataType((armnn::DataType)tensorInfo.GetDataType());
- out_info.size = tensor_size;
- property.tensor_infos.push_back(out_info);
- }
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceARMNN::SetInputLayerProperty(inference_engine_layer_property &property)
-{
- LOGI("ENTER");
-
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("input layer name = %s", name.c_str());
- }
-
- mDesignated_inputs.clear();
- std::vector<std::string>().swap(mDesignated_inputs);
-
- mDesignated_inputs = property.layer_names;
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceARMNN::SetOutputLayerProperty(inference_engine_layer_property &property)
+namespace ARMNNImpl
+{
- LOGI("ENTER");
+ InferenceARMNN::InferenceARMNN(void)
+ : mRuntime(nullptr, &armnn::IRuntime::Destroy)
+ , mNetwork(armnn::INetworkPtr(nullptr, nullptr))
+ {
+ LOGI("ENTER");
+ LOGI("LEAVE");
+ }
+
+ InferenceARMNN::~InferenceARMNN()
+ {
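+		// clear() destroys the elements; swapping with an empty temporary also releases the reserved capacity.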
+ mDesignated_inputs.clear();
+ std::vector<std::string>().swap(mDesignated_inputs);
+
+ mDesignated_outputs.clear();
+ std::vector<std::string>().swap(mDesignated_outputs);
+
+ mInputBindingInfo.clear();
+ std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
+
+ mOutputBindingInfo.clear();
+ std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
+ }
+
+ int InferenceARMNN::SetPrivateData(void *data)
+ {
+ // Nothing to do yet.
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
+ inference_tensor_data_type_e
+ InferenceARMNN::ConvertDataType(armnn::DataType type)
+ {
+ inference_tensor_data_type_e data_type;
+
+ LOGI("ENTER");
+
+ LOGI("data type = %d", (int) type);
+
+ switch (type) {
+ case armnn::DataType::Float32:
+ data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ break;
+ case armnn::DataType::QuantisedAsymm8:
+ data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+ break;
+ default:
+			LOGE("Invalid input tensor type, so float32 will be used by default.");
+ data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ break;
+ }
+
+ LOGI("LEAVE");
+
+ return data_type;
+ }
+
+ int InferenceARMNN::SetTargetDevices(int types)
+ {
+ LOGI("ENTER");
+
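+		// types is a bitmask, so CPU and GPU acceleration can be requested together.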
+		LOGI("Inference targets are:");
+ if (types & INFERENCE_TARGET_CPU) {
+ mAccelType.push_back(armnn::Compute::CpuAcc);
+ LOGI("CPU");
+ }
+
+ if (types & INFERENCE_TARGET_GPU) {
+ mAccelType.push_back(armnn::Compute::GpuAcc);
+ LOGI("GPU");
+ }
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
+ int InferenceARMNN::CreateTfLiteNetwork(std::string model_path)
+ {
+ LOGI("ENTER");
+
+ armnnTfLiteParser::ITfLiteParserPtr parser =
+ armnnTfLiteParser::ITfLiteParser::Create();
+ if (!parser) {
+			LOGE("Failed to create a parser.");
+ return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
+ }
+
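+		// Parse the TensorFlow Lite flatbuffer and build an ArmNN network from it.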
+ mNetwork = parser->CreateNetworkFromBinaryFile(model_path.c_str());
+ if (!mNetwork) {
+			LOGE("Failed to create a network.");
+ return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
+ }
+
+		// If the user designated any input layers then bind those layers as the network inputs.
+		// Otherwise, bind the input tensors reported by the TFLite parser.
+ if (mDesignated_inputs.empty()) {
+ std::vector<std::string> in_names =
+ parser->GetSubgraphInputTensorNames(0);
+ for (auto const &name : in_names) {
+ mInputBindingInfo.push_back(
+ parser->GetNetworkInputBindingInfo(0, name));
+ LOGI("%s layer has been designated as input.", name.c_str());
+ }
+ } else {
+ mInputBindingInfo.clear();
+ std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
+
+ std::vector<std::string>::iterator iter;
+ for (iter = mDesignated_inputs.begin();
+ iter != mDesignated_inputs.end(); iter++) {
+ std::string name = *iter;
+ mInputBindingInfo.push_back(
+ parser->GetNetworkInputBindingInfo(0, name));
+ LOGI("%s layer has been designated as input.", name.c_str());
+ }
+ }
+
+		// If the user designated any output layers then bind those layers as the network outputs.
+		// Otherwise, bind the output tensors reported by the TFLite parser.
+ if (mDesignated_outputs.empty()) {
+ std::vector<std::string> out_names =
+ parser->GetSubgraphOutputTensorNames(0);
+ for (auto const &name : out_names) {
+ mOutputBindingInfo.push_back(
+ parser->GetNetworkOutputBindingInfo(0, name));
+ LOGI("%s layer has been designated as output.", name.c_str());
+ }
+ } else {
+ mOutputBindingInfo.clear();
+ std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
+
+ std::vector<std::string>::iterator iter;
+ for (iter = mDesignated_outputs.begin();
+ iter != mDesignated_outputs.end(); iter++) {
+ std::string name = *iter;
+ mOutputBindingInfo.push_back(
+ parser->GetNetworkOutputBindingInfo(0, name));
+ LOGI("%s layer has been designated as output.", name.c_str());
+ }
+ }
+
+ LOGI("Input Tensor count = %d", (int) mInputBindingInfo.size());
+ LOGI("Output Tensor count = %d", (int) mOutputBindingInfo.size());
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
+ int InferenceARMNN::CreateNetwork(std::vector<std::string> model_paths,
+ inference_model_format_e model_format)
+ {
+ LOGI("ENTER");
+
+		// Check whether the given model format is supported.
+ if (model_format != INFERENCE_MODEL_CAFFE &&
+ model_format != INFERENCE_MODEL_TF &&
+ model_format != INFERENCE_MODEL_TFLITE &&
+ model_format != INFERENCE_MODEL_ONNX) {
+ LOGE("Invalid model format.");
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
+
+ switch ((int) model_format) {
+ case INFERENCE_MODEL_CAFFE:
+ case INFERENCE_MODEL_TF:
+ case INFERENCE_MODEL_ONNX:
+ ret = INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+ // TODO. Call a proper parser.
+ break;
+ case INFERENCE_MODEL_TFLITE:
+ std::string model_path = model_paths[0];
+ if (access(model_path.c_str(), F_OK)) {
+				LOGE("Model file path [%s] does not exist.", model_path.c_str());
+ ret = INFERENCE_ENGINE_ERROR_INVALID_PATH;
+ break;
+ }
+
+			LOGI("Trying to load model file %s", model_path.c_str());
+ return CreateTfLiteNetwork(model_path);
+ }
+
+ LOGE("Model format not supported.");
+
+ LOGI("LEAVE");
+
+ return ret;
+ }
+
+ int InferenceARMNN::Load(std::vector<std::string> model_paths,
+ inference_model_format_e model_format)
+ {
+ LOGI("ENTER");
+
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
+
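+		// Create the ArmNN runtime that will hold and execute the optimized network.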
+ armnn::IRuntime::CreationOptions creation_options;
+ mRuntime = armnn::IRuntime::Create(creation_options);
+
+ LOGI("Created ARMNN runtime");
+
+ ret = CreateNetwork(model_paths, model_format);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ return ret;
+
+		// By default, add CpuRef as a fallback device.
+ mAccelType.push_back(armnn::Compute::CpuRef);
+
+ // Optimize the network for a specific runtime compute device, e.g. CpuAcc, GpuAcc
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
+ *mNetwork, mAccelType, mRuntime->GetDeviceSpec());
+ if (!optimizedNet) {
+			LOGE("Failed to optimize the network.");
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("Optimized Network.");
+
+ // Load the optimized network onto the runtime device
+ armnn::Status status = mRuntime->LoadNetwork(mNetworkIdentifier,
+ std::move(optimizedNet));
+ if (status == armnn::Status::Failure)
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+
+ LOGI("Loaded the Network.");
+
+ LOGI("LEAVE");
+
+ return ret;
+ }
+
+ int InferenceARMNN::GetInputTensorBuffers(
+ std::vector<inference_engine_tensor_buffer> &buffers)
+ {
+ LOGI("ENTER");
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("output layer name = %s", name.c_str());
- }
+		// The upper layer allocates the input tensor buffer(s).
- mDesignated_outputs.clear();
- std::vector<std::string>().swap(mDesignated_outputs);
+ LOGI("LEAVE");
- mDesignated_outputs = property.layer_names;
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
- LOGI("LEAVE");
+ int InferenceARMNN::GetOutputTensorBuffers(
+ std::vector<inference_engine_tensor_buffer> &buffers)
+ {
+ LOGI("ENTER");
+
+		// The upper layer allocates the output tensor buffer(s).
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
+ int InferenceARMNN::GetInputLayerProperty(
+ inference_engine_layer_property &property)
+ {
+ LOGI("ENTER");
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceARMNN::GetBackendCapacity(inference_engine_capacity *capacity)
-{
- LOGI("ENTER");
+		// TODO: check whether the model file has been loaded.
- if (capacity == NULL) {
- LOGE("Bad pointer.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
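+		// Collect the shape, element count and data type of every bound input tensor.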
+ std::vector<armnn::BindingPointInfo>::iterator iter;
+ for (iter = mInputBindingInfo.begin(); iter != mInputBindingInfo.end();
+ iter++) {
+ inference_engine_tensor_info out_info;
+ armnn::BindingPointInfo bindingInfo = *iter;
+ armnn::TensorInfo tensorInfo = bindingInfo.second;
+ armnn::TensorShape shape = tensorInfo.GetShape();
+ size_t tensor_size = 1;
- capacity->supported_accel_devices = INFERENCE_TARGET_CPU |
- INFERENCE_TARGET_GPU;
+ for (int i = 0; i < (int) tensorInfo.GetNumDimensions(); i++) {
+ out_info.shape.push_back(shape[i]);
+ tensor_size *= shape[i];
+ }
- LOGI("LEAVE");
+ out_info.data_type =
+ ConvertDataType((armnn::DataType) tensorInfo.GetDataType());
+ out_info.size = tensor_size;
+ property.tensor_infos.push_back(out_info);
+ }
- return INFERENCE_ENGINE_ERROR_NONE;
-}
+ LOGI("LEAVE");
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
-int InferenceARMNN::CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
-{
- int ret = INFERENCE_ENGINE_ERROR_NONE;
+ int InferenceARMNN::GetOutputLayerProperty(
+ inference_engine_layer_property &property)
+ {
+ LOGI("ENTER");
- LOGI("ENTER");
+		// TODO: check whether the model file has been loaded.
- if (input_buffers.size() != mInputBindingInfo.size()) {
- LOGE("input size(%zu) is different from input binding info's one(%zu).",
- input_buffers.size(), mInputBindingInfo.size());
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
+ std::vector<armnn::BindingPointInfo>::iterator iter;
+ for (iter = mOutputBindingInfo.begin();
+ iter != mOutputBindingInfo.end(); iter++) {
+ inference_engine_tensor_info out_info;
+ armnn::BindingPointInfo bindingInfo = *iter;
+ armnn::TensorInfo tensorInfo = bindingInfo.second;
+ armnn::TensorShape shape = tensorInfo.GetShape();
+ size_t tensor_size = 1;
- if (output_buffers.size() != mOutputBindingInfo.size()) {
- LOGE("output size(%zu) is different from output binding info's one(%zu).",
- output_buffers.size(), mOutputBindingInfo.size());
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
+ for (int i = 0; i < (int) tensorInfo.GetNumDimensions(); i++) {
+ out_info.shape.push_back(shape[i]);
+ tensor_size *= shape[i];
+ }
- LOGI("LEAVE");
+ out_info.data_type =
+ ConvertDataType((armnn::DataType) tensorInfo.GetDataType());
+ out_info.size = tensor_size;
+ property.tensor_infos.push_back(out_info);
+ }
- return ret;
-}
+ LOGI("LEAVE");
-int InferenceARMNN::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
-{
- LOGI("ENTER");
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
- // Make sure to check if tensor buffer count and binding info one are same.
- int err = CheckTensorBuffers(input_buffers, output_buffers);
- if (err != INFERENCE_ENGINE_ERROR_NONE) {
- return err;
- }
+ int InferenceARMNN::SetInputLayerProperty(
+ inference_engine_layer_property &property)
+ {
+ LOGI("ENTER");
- std::vector<armnn::BindingPointInfo>::iterator binding_iter;
- std::vector<inference_engine_tensor_buffer>::iterator buffer_iter;
+ std::vector<std::string>::iterator iter;
+ for (iter = property.layer_names.begin();
+ iter != property.layer_names.end(); iter++) {
+ std::string name = *iter;
+ LOGI("input layer name = %s", name.c_str());
+ }
- // Setup input layer.
- armnn::InputTensors input_tensors;
+ mDesignated_inputs.clear();
+ std::vector<std::string>().swap(mDesignated_inputs);
- for (binding_iter = mInputBindingInfo.begin(), buffer_iter = input_buffers.begin();
- binding_iter != mInputBindingInfo.end(); binding_iter++, buffer_iter++) {
- armnn::BindingPointInfo inBindingInfo = *binding_iter;
- armnn::TensorInfo inputTensorInfo = inBindingInfo.second;
- inference_engine_tensor_buffer tensor_buffer = *buffer_iter;
+ mDesignated_inputs = property.layer_names;
- armnn::Tensor input_tensor(inputTensorInfo, tensor_buffer.buffer);
- input_tensors.push_back({inBindingInfo.first, input_tensor});
+ LOGI("LEAVE");
- armnn::TensorShape shape = inputTensorInfo.GetShape();
- unsigned int tensor_size = 1;
- for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
- tensor_size *= shape[i];
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
+ int InferenceARMNN::SetOutputLayerProperty(
+ inference_engine_layer_property &property)
+ {
+ LOGI("ENTER");
+
+ std::vector<std::string>::iterator iter;
+ for (iter = property.layer_names.begin();
+ iter != property.layer_names.end(); iter++) {
+ std::string name = *iter;
+ LOGI("output layer name = %s", name.c_str());
+ }
+
+ mDesignated_outputs.clear();
+ std::vector<std::string>().swap(mDesignated_outputs);
+
+ mDesignated_outputs = property.layer_names;
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
+ int InferenceARMNN::GetBackendCapacity(inference_engine_capacity *capacity)
+ {
+ LOGI("ENTER");
+
+ if (capacity == NULL) {
+ LOGE("Bad pointer.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ capacity->supported_accel_devices = INFERENCE_TARGET_CPU |
+ INFERENCE_TARGET_GPU;
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
+ int InferenceARMNN::CheckTensorBuffers(
+ std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers)
+ {
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
+
+ LOGI("ENTER");
+
+ if (input_buffers.size() != mInputBindingInfo.size()) {
+			LOGE("Input buffer count (%zu) does not match the input binding count (%zu).",
+				 input_buffers.size(), mInputBindingInfo.size());
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ if (output_buffers.size() != mOutputBindingInfo.size()) {
+			LOGE("Output buffer count (%zu) does not match the output binding count (%zu).",
+				 output_buffers.size(), mOutputBindingInfo.size());
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
- LOGI("Input Tensor dimension = %d (size = %u)", inputTensorInfo.GetNumDimensions(), tensor_size);
- }
+ LOGI("LEAVE");
- // Setup output layer.
- armnn::OutputTensors output_tensors;
+ return ret;
+ }
- for (binding_iter = mOutputBindingInfo.begin(), buffer_iter = output_buffers.begin();
- binding_iter != mOutputBindingInfo.end(); binding_iter++, buffer_iter++) {
- armnn::BindingPointInfo outBindingInfo = *binding_iter;
- armnn::TensorInfo outputTensorInfo = outBindingInfo.second;
- inference_engine_tensor_buffer tensor_buffer = *buffer_iter;
+ int InferenceARMNN::Run(
+ std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers)
+ {
+ LOGI("ENTER");
- armnn::Tensor output_tensor(outputTensorInfo, tensor_buffer.buffer);
- output_tensors.push_back({outBindingInfo.first, output_tensor});
+		// Verify that the tensor buffer counts match the input/output binding counts.
+ int err = CheckTensorBuffers(input_buffers, output_buffers);
+ if (err != INFERENCE_ENGINE_ERROR_NONE) {
+ return err;
+ }
- armnn::TensorShape shape = outputTensorInfo.GetShape();
- unsigned int tensor_size = 1;
- for (unsigned int i = 0; i < outputTensorInfo.GetNumDimensions(); i++)
- tensor_size *= shape[i];
+ std::vector<armnn::BindingPointInfo>::iterator binding_iter;
+ std::vector<inference_engine_tensor_buffer>::iterator buffer_iter;
- LOGI("Output Tensor dimension = %d (size = %u)", outputTensorInfo.GetNumDimensions(), tensor_size);
- }
+ // Setup input layer.
+ armnn::InputTensors input_tensors;
- armnn::Status ret = mRuntime->EnqueueWorkload(mNetworkIdentifier,
- input_tensors, output_tensors);
- if (ret == armnn::Status::Failure)
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ for (binding_iter = mInputBindingInfo.begin(),
+ buffer_iter = input_buffers.begin();
+ binding_iter != mInputBindingInfo.end();
+ binding_iter++, buffer_iter++) {
+ armnn::BindingPointInfo inBindingInfo = *binding_iter;
+ armnn::TensorInfo inputTensorInfo = inBindingInfo.second;
+ inference_engine_tensor_buffer tensor_buffer = *buffer_iter;
- LOGI("LEAVE");
+ armnn::Tensor input_tensor(inputTensorInfo, tensor_buffer.buffer);
+ input_tensors.push_back({ inBindingInfo.first, input_tensor });
- return INFERENCE_ENGINE_ERROR_NONE;
-}
+ armnn::TensorShape shape = inputTensorInfo.GetShape();
+ unsigned int tensor_size = 1;
+ for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions();
+ i++)
+ tensor_size *= shape[i];
-extern "C"
-{
-class IInferenceEngineCommon* EngineCommonInit(void)
-{
- LOGI("ENTER");
+			LOGI("Input Tensor dimension = %u (size = %u)",
+ inputTensorInfo.GetNumDimensions(), tensor_size);
+ }
- InferenceARMNN *engine = new InferenceARMNN();
+ // Setup output layer.
+ armnn::OutputTensors output_tensors;
- LOGI("LEAVE");
+ for (binding_iter = mOutputBindingInfo.begin(),
+ buffer_iter = output_buffers.begin();
+ binding_iter != mOutputBindingInfo.end();
+ binding_iter++, buffer_iter++) {
+ armnn::BindingPointInfo outBindingInfo = *binding_iter;
+ armnn::TensorInfo outputTensorInfo = outBindingInfo.second;
+ inference_engine_tensor_buffer tensor_buffer = *buffer_iter;
- return engine;
-}
+ armnn::Tensor output_tensor(outputTensorInfo, tensor_buffer.buffer);
+ output_tensors.push_back({ outBindingInfo.first, output_tensor });
-void EngineCommonDestroy(class IInferenceEngineCommon *engine)
-{
- LOGI("ENTER");
+ armnn::TensorShape shape = outputTensorInfo.GetShape();
+ unsigned int tensor_size = 1;
+ for (unsigned int i = 0; i < outputTensorInfo.GetNumDimensions();
+ i++)
+ tensor_size *= shape[i];
+
+			LOGI("Output Tensor dimension = %u (size = %u)",
+ outputTensorInfo.GetNumDimensions(), tensor_size);
+ }
+
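+		// Run inference on the loaded network with the bound input and output tensors.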
+ armnn::Status ret = mRuntime->EnqueueWorkload(
+ mNetworkIdentifier, input_tensors, output_tensors);
+ if (ret == armnn::Status::Failure)
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+
+ LOGI("LEAVE");
- delete engine;
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
+ extern "C"
+ {
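+	// Factory entry points; the common inference-engine layer presumably loads these symbols dynamically to create and destroy this backend.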
+ class IInferenceEngineCommon *EngineCommonInit(void)
+ {
+ LOGI("ENTER");
+
+ InferenceARMNN *engine = new InferenceARMNN();
- LOGI("LEAVE");
-}
-}
+ LOGI("LEAVE");
+
+ return engine;
+ }
+
+ void EngineCommonDestroy(class IInferenceEngineCommon *engine)
+ {
+ LOGI("ENTER");
+
+ delete engine;
+
+ LOGI("LEAVE");
+ }
+ }
} /* ARMNNImpl */
} /* InferenceEngineImpl */