// Destructor hunk: migrates the designated-layer and binding-info members
// from std::vector to std::map keyed by layer name, and additionally
// releases the layer maps held inside mInputProperty / mOutputProperty.
InferenceARMNN::~InferenceARMNN()
{
mDesignated_inputs.clear();
// NOTE(review): clear() immediately followed by swap-with-empty-temporary is
// redundant — the swap alone discards all elements and storage. The pattern
// is kept consistently throughout this patch, so it is preserved here.
- std::vector<std::string>().swap(mDesignated_inputs);
+ std::map<std::string, int>().swap(mDesignated_inputs);
mDesignated_outputs.clear();
- std::vector<std::string>().swap(mDesignated_outputs);
+ std::map<std::string, int>().swap(mDesignated_outputs);
mInputBindingInfo.clear();
- std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
+ std::map<std::string, armnn::BindingPointInfo>().swap(mInputBindingInfo);
mOutputBindingInfo.clear();
- std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
+ std::map<std::string, armnn::BindingPointInfo>().swap(mOutputBindingInfo);
+
+ // New in this patch: also release the tensor-info maps of the cached
+ // input/output layer properties.
+ mInputProperty.layers.clear();
+ std::map<std::string, inference_engine_tensor_info>().swap(mInputProperty.layers);
+
+ mOutputProperty.layers.clear();
+ std::map<std::string, inference_engine_tensor_info>().swap(mOutputProperty.layers);
// NOTE(review): the `s` prefix suggests sRuntime is a static/shared runtime;
// destroying it in every instance's destructor is only safe if at most one
// InferenceARMNN instance exists at a time — TODO confirm.
armnn::IRuntime::Destroy(sRuntime);
sRuntime = nullptr;
// Fragment (elided context: this code cannot belong to the destructor above —
// it uses `parser`, so it is presumably from the model-load path).
// The patch switches the "designated layer" test from the old
// mDesignated_inputs vector to the layer map carried in mInputProperty,
// and stores binding info keyed by layer name instead of appending.
// If there is any input layer designated by user then it is set as input layer.
// Otherwise, layer from armnn runtime will be set as input.
- if (mDesignated_inputs.empty()) {
- std::vector<std::string> in_names =
+ if (mInputProperty.layers.empty()) {
// NOTE(review): binding a const reference here is fine if
// GetSubgraphInputTensorNames() returns by value (lifetime extension) or by
// reference — TODO confirm the parser API's return type.
+ const std::vector<std::string>& in_names =
parser->GetSubgraphInputTensorNames(0);
- for (auto const &name : in_names) {
- mInputBindingInfo.push_back(
- parser->GetNetworkInputBindingInfo(0, name));
+ for (const auto &name : in_names) {
+ mInputBindingInfo.insert(
// Style nit: missing space after the comma ("0,name") — cosmetic only.
+ std::make_pair(name, parser->GetNetworkInputBindingInfo(0,name)));
LOGI("%s layer has been designated as input.", name.c_str());
}
} else {
// User designated explicit input layers: rebuild the binding map from the
// names (map keys) recorded in mInputProperty.layers.
mInputBindingInfo.clear();
- std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
-
- std::vector<std::string>::iterator iter;
- for (iter = mDesignated_inputs.begin();
- iter != mDesignated_inputs.end(); iter++) {
- std::string name = *iter;
- mInputBindingInfo.push_back(
- parser->GetNetworkInputBindingInfo(0, name));
- LOGI("%s layer has been designated as input.", name.c_str());
+ std::map<std::string, armnn::BindingPointInfo>().swap(mInputBindingInfo);
+
+ for (auto& layer : mInputProperty.layers) {
+ mInputBindingInfo.insert(
+ std::make_pair(layer.first, parser->GetNetworkInputBindingInfo(0, layer.first)));
+ LOGI("%s layer has been designated as input.", layer.first.c_str());
}
}
// Output side mirrors the input side above.
// If there is any output layer designated by user then it is set as output layer.
// Otherwise, layer from armnn runtime will be set as output.
- if (mDesignated_outputs.empty()) {
- std::vector<std::string> out_names =
+ if (mOutputProperty.layers.empty()) {
+ const std::vector<std::string>& out_names =
parser->GetSubgraphOutputTensorNames(0);
- for (auto const &name : out_names) {
- mOutputBindingInfo.push_back(
- parser->GetNetworkOutputBindingInfo(0, name));
+ for (const auto &name : out_names) {
+ mOutputBindingInfo.insert(
+ std::make_pair(name, parser->GetNetworkOutputBindingInfo(0, name)));
LOGI("%s layer has been designated as output.", name.c_str());
}
} else {
mOutputBindingInfo.clear();
- std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
-
- std::vector<std::string>::iterator iter;
- for (iter = mDesignated_outputs.begin();
- iter != mDesignated_outputs.end(); iter++) {
- std::string name = *iter;
- mOutputBindingInfo.push_back(
- parser->GetNetworkOutputBindingInfo(0, name));
- LOGI("%s layer has been designated as output.", name.c_str());
+ std::map<std::string, armnn::BindingPointInfo>().swap(mOutputBindingInfo);
+
+ for (auto& layer : mOutputProperty.layers) {
+ mOutputBindingInfo.insert(
+ std::make_pair(layer.first, parser->GetNetworkOutputBindingInfo(0, layer.first)));
+ LOGI("%s layer has been designated as output.", layer.first.c_str());
}
}
}
// Signature-only hunk: the buffers out-parameter becomes a name-keyed map.
// NOTE(review): as shown, this int-returning function has no visible return
// statement — the body between ENTER and the closing brace is presumably
// elided by the diff; verify the full function in the applied file.
int InferenceARMNN::GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
LOGI("ENTER");
}
// Heavily elided region: the GetOutputTensorBuffers signature hunk below is
// followed (after hidden context) by what appear to be fragments of the
// input- and output-layer-property getters that fill `property.layers`.
int InferenceARMNN::GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
LOGI("ENTER");
// TODO. Need to check if model file loading is done.
// Patch replaces explicit iterator loops over vectors with range-for over
// the new name-keyed binding map; bindings are now taken by reference
// instead of copied per iteration.
- std::vector<armnn::BindingPointInfo>::iterator iter;
- for (iter = mInputBindingInfo.begin(); iter != mInputBindingInfo.end();
- iter++) {
+ for (auto& info : mInputBindingInfo) {
inference_engine_tensor_info out_info = {};
- armnn::BindingPointInfo bindingInfo = *iter;
- armnn::TensorInfo tensorInfo = bindingInfo.second;
- armnn::TensorShape shape = tensorInfo.GetShape();
+ armnn::BindingPointInfo& bindingInfo = info.second;
+ armnn::TensorInfo& tensorInfo = bindingInfo.second;
// NOTE(review): binding a non-const TensorShape& requires a non-const
// GetShape() overload returning a reference — confirm against the ArmNN
// headers; `const armnn::TensorShape&` would be the safer choice. Also,
// `shape` is not used in any visible line (its use is presumably elided).
+ armnn::TensorShape& shape = tensorInfo.GetShape();
size_t tensor_size = 1;
// NOTE(review): as displayed, the insert into property.layers sits inside
// the per-dimension loop, and tensor_size is never multiplied by shape[i].
// This looks like diff elision (the accumulation lines and the loop's
// closing brace are hidden) — verify placement in the applied file.
for (int i = 0; i < (int) tensorInfo.GetNumDimensions(); i++) {
out_info.data_type =
ConvertDataType((armnn::DataType) tensorInfo.GetDataType());
out_info.size = tensor_size;
- property.tensor_infos.push_back(out_info);
+ property.layers.insert(std::make_pair(info.first, out_info));
}
LOGI("LEAVE");
// Output-property fragment: mirrors the input fragment above.
// TODO. Need to check if model file loading is done.
- std::vector<armnn::BindingPointInfo>::iterator iter;
- for (iter = mOutputBindingInfo.begin();
- iter != mOutputBindingInfo.end(); iter++) {
+ for (auto& info : mOutputBindingInfo) {
inference_engine_tensor_info out_info = {};
- armnn::BindingPointInfo bindingInfo = *iter;
- armnn::TensorInfo tensorInfo = bindingInfo.second;
- armnn::TensorShape shape = tensorInfo.GetShape();
+ armnn::BindingPointInfo& bindingInfo = info.second;
+ armnn::TensorInfo& tensorInfo = bindingInfo.second;
+ armnn::TensorShape& shape = tensorInfo.GetShape();
size_t tensor_size = 1;
for (int i = 0; i < (int) tensorInfo.GetNumDimensions(); i++) {
out_info.data_type =
ConvertDataType((armnn::DataType) tensorInfo.GetDataType());
out_info.size = tensor_size;
- property.tensor_infos.push_back(out_info);
+ property.layers.insert(std::make_pair(info.first, out_info));
}
LOGI("LEAVE");
// Fragment (function header elided — presumably the input-layer-property
// setter): logs the designated input layer names, then caches the whole
// property instead of copying just the name list.
{
LOGI("ENTER");
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin();
- iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("input layer name = %s", name.c_str());
+ for (auto& layer : property.layers) {
+ LOGI("input layer name = %s", layer.first.c_str());
}
// NOTE(review): after this patch mDesignated_inputs is cleared here but no
// longer assigned anywhere visible (mInputProperty takes over its role);
// the member looks vestigial — consider removing it in a follow-up.
mDesignated_inputs.clear();
- std::vector<std::string>().swap(mDesignated_inputs);
+ std::map<std::string, int>().swap(mDesignated_inputs);
- mDesignated_inputs = property.layer_names;
+ mInputProperty = property;
LOGI("LEAVE");
// Fragment (function header elided — presumably the output-layer-property
// setter): mirrors the input-side setter above.
{
LOGI("ENTER");
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin();
- iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("output layer name = %s", name.c_str());
+ for (auto& layer : property.layers) {
+ LOGI("output layer name = %s", layer.first.c_str());
}
// NOTE(review): mDesignated_outputs is likewise cleared but never
// repopulated after this patch (mOutputProperty replaces it) — vestigial.
mDesignated_outputs.clear();
- std::vector<std::string>().swap(mDesignated_outputs);
+ std::map<std::string, int>().swap(mDesignated_outputs);
- mDesignated_outputs = property.layer_names;
+ mOutputProperty = property;
LOGI("LEAVE");
}
// Signature-only hunk: both buffer collections become name-keyed maps.
// NOTE(review): `ret` is declared but no return is visible — the rest of the
// body is presumably elided by the diff; verify in the applied file.
int InferenceARMNN::CheckTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
{
int ret = INFERENCE_ENGINE_ERROR_NONE;
}
// Run() hunk: buffer parameters become name-keyed maps; the binding/buffer
// pairs are iterated in lock-step to build ArmNN input/output tensor lists.
int InferenceARMNN::Run(
- std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
{
LOGI("ENTER");
// Early-error path; the condition guarding this return is elided by the diff.
return err;
}
- std::vector<armnn::BindingPointInfo>::iterator binding_iter;
- std::vector<inference_engine_tensor_buffer>::iterator buffer_iter;
-
// The two `auto` iterators are later reused for the output maps (below);
// this only compiles because mInputBindingInfo/mOutputBindingInfo share one
// map type and input_buffers/output_buffers share another.
+ auto binding_iter = mInputBindingInfo.begin();
+ auto buffer_iter = input_buffers.begin();
// Setup input layer.
armnn::InputTensors input_tensors;
-
- for (binding_iter = mInputBindingInfo.begin(),
- buffer_iter = input_buffers.begin();
- binding_iter != mInputBindingInfo.end();
// NOTE(review): lock-step iteration over two std::maps assumes both hold
// exactly the same layer-name keys. std::map orders by key, so equal key
// sets do pair up correctly — but a missing or extra buffer desynchronizes
// the pairing and can advance buffer_iter past end() (undefined behavior).
// Safer: look the buffer up via input_buffers.find(binding_iter->first)
// and fail loudly on a miss. Also prefer pre-increment on map iterators.
+ for (; binding_iter != mInputBindingInfo.end();
binding_iter++, buffer_iter++) {
- armnn::BindingPointInfo inBindingInfo = *binding_iter;
- armnn::TensorInfo inputTensorInfo = inBindingInfo.second;
- inference_engine_tensor_buffer tensor_buffer = *buffer_iter;
// Patch avoids per-iteration copies by taking references into the maps.
+ armnn::BindingPointInfo& inBindingInfo = binding_iter->second;
+ armnn::TensorInfo& inputTensorInfo = inBindingInfo.second;
+ inference_engine_tensor_buffer& tensor_buffer = buffer_iter->second;
armnn::Tensor input_tensor(inputTensorInfo, tensor_buffer.buffer);
input_tensors.push_back({ inBindingInfo.first, input_tensor });
// NOTE(review): non-const TensorShape& requires a non-const GetShape()
// overload returning a reference — confirm against the ArmNN headers;
// `const armnn::TensorShape&` would be safer.
- armnn::TensorShape shape = inputTensorInfo.GetShape();
+ armnn::TensorShape& shape = inputTensorInfo.GetShape();
unsigned int tensor_size = 1;
// Loop body elided by the diff (presumably accumulates tensor_size and logs).
for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions();
i++)
inputTensorInfo.GetNumDimensions(), tensor_size);
}
// Reuse the same iterators for the output maps (same map types as above).
+ binding_iter = mOutputBindingInfo.begin();
+ buffer_iter = output_buffers.begin();
// Setup output layer.
armnn::OutputTensors output_tensors;
- for (binding_iter = mOutputBindingInfo.begin(),
- buffer_iter = output_buffers.begin();
- binding_iter != mOutputBindingInfo.end();
// NOTE(review): same lock-step-iteration caveat as the input loop above.
+ for (; binding_iter != mOutputBindingInfo.end();
binding_iter++, buffer_iter++) {
- armnn::BindingPointInfo outBindingInfo = *binding_iter;
- armnn::TensorInfo outputTensorInfo = outBindingInfo.second;
- inference_engine_tensor_buffer tensor_buffer = *buffer_iter;
+ armnn::BindingPointInfo& outBindingInfo = binding_iter->second;
+ armnn::TensorInfo& outputTensorInfo = outBindingInfo.second;
+ inference_engine_tensor_buffer& tensor_buffer = buffer_iter->second;
armnn::Tensor output_tensor(outputTensorInfo, tensor_buffer.buffer);
output_tensors.push_back({ outBindingInfo.first, output_tensor });
- armnn::TensorShape shape = outputTensorInfo.GetShape();
+ armnn::TensorShape& shape = outputTensorInfo.GetShape();
unsigned int tensor_size = 1;
for (unsigned int i = 0; i < outputTensorInfo.GetNumDimensions();
i++)