InferenceARMNN::~InferenceARMNN()
{
- ;
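+ // Swapping each member vector with an empty temporary releases its heap
+ // capacity as well; clear() alone keeps the allocation alive.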
+ mDesignated_inputs.clear();
+ std::vector<std::string>().swap(mDesignated_inputs);
+
+ mDesignated_outputs.clear();
+ std::vector<std::string>().swap(mDesignated_outputs);
+
+ mInputBindingInfo.clear();
+ std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
+
+ mOutputBindingInfo.clear();
+ std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
}
inference_tensor_data_type_e InferenceARMNN::ConvertDataType(armnn::DataType type)
if (!mNetwork)
return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
- std::vector<std::string> in_names = parser->GetSubgraphInputTensorNames(0);
- for (auto const &name:in_names) {
- mInputBindingInfo.push_back(parser->GetNetworkInputBindingInfo(0, name));
+ // If the user has designated any input layers then bind those layers as inputs.
+ // Otherwise, bind the input layers reported by the parser.
+ if (mDesignated_inputs.empty()) {
+ std::vector<std::string> in_names = parser->GetSubgraphInputTensorNames(0);
+ for (auto const &name : in_names) {
+ mInputBindingInfo.push_back(parser->GetNetworkInputBindingInfo(0, name));
+ LOGI("%s layer has been set as input.", name.c_str());
+ }
+ } else {
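+ // Drop any existing bindings before binding the user-designated layers.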
+ mInputBindingInfo.clear();
+ std::vector<armnn::BindingPointInfo>().swap(mInputBindingInfo);
+
+ for (const auto &name : mDesignated_inputs) {
+ mInputBindingInfo.push_back(parser->GetNetworkInputBindingInfo(0, name));
+ LOGI("%s layer has been designated as input.", name.c_str());
+ }
}
- std::vector<std::string> out_names = parser->GetSubgraphOutputTensorNames(0);
- for (auto const &name:out_names) {
- mOutputBindingInfo.push_back(parser->GetNetworkOutputBindingInfo(0, name));
+ // If the user has designated any output layers then bind those layers as outputs.
+ // Otherwise, bind the output layers reported by the parser.
+ if (mDesignated_outputs.empty()) {
+ std::vector<std::string> out_names = parser->GetSubgraphOutputTensorNames(0);
+ for (auto const &name : out_names) {
+ mOutputBindingInfo.push_back(parser->GetNetworkOutputBindingInfo(0, name));
+ LOGI("%s layer has been set as output.", name.c_str());
+ }
+ } else {
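+ // Drop any existing bindings before binding the user-designated layers.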
+ mOutputBindingInfo.clear();
+ std::vector<armnn::BindingPointInfo>().swap(mOutputBindingInfo);
+
+ for (const auto &name : mDesignated_outputs) {
+ mOutputBindingInfo.push_back(parser->GetNetworkOutputBindingInfo(0, name));
+ LOGI("%s layer has been designated as output.", name.c_str());
+ }
}
LOGI("Input Tensor count = %d", (int)mInputBindingInfo.size());
return INFERENCE_ENGINE_ERROR_NONE;
}
+int InferenceARMNN::SetInputLayerProperty(inference_engine_layer_property &property)
+{
+ LOGI("ENTER");
+
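+ // Remember the layer names designated by the caller. They take precedence
+ // over the parser-reported layers when the input bindings are created.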
+ for (const auto &name : property.layer_names) {
+ LOGI("input layer name = %s", name.c_str());
+ }
+
+ mDesignated_inputs.clear();
+ std::vector<std::string>().swap(mDesignated_inputs);
+
+ mDesignated_inputs = property.layer_names;
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceARMNN::SetOutputLayerProperty(inference_engine_layer_property &property)
+{
+ LOGI("ENTER");
+
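+ // Remember the layer names designated by the caller. They take precedence
+ // over the parser-reported layers when the output bindings are created.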
+ for (const auto &name : property.layer_names) {
+ LOGI("output layer name = %s", name.c_str());
+ }
+
+ mDesignated_outputs.clear();
+ std::vector<std::string>().swap(mDesignated_outputs);
+
+ mDesignated_outputs = property.layer_names;
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
int InferenceARMNN::GetBackendCapacity(inference_engine_capacity *capacity)
{
LOGI("ENTER");
return INFERENCE_ENGINE_ERROR_NONE;
}
+
+int InferenceARMNN::CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers)
+{
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
+
+ LOGI("ENTER");
+
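+ // Each tensor buffer must correspond one-to-one with a binding info entry.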
+ if (input_buffers.size() != mInputBindingInfo.size()) {
+ LOGE("input size(%d) is different from input binding info's one(%d).",
+ input_buffers.size(), mInputBindingInfo.size());
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ if (output_buffers.size() != mOutputBindingInfo.size()) {
+ LOGE("output size(%d) is different from output binding info's one(%d).",
+ output_buffers.size(), mOutputBindingInfo.size());
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ LOGI("LEAVE");
+
+ return ret;
+}
+
int InferenceARMNN::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
std::vector<inference_engine_tensor_buffer> &output_buffers)
{
LOGI("ENTER");
+ // Make sure that the given tensor buffer counts match the binding info counts.
+ int err = CheckTensorBuffers(input_buffers, output_buffers);
+ if (err != INFERENCE_ENGINE_ERROR_NONE) {
+ return err;
+ }
+
armnn::BindingPointInfo outBindingInfo = mOutputBindingInfo.front();
armnn::TensorInfo outputTensorInfo = outBindingInfo.second;
armnn::TensorShape shape = outputTensorInfo.GetShape();