ARMNN_DATA_TYPE_MAX
};
-static const char *ARMNN_DATA_TYPE_STR[ARMNN_DATA_TYPE_MAX] =
-{
- "",
- "float32",
- "int32",
- "uint8",
- "int64",
- "string",
- "bool"
-};
-
InferenceARMNN::InferenceARMNN(void) :
mRuntime(nullptr, &armnn::IRuntime::Destroy),
mNetwork(armnn::INetworkPtr(nullptr, nullptr))
{
;
}
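+// Map an ARMNN tensor data type to the corresponding
+// inference_tensor_data_type_e value. Only FLOAT32 and UINT8 are mapped
+// explicitly; anything else falls back to FLOAT32.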
+inference_tensor_data_type_e InferenceARMNN::ConvertDataType(armnn::DataType type)
+{
+ inference_tensor_data_type_e data_type;
+
+ switch ((int)type) {
+ case ARMNN_DATA_TYPE_FLOAT32:
+ data_type = TENSOR_DATA_TYPE_FLOAT32;
+ break;
+ case ARMNN_DATA_TYPE_UINT8:
+ data_type = TENSOR_DATA_TYPE_UINT8;
+ break;
+ default:
+ LOGE("Invalid Input tensor type so it will use float32 in default.");
+ data_type = TENSOR_DATA_TYPE_FLOAT32;
+ break;
+ }
+
+ return data_type;
+}
+
void *InferenceARMNN::AllocateTensorBuffer(armnn::DataType type, int tensor_size)
{
void *tensor_buffer = nullptr;
};
}
-int InferenceARMNN::SetInputTensorParamNode(std::string node)
-{
- LOGI("ENTER");
-
- mInputLayerName = node;
- LOGI("Input Layer Name = %s", node.c_str());
-
- LOGI("LEAVE");
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceARMNN::SetOutputTensorParamNodes(std::vector<std::string> nodes)
-{
- LOGI("ENTER");
-
- mOutputLayerName = nodes;
- std::string output_layer_names;
-
- LOGI("Output Layer Names");
- for (int i = 0; i < (int)nodes.size(); i++) {
- std::string node_name = nodes[i];
-
- output_layer_names.append(node_name.c_str());
- output_layer_names.append(" ");
- LOGI("%s", output_layer_names.c_str());
- }
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
int InferenceARMNN::SetTargetDevices(int types)
{
LOGI("ENTER");
return ret;
}
-// Create input tensor buffer and set the input layer type.
-int InferenceARMNN::CreateInputLayerPassage()
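+// Hand back any input tensor buffers pre-allocated by this backend;
+// none are allocated here, so the vector is returned as-is.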
+int InferenceARMNN::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
{
LOGI("ENTER");
- armnn::BindingPointInfo bindingInfo = inputBindingInfo.front();
- armnn::TensorInfo tensorInfo = bindingInfo.second;
+ // The upper layer will allocate the input tensor buffer(s).
- LOGI("Input Tensor Info");
- armnn::TensorShape shape = tensorInfo.GetShape();
- int tensor_size = 1;
- for (int i = 0; i < (int)tensorInfo.GetNumDimensions(); i++) {
- tensor_size *= shape[i];
- }
+ LOGI("LEAVE");
- LOGI("Input Tensor size = %d", tensor_size);
- LOGI("Input Tensor type = %d(%s)", (int)tensorInfo.GetDataType(),
- ARMNN_DATA_TYPE_STR[(int)tensorInfo.GetDataType()]);
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
- void *inputData = AllocateTensorBuffer((armnn::DataType)tensorInfo.GetDataType(),
- tensor_size);
- if (inputData == nullptr) {
- LOGE("Fail to allocate tensor buffer.");
- return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
- }
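+// Describe every input tensor of the loaded network: shape, data type
+// and total element count are pushed into property.tensor_infos.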
+int InferenceARMNN::GetInputLayerProperty(inference_engine_layer_property &property)
+{
+ LOGI("ENTER");
- mInputLayerType = (int)tensorInfo.GetDataType();
+ // TODO. Need to check if model file loading is done.
- armnn::Tensor input_tensor(tensorInfo, inputData);
+ std::vector<armnn::BindingPointInfo>::iterator iter;
+ for (iter = inputBindingInfo.begin(); iter != inputBindingInfo.end(); iter++) {
+ inference_engine_tensor_info out_info;
+ armnn::BindingPointInfo bindingInfo = *iter;
+ armnn::TensorInfo tensorInfo = bindingInfo.second;
+ armnn::TensorShape shape = tensorInfo.GetShape();
+ size_t tensor_size = 1;
- mInputTensor.clear();
- mInputTensor.push_back(input_tensor);
+ for (int i = 0; i < (int)tensorInfo.GetNumDimensions(); i++) {
+ out_info.shape.push_back(shape[i]);
+ tensor_size *= shape[i];
+ }
+
+ out_info.data_type = ConvertDataType((armnn::DataType)tensorInfo.GetDataType());
+ out_info.size = tensor_size;
+ property.tensor_infos.push_back(out_info);
+ }
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceARMNN::GetInputLayerAttrType()
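+// Same as GetInputLayerProperty(), but walks outputBindingInfo to
+// describe the output tensors instead.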
+int InferenceARMNN::GetOutputLayerProperty(inference_engine_layer_property &property)
{
LOGI("ENTER");
- LOGI("LEAVE");
+ // TODO. Need to check if model file loading is done.
- return mInputLayerType;
-}
+ std::vector<armnn::BindingPointInfo>::iterator iter;
+ for (iter = outputBindingInfo.begin(); iter != outputBindingInfo.end(); iter++) {
+ inference_engine_tensor_info out_info;
+ armnn::BindingPointInfo bindingInfo = *iter;
+ armnn::TensorInfo tensorInfo = bindingInfo.second;
+ armnn::TensorShape shape = tensorInfo.GetShape();
+ size_t tensor_size = 1;
-void * InferenceARMNN::GetInputDataPtr()
-{
- LOGI("ENTER");
+ for (int i = 0; i < (int)tensorInfo.GetNumDimensions(); i++) {
+ out_info.shape.push_back(shape[i]);
+ tensor_size *= shape[i];
+ }
+
+ out_info.data_type = ConvertDataType((armnn::DataType)tensorInfo.GetDataType());
+ out_info.size = tensor_size;
+ property.tensor_infos.push_back(out_info);
+ }
LOGI("LEAVE");
- // TODO. std::move
- return (void *)mInputTensor.front().GetMemoryArea();
+ return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceARMNN::GetBackendCapacity(inference_engine_capacity *capacity)
{
+ LOGI("ENTER");
+
if (capacity == NULL) {
LOGE("Bad pointer.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
capacity->supported_accel_devices = INFERENCE_TARGET_CPU |
INFERENCE_TARGET_GPU;
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceARMNN::SetInputDataBuffer(tensor_t data)
-{
- LOGI("ENTER");
-
- // data contains only tensor shape information such as NCHW ro NHWC,
- // and front end of ARMNN backend, inference-engine-interface, doesn't
- // provide tensor buffer.
-
- std::vector<int> tensor = data.dimInfo.front();
-
- if (tensor.empty()) {
- LOGE("Tensor data is null");
- return INFERENCE_ENGINE_ERROR_NONE;
- }
-
- LOGI("ch(%d), height(%d), wight(%d)", (int)tensor[1], (int)tensor[2], (int)tensor[3]);
-
- int tensor_size = 1;
- for (int i = 0; i < (int)tensor.size(); i++) {
- tensor_size *= (int)tensor[i];
- }
-
- LOGI("Input Tensor size : %d", tensor_size);
-
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceARMNN::Run()
+int InferenceARMNN::Run(std::vector<inference_engine_tensor_buffer> &input_buffers)
{
LOGI("ENTER");
- // TODO. check tensor info.
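+ // Only the first input and output bindings are handled for now;
+ // the input buffer comes from the caller and the output buffer is
+ // allocated below.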
+ armnn::BindingPointInfo outBindingInfo = outputBindingInfo.front();
+ armnn::TensorInfo outputTensorInfo = outBindingInfo.second;
+ armnn::TensorShape shape = outputTensorInfo.GetShape();
- armnn::BindingPointInfo bindingInfo = outputBindingInfo.front();
- armnn::TensorInfo tensorInfo = bindingInfo.second;
- armnn::TensorShape shape = tensorInfo.GetShape();
+ // input
+ armnn::BindingPointInfo inBindingInfo = inputBindingInfo.front();
+ armnn::TensorInfo inputTensorInfo = inBindingInfo.second;
int tensor_size = 1;
- for (int i = 0; i < (int)tensorInfo.GetNumDimensions(); i++) {
+ for (int i = 0; i < (int)outputTensorInfo.GetNumDimensions(); i++) {
tensor_size *= shape[i];
}
LOGI("Output Tensor size = %d", tensor_size);
- void *outputData = AllocateTensorBuffer((armnn::DataType)tensorInfo.GetDataType(),
+ // TODO. consider multiple inputs and outputs.
+
+ void *outputData = AllocateTensorBuffer((armnn::DataType)outputTensorInfo.GetDataType(),
tensor_size);
if (outputData == nullptr) {
LOGE("Fail to allocate tensor buffer.");
return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
}
- armnn::Tensor output_tensor(tensorInfo, outputData);
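+ // Wrap the caller-provided input buffer and the newly allocated
+ // output buffer into ARMNN tensors.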
+ armnn::Tensor output_tensor(outputTensorInfo, outputData);
+ armnn::Tensor input_tensor(inputTensorInfo, input_buffers.front().buffer);
mOutputTensor.clear();
mOutputTensor.push_back(output_tensor);
- // TODO. consider mutiple input and output.
+ std::vector<armnn::Tensor> InputTensors;
+ InputTensors.push_back(input_tensor);
+
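+ // Pair each tensor with its layer binding id and run the workload
+ // on the loaded network.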
armnn::InputTensors input_tensors;
armnn::OutputTensors output_tensors;
- input_tensors.push_back({inputBindingInfo[0].first, mInputTensor.front()});
+ input_tensors.push_back({inputBindingInfo[0].first, InputTensors.front()});
output_tensors.push_back({outputBindingInfo[0].first, mOutputTensor.front()});
armnn::Status ret = mRuntime->EnqueueWorkload(mNetworkIdentifier,
- input_tensors, output_tensors);
+ input_tensors, output_tensors);
if (ret == armnn::Status::Failure)
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- // release all input tensor buffers because they aren't used anymore.
- std::vector<armnn::Tensor>::iterator iter;
- for (iter = mInputTensor.begin(); iter != mInputTensor.end(); iter++) {
- armnn::Tensor inputTensor = *iter;
- ReleaseTensorBuffer((armnn::DataType)inputTensor.GetDataType(),
- inputTensor.GetMemoryArea());
- }
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceARMNN::Run(std::vector<float> tensor)
-{
- LOGI("ENTER");
-
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;