	return data_type;
}
-void *InferenceARMNN::AllocateTensorBuffer(armnn::DataType type, int tensor_size)
-{
-	void *tensor_buffer = nullptr;
-
-	switch ((int)type) {
-	case ARMNN_DATA_TYPE_FLOAT32:
-		tensor_buffer = (void *)(new float[tensor_size]);
-		break;
-	case ARMNN_DATA_TYPE_UINT8:
-		tensor_buffer = (void *)(new unsigned char[tensor_size]);
-		break;
-	default:
-		LOGE("Invalid Input tensor type.");
-		return nullptr;
-	};
-
-	return tensor_buffer;
-}
-
-void InferenceARMNN::ReleaseTensorBuffer(armnn::DataType type, void *tensor_buffer)
-{
-	if (tensor_buffer == nullptr) {
-		LOGE("Invalid Input buffer.");
-		return;
-	}
-
-	switch ((int)type) {
-	case ARMNN_DATA_TYPE_FLOAT32:
-		delete[] (float *)(tensor_buffer);
-		break;
-	case ARMNN_DATA_TYPE_UINT8:
-		delete[] (unsigned char *)(tensor_buffer);
-		break;
-	default:
-		LOGE("Invalid Input tensor type.");
-		break;
-	};
-}
-
int InferenceARMNN::SetTargetDevices(int types)
{
LOGI("ENTER");
private:
	int CreateTfLiteNetwork(std::string model_path);
	int CreateNetwork(std::vector<std::string> model_paths, inference_model_format_e model_format);
-	void *AllocateTensorBuffer(armnn::DataType type, int tensor_size);
	inference_tensor_data_type_e ConvertDataType(armnn::DataType type);
-	void ReleaseTensorBuffer(armnn::DataType type, void *tensor_buffer);
	int CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
			std::vector<inference_engine_tensor_buffer> &output_buffers);