 {
 	LOGI("ENTER");
+
+	LOGI("Inference targets are, ");
 	if (types & INFERENCE_TARGET_CPU) {
 		mAccelType.push_back(armnn::Compute::CpuAcc);
-		LOGI("Inference target device is CPU");
+		LOGI("CPU");
 	}
 	if (types & INFERENCE_TARGET_GPU) {
 		mAccelType.push_back(armnn::Compute::GpuAcc);
-		LOGI("Inference target device is GPU");
+		LOGI("GPU");
 	}
 	LOGI("LEAVE");
 	return (void *)mInputTensor.front().GetMemoryArea();
 }
+
+int InferenceARMNN::GetBackendCapacity(inference_engine_capacity *capacity)
+{
+	if (capacity == NULL) {
+		LOGE("Bad pointer.");
+		return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+	}
+
+	capacity->supported_accel_devices = INFERENCE_TARGET_CPU |
+					    INFERENCE_TARGET_GPU;
+
+	return INFERENCE_ENGINE_ERROR_NONE;
+}
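+
+/*
+ * Illustrative usage sketch (not part of the original change): a caller could
+ * query the reported capacity before choosing an accelerator. "engine" is an
+ * assumed handle to this backend; the struct field and constants are the ones
+ * used above.
+ *
+ *   inference_engine_capacity capacity;
+ *   if (engine->GetBackendCapacity(&capacity) == INFERENCE_ENGINE_ERROR_NONE &&
+ *       (capacity.supported_accel_devices & INFERENCE_TARGET_GPU)) {
+ *       // GPU acceleration is available from this backend.
+ *   }
+ */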
+
 int InferenceARMNN::SetInputDataBuffer(tensor_t data)
 {
 	LOGI("ENTER");