Implement GetBackendCapacity callback
author: Inki Dae <inki.dae@samsung.com>
Mon, 10 Feb 2020 06:28:58 +0000 (15:28 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Mon, 10 Feb 2020 06:28:58 +0000 (15:28 +0900)
This patch implements the GetBackendCapacity callback, which
reports the device types on which ARMNN can run inference.

Change-Id: If67f53f8e0932ec997dd3ba03bf065dc64056627
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_armnn.cpp
src/inference_engine_armnn_private.h

index 137613968c3919a0640bb6e1dd2c811638b3bd6f..464c45997fcc0ed59161945fe1c36f4c6aefac1f 100644 (file)
@@ -138,14 +138,16 @@ int InferenceARMNN::SetTargetDevices(int types)
 {
     LOGI("ENTER");
 
+
+    LOGI("Inference targets are, ");
     if (types & INFERENCE_TARGET_CPU) {
         mAccelType.push_back(armnn::Compute::CpuAcc);
-        LOGI("Inference target device is CPU");
+        LOGI("CPU");
     }
 
     if (types & INFERENCE_TARGET_GPU) {
         mAccelType.push_back(armnn::Compute::GpuAcc);
-        LOGI("Inference target device is GPU");
+        LOGI("GPU");
     }
 
     LOGI("LEAVE");
@@ -295,6 +297,19 @@ void * InferenceARMNN::GetInputDataPtr()
     return (void *)mInputTensor.front().GetMemoryArea();
 }
 
+int InferenceARMNN::GetBackendCapacity(inference_engine_capacity *capacity)
+{
+    if (capacity == NULL) {
+        LOGE("Bad pointer.");
+        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+    }
+
+    capacity->supported_accel_devices = INFERENCE_TARGET_CPU |
+                                        INFERENCE_TARGET_GPU;
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
 int InferenceARMNN::SetInputDataBuffer(tensor_t data)
 {
     LOGI("ENTER");
index 01e82208d9c42130db60b0152c65bb18ab7c5d72..06dbfa5c461616e070f04b9b5d89d271eca4c5fe 100644 (file)
@@ -63,6 +63,8 @@ public:
 
     void * GetInputDataPtr() override;
 
+    int GetBackendCapacity(inference_engine_capacity *capacity) override;
+
     int SetInputDataBuffer(tensor_t data) override;
 
     int Run() override;