Consider more than one inference target device
author Inki Dae <inki.dae@samsung.com>
Fri, 7 Feb 2020 06:43:13 +0000 (15:43 +0900)
committer Inki Dae <inki.dae@samsung.com>
Fri, 7 Feb 2020 06:43:13 +0000 (15:43 +0900)
Change-Id: I51b9a04f60e4b4fe873d8d8a1fda6a122532ce31
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_armnn.cpp
src/inference_engine_armnn_private.h

index 956c1e071f184b86e39e916380d643ec44cf34fd..137613968c3919a0640bb6e1dd2c811638b3bd6f 100644 (file)
@@ -134,27 +134,19 @@ int InferenceARMNN::SetOutputTensorParamNodes(std::vector<std::string> nodes)
     return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-int InferenceARMNN::SetTargetDevice(inference_target_type_e type)
+int InferenceARMNN::SetTargetDevices(int types)
 {
     LOGI("ENTER");
 
-    switch (type) {
-    case INFERENCE_TARGET_CPU:
+    if (types & INFERENCE_TARGET_CPU) {
         mAccelType.push_back(armnn::Compute::CpuAcc);
         LOGI("Inference target device is CPU");
-        break;
-    case INFERENCE_TARGET_GPU:
+    }
+
+    if (types & INFERENCE_TARGET_GPU) {
         mAccelType.push_back(armnn::Compute::GpuAcc);
         LOGI("Inference target device is GPU");
-        break;
-    case INFERENCE_TARGET_CUSTOM:
-    case INFERENCE_TARGET_NONE:
-    default:
-        LOGW("Not supported device type [%d], Set CPU mode", (int)type);
-        break;
-    }
-
-    // TODO. Accel type should be pushed according to a given compute device order.
+    }
 
     LOGI("LEAVE");
 
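A minimal, self-contained sketch (not part of this patch) of how the new bitmask API can be driven. The INFERENCE_TARGET_* values are assumed here to be power-of-two flags so that several devices can be OR-ed together, and Compute is a local stand-in for armnn::Compute; the real definitions live in the inference engine and Arm NN headers:

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-ins for the real enums; actual values may differ.
    enum inference_target_type_e {
        INFERENCE_TARGET_NONE = 0,
        INFERENCE_TARGET_CPU  = 1 << 0,
        INFERENCE_TARGET_GPU  = 1 << 1,
    };

    enum class Compute { CpuAcc, GpuAcc };

    // Mirrors the patched SetTargetDevices(): each set bit appends one
    // backend, so CPU | GPU selects both accelerators.
    static std::vector<Compute> SelectBackends(int types)
    {
        std::vector<Compute> accel;

        if (types & INFERENCE_TARGET_CPU)
            accel.push_back(Compute::CpuAcc);

        if (types & INFERENCE_TARGET_GPU)
            accel.push_back(Compute::GpuAcc);

        return accel;
    }

    int main(void)
    {
        std::vector<Compute> accel =
                SelectBackends(INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU);

        std::printf("selected %zu backend(s)\n", accel.size());
        return 0;
    }

Note one behavioral difference: the removed switch logged a warning for unsupported types, while the bitmask version silently accepts a value with no known bit set and leaves mAccelType empty.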
index e0286ed8025d24bd66df6d48558318aa3cdecc67..01e82208d9c42130db60b0152c65bb18ab7c5d72 100644 (file)
@@ -53,7 +53,7 @@ public:
     // Output Tensor Params
     int SetOutputTensorParamNodes(std::vector<std::string> nodes) override;
 
-    int SetTargetDevice(inference_target_type_e type) override;
+    int SetTargetDevices(int types) override;
 
     int Load(std::vector<std::string> model_paths, unsigned int num_of_models) override;
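For context, the override keyword implies a matching pure-virtual declaration in the common engine interface, which is not part of this diff. A hypothetical excerpt (the class name and surrounding members are assumptions) of what that declaration changes to:

    // Hypothetical base class; the real interface header is not shown here.
    class IInferenceEngineCommon {
    public:
        virtual ~IInferenceEngineCommon() = default;

        // Before: exactly one device per call.
        // virtual int SetTargetDevice(inference_target_type_e type) = 0;

        // After: a bitmask, so several devices can be requested at once.
        virtual int SetTargetDevices(int types) = 0;
    };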