return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceARMNN::SetTargetDevice(inference_target_type_e type)
+int InferenceARMNN::SetTargetDevices(int types)
{
LOGI("ENTER");
- switch (type) {
- case INFERENCE_TARGET_CPU:
+ if (types & INFERENCE_TARGET_CPU) {
mAccelType.push_back(armnn::Compute::CpuAcc);
LOGI("Inference target device is CPU");
- break;
- case INFERENCE_TARGET_GPU:
+ }
+
+ if (types & INFERENCE_TARGET_GPU) {
mAccelType.push_back(armnn::Compute::GpuAcc);
LOGI("Inference target device is GPU");
- break;
- case INFERENCE_TARGET_CUSTOM:
- case INFERENCE_TARGET_NONE:
- default:
- LOGW("Not supported device type [%d], Set CPU mode", (int)type);
- break;
- }
-
- // TODO. Accel type should be pushed according to a given compute device order.
+ }
LOGI("LEAVE");
// Output Tensor Params
int SetOutputTensorParamNodes(std::vector<std::string> nodes) override;
- int SetTargetDevice(inference_target_type_e type) override;
+ int SetTargetDevices(int types) override;
int Load(std::vector<std::string> model_paths, unsigned int num_of_models) override;
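
Note on the new calling convention: SetTargetDevices() now takes a bitmask rather than a single enum value, so a caller can request several backends in one call and the if-chain above pushes one armnn::Compute entry per set bit. Below is a minimal standalone sketch of that behavior, not the engine's real headers; the bit values and the printf output are assumptions for illustration only.

    #include <cstdio>
    #include <string>
    #include <vector>

    // Assumed bit layout for illustration; the real constants come from the
    // inference engine's common headers.
    enum {
        INFERENCE_TARGET_NONE = 0,
        INFERENCE_TARGET_CPU  = 1 << 0,
        INFERENCE_TARGET_GPU  = 1 << 1,
    };

    int main()
    {
        // One call can now select both accelerators; the old
        // SetTargetDevice(enum) signature could only express a single choice.
        int types = INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU;

        std::vector<std::string> accel;
        if (types & INFERENCE_TARGET_CPU)
            accel.push_back("CpuAcc");
        if (types & INFERENCE_TARGET_GPU)
            accel.push_back("GpuAcc");

        for (const auto &name : accel)
            std::printf("selected backend: %s\n", name.c_str());

        return 0;
    }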