Fixed a backend issue where an inference requested with the ONE type
was passed to the backend as the TFLITE type instead of ONE.
This patch passes the ONE type to the inference-engine-mlapi backend
correctly.
Change-Id: Ib91e9880873f1023dfb7bc8bf3b5f838029527a3
Signed-off-by: Inki Dae <inki.dae@samsung.com>
}
// If user requested an inference with ONE backend and CPU/GPU device then
- // pass TFLITE backend type to MLAPI backend.
+ // pass ONE backend type to MLAPI backend.
if (backend_type == INFERENCE_BACKEND_ONE &&
(device_type & INFERENCE_TARGET_CPU || device_type & INFERENCE_TARGET_GPU))
- backend_type = INFERENCE_BACKEND_TFLITE;
+ backend_type = INFERENCE_BACKEND_ONE;
// If NPU type is declared in ini file then pass the type to
// a given inference engine backend.