Change behavior to MLAPI type 30/255830/1 accepted/tizen/unified/20210331.053928 submit/tizen/20210325.014337
authorInki Dae <inki.dae@samsung.com>
Wed, 24 Mar 2021 06:27:03 +0000 (15:27 +0900)
committerInki Dae <inki.dae@samsung.com>
Wed, 24 Mar 2021 06:27:03 +0000 (15:27 +0900)
The behavior of the INFERENCE_BACKEND_MLAPI type is now fixed to
the NPU device according to a policy change, so this patch makes
it support only the NPU device when the user requests an inference
with the INFERENCE_BACKEND_MLAPI type.

Change-Id: I25afff6cb8ff346a76bddde10fa021d11ca2b22f
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_common_impl.cpp
test/src/inference_engine_profiler.cpp
test/src/inference_engine_tc.cpp

index 325fd3731755b83f17f2d5364a770ff2e1af713b..4ef465ff1352453f9bcc5ca79a84ce00a2162bbd 100644 (file)
@@ -333,9 +333,16 @@ namespace Common
                if (backend_type == INFERENCE_BACKEND_MLAPI &&
                                (device_type & INFERENCE_TARGET_CPU || device_type & INFERENCE_TARGET_GPU)) {
                        backend_type = INFERENCE_BACKEND_TFLITE;
-                       LOGI("API framework is MLAPI with TFLITE tensor filter.\n");
+                       LOGE("MLAPI type is used only for NPU device.\n");
+                       return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                }
 
+               // If the user requested an inference with the ONE backend and a CPU/GPU
+               // device, then pass the TFLITE backend type down to the MLAPI backend.
+               if (backend_type == INFERENCE_BACKEND_ONE &&
+                               (device_type & INFERENCE_TARGET_CPU || device_type & INFERENCE_TARGET_GPU))
+                       backend_type = INFERENCE_BACKEND_TFLITE;
+
                // If NPU type is declared in ini file then pass the type to
                // a given inference engine backend.
                if (backend_type == INFERENCE_BACKEND_MLAPI &&
@@ -384,6 +391,7 @@ namespace Common
                        BackendTable.insert(std::make_pair("armnn",INFERENCE_BACKEND_ARMNN));
                        BackendTable.insert(std::make_pair("opencv",INFERENCE_BACKEND_OPENCV));
                        BackendTable.insert(std::make_pair("mlapi",INFERENCE_BACKEND_MLAPI));
+                       BackendTable.insert(std::make_pair("one",INFERENCE_BACKEND_ONE));
 
                        config->backend_type = BackendTable.find(config->backend_name)->second;
                }
index 7f873c456e20fed7f2cfbcfe583adde9ad5c8480..81581f077d2ec01bd480be93e250b6989c09948f 100644 (file)
@@ -1097,7 +1097,7 @@ INSTANTIATE_TEST_CASE_P(
                                // mobilenet based image classification test
                                // TFLITE via MLAPI.
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_CPU,
+                                               "one", INFERENCE_TARGET_CPU,
                                                TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
@@ -1106,7 +1106,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 3 }),
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_CPU,
+                                               "one", INFERENCE_TARGET_CPU,
                                                TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
@@ -1116,7 +1116,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1128,7 +1128,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 451, 474, 714, 969 }),
                                // face detection test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1140,7 +1140,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 727, 225, 960, 555 }),
                                // pose estimation test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+                                               "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -1150,11 +1150,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
@@ -1165,7 +1165,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // mobilenet based image classification test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_GPU,
+                                               "one", INFERENCE_TARGET_GPU,
                                                TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
@@ -1175,7 +1175,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // quantized mobilenet based image classification test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_GPU,
+                                               "one", INFERENCE_TARGET_GPU,
                                                TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
@@ -1186,7 +1186,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // object detection test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+                                               "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1199,7 +1199,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // face detection test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+                                               "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -1212,7 +1212,7 @@ INSTANTIATE_TEST_CASE_P(
 
                                // pose estimation test
                                ParamType_Infer(
-                                               "mlapi", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+                                               "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
                                                INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
@@ -1222,11 +1222,11 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                // Hand gesture model 1 from AIC
-                               ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
                                                { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
                                // Hand gesture model 2 from AIC
-                               ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                               ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
                                                { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
                                                { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
index 10172006e547fb2be66a80a2a5f85e543ab9fb4f..b46b4e8224b31f08e02d357df7b90b6e6c044633 100644 (file)
@@ -657,7 +657,7 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G1,
                                                                // OPENCV
                                                                ParamType_One("opencv"),
                                                                // ML Single API for NNStreamer.
-                                                               ParamType_One("mlapi")
+                                                               ParamType_One("one")
                                                                /* TODO */
                                                                ));
 
@@ -671,7 +671,7 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G2,
                                                                // OPENCV.
                                                                ParamType_Two("opencv", INFERENCE_TARGET_CPU),
                                                                // ML Single API for NNStreamer.
-                                                               ParamType_Two("mlapi", INFERENCE_TARGET_CPU)
+                                                               ParamType_Two("one", INFERENCE_TARGET_CPU)
                                                                /* TODO */
                                                                ));
 
@@ -695,7 +695,7 @@ INSTANTIATE_TEST_CASE_P(
                                                  "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }),
                                // ML Single API for NNStreamer.
                                ParamType_Three(
-                                               "mlapi", INFERENCE_TARGET_CPU,
+                                               "one", INFERENCE_TARGET_CPU,
                                                { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" })
                                /* TODO */
                                ));
@@ -715,7 +715,7 @@ INSTANTIATE_TEST_CASE_P(
                                ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
                                                          224, 3, { "test_name" }),
                                // ML Single API for NNStreamer.
-                               ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+                               ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
                                                          224, 3, { "test_name" })
                                /* TODO */
                                ));
@@ -756,14 +756,14 @@ INSTANTIATE_TEST_CASE_P(
                                ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
                                                          224, 3, { "" }),
                                // ML Single API for NNStreamer.
-                               ParamType_Six("mlapi", -1, 224, 224, 3, { "test_name" }),
-                               ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 0,
+                               ParamType_Six("one", -1, 224, 224, 3, { "test_name" }),
+                               ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 0,
                                                          224, 3, { "test_name" }),
-                               ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+                               ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
                                                          0, 3, { "test_name" }),
-                               ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+                               ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
                                                          224, 0, { "test_name" }),
-                               ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+                               ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
                                                          224, 3, { "" })
                                /* TODO */
                                ));
@@ -803,7 +803,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 281 }),
                                // ONE.
                                ParamType_Many(
-                                               "mlapi", INFERENCE_ENGINE_PROFILER_OFF,
+                                               "one", INFERENCE_ENGINE_PROFILER_OFF,
                                                INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },
@@ -840,7 +840,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 281 }),
                                // ONE.
                                ParamType_Many(
-                                               "mlapi", INFERENCE_ENGINE_PROFILER_FILE,
+                                               "one", INFERENCE_ENGINE_PROFILER_FILE,
                                                INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },
@@ -877,7 +877,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 281 }),
                                // ONE.
                                ParamType_Many(
-                                               "mlapi", INFERENCE_ENGINE_PROFILER_CONSOLE,
+                                               "one", INFERENCE_ENGINE_PROFILER_CONSOLE,
                                                INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },