Use MLAPI backend for ARMNN and TFLITE inference engine (branch: sandbox/inki.dae/mlapi)
author    Inki Dae <inki.dae@samsung.com>
Fri, 11 Sep 2020 07:26:50 +0000 (16:26 +0900)
committer Inki Dae <inki.dae@samsung.com>
Wed, 14 Oct 2020 06:24:33 +0000 (15:24 +0900)
This patch makes the inference engine framework use MLAPI by default.

Change-Id: I2d653906a923452c40db8afd62b165f72949ebcb
Signed-off-by: Inki Dae <inki.dae@samsung.com>
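
The practical effect for callers: a backend is now selected through
inference_backend_type_e, and the framework resolves the matching plugin
library internally. A minimal usage sketch (hypothetical caller; the
inference_engine_config field order follows the initializer used in the
tests below, and the outer namespace is assumed):

    #include "inference_engine_common_impl.h"

    // Outer namespace assumed; the diff below only shows "namespace Common".
    using namespace InferenceEngineInterface::Common;

    int BindTfliteBackend(void)
    {
            InferenceEngineCommon engine;

            // With this patch, INFERENCE_BACKEND_TFLITE is served by the MLAPI
            // plugin (libinference-engine-mlapi.so). Use
            // INFERENCE_BACKEND_TFLITE_NATIVE to keep the in-process
            // TensorFlow-Lite backend instead.
            inference_engine_config config = { "tflite", INFERENCE_BACKEND_TFLITE,
                                               INFERENCE_TARGET_CPU };

            return engine.BindBackend(&config);
    }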
include/inference_engine_type.h
src/inference_engine_common_impl.cpp
test/src/inference_engine_profiler.cpp
test/src/inference_engine_tc.cpp

index c2f78a0..cf86159 100644
@@ -37,10 +37,11 @@ extern "C"
        typedef enum {
                INFERENCE_BACKEND_NONE = -1, /**< None */
                INFERENCE_BACKEND_OPENCV, /**< OpenCV */
+               INFERENCE_BACKEND_TFLITE_NATIVE, /**< TensorFlow-Lite without MLAPI */
                INFERENCE_BACKEND_TFLITE, /**< TensorFlow-Lite */
                INFERENCE_BACKEND_ARMNN, /**< ARMNN */
-               INFERENCE_BACKEND_MLAPI, /** < ML Single API of NNStreamer.*/
                INFERENCE_BACKEND_ONE, /** < On-device Neural Engine. */
+               INFERENCE_BACKEND_VIVANTE, /**< Vivante NPU (third party). */
                INFERENCE_BACKEND_MAX /**< Backend MAX */
        } inference_backend_type_e;
 
index d27be80..26e7ae7 100644
@@ -210,9 +210,10 @@ namespace Common
                        return INFERENCE_ENGINE_ERROR_INTERNAL;
                }
 
-               // If a backend is ML Single API of NNStreamer or ONE then set a tensor filter plugin type.
-               if (backend_type == INFERENCE_BACKEND_ONE ||
-                       backend_type == INFERENCE_BACKEND_MLAPI) {
+               // Use an internal backend for OpenCV and ARMNN instead of MLAPI
+               // because MLAPI doesn't support the OpenCV and ARMNN backends.
+               if (backend_type != INFERENCE_BACKEND_OPENCV &&
+                               backend_type != INFERENCE_BACKEND_ARMNN) {
                        int ret = mBackendHandle->SetPrivateData(&backend_type);
                        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                                LOGE("Failed to set a tensor filter plugin.");
@@ -237,13 +238,28 @@ namespace Common
                        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                }
 
+               if (config->backend_type <= INFERENCE_BACKEND_NONE ||
+                       config->backend_type >= INFERENCE_BACKEND_MAX) {
+                       LOGE("Backend type is invalid.");
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
+
                if (mUseProfiler == true) {
                        // Memory usage will be measured between BindBackend ~ UnbindBackend callbacks.
                        mProfiler->Start(IE_PROFILER_MEMORY);
                }
 
+               std::string backendNameTable[INFERENCE_BACKEND_MAX] = {
+                       [INFERENCE_BACKEND_OPENCV] = "opencv",
+                       [INFERENCE_BACKEND_TFLITE_NATIVE] = "tflite",
+                       [INFERENCE_BACKEND_TFLITE] = "mlapi",
+                       [INFERENCE_BACKEND_ARMNN] = "armnn",
+                       [INFERENCE_BACKEND_ONE] = "mlapi",
+                       [INFERENCE_BACKEND_VIVANTE] = "mlapi"
+               };
+
                std::string backendLibName =
-                               "libinference-engine-" + config->backend_name + ".so";
+                               "libinference-engine-" + backendNameTable[config->backend_type] + ".so";
 
                int ret = InitBackendEngine(backendLibName, config->backend_type);
                if (ret != INFERENCE_ENGINE_ERROR_NONE) {
@@ -281,10 +297,11 @@ namespace Common
 
                std::string backendNameTable[INFERENCE_BACKEND_MAX] = {
                        [INFERENCE_BACKEND_OPENCV] = "opencv",
-                       [INFERENCE_BACKEND_TFLITE] = "tflite",
+                       [INFERENCE_BACKEND_TFLITE_NATIVE] = "tflite",
+                       [INFERENCE_BACKEND_TFLITE] = "mlapi",
                        [INFERENCE_BACKEND_ARMNN] = "armnn",
-                       [INFERENCE_BACKEND_MLAPI] = "mlapi",
-                       [INFERENCE_BACKEND_ONE] = "mlapi"
+                       [INFERENCE_BACKEND_ONE] = "mlapi",
+                       [INFERENCE_BACKEND_VIVANTE] = "mlapi"
                };
 
                std::string backendLibName =
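
Both name tables above encode the same mapping. A standalone sketch of the
resolution they perform (the switch is equivalent to the designated-initializer
arrays in the patch, which rely on a GNU extension in C++; LoadBackendPlugin is
a hypothetical illustration of what BindBackend() does with the resulting
name):

    #include <dlfcn.h>
    #include <string>

    #include "inference_engine_type.h"

    // Equivalent of the backendNameTable lookup: TFLITE, ONE and VIVANTE all
    // resolve to the MLAPI plugin, while OPENCV, TFLITE_NATIVE and ARMNN keep
    // dedicated in-process backends.
    static const char *ResolveBackendName(int backend_type)
    {
            switch (backend_type) {
            case INFERENCE_BACKEND_OPENCV:        return "opencv";
            case INFERENCE_BACKEND_TFLITE_NATIVE: return "tflite";
            case INFERENCE_BACKEND_ARMNN:         return "armnn";
            case INFERENCE_BACKEND_TFLITE:
            case INFERENCE_BACKEND_ONE:
            case INFERENCE_BACKEND_VIVANTE:       return "mlapi";
            default:                              return nullptr;
            }
    }

    // Hypothetical loader mirroring BindBackend(): build the library name the
    // same way and dlopen() it.
    static void *LoadBackendPlugin(int backend_type)
    {
            const char *name = ResolveBackendName(backend_type);
            if (name == nullptr)
                    return nullptr;

            std::string lib = "libinference-engine-" + std::string(name) + ".so";
            return dlopen(lib.c_str(), RTLD_NOW);
    }

So INFERENCE_BACKEND_TFLITE, _ONE and _VIVANTE all load
libinference-engine-mlapi.so; the SetPrivateData() call in the first hunk then
tells that single plugin which tensor filter to select.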
index 736f1fe..349234d 100644
 #include "inference_engine_common_impl.h"
 #include "inference_engine_test_common.h"
 
+static std::map<std::string, int> BackendTypes
+       = {
+               { "opencv", INFERENCE_BACKEND_OPENCV },
+               { "tflite-native", INFERENCE_BACKEND_TFLITE_NATIVE },
+               { "tflite", INFERENCE_BACKEND_TFLITE },
+               { "armnn", INFERENCE_BACKEND_ARMNN },
+               { "one", INFERENCE_BACKEND_ONE },
+               { "vivante", INFERENCE_BACKEND_VIVANTE }
+       };
+
 typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>,
                                   int, int, int, std::vector<std::string>,
                                   std::vector<std::string>, std::vector<std::string>,
@@ -126,6 +136,14 @@ TEST_P(InferenceEngineTfliteTest, Inference)
                return;
        }
 
+       std::map<std::string, int>::iterator type = BackendTypes.find(backend_name);
+       if (type == BackendTypes.end()) {
+               ASSERT_TRUE(0);
+               return;
+       }
+
+       config.backend_type = type->second;
+
        ret = engine->BindBackend(&config);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -321,6 +339,15 @@ TEST_P(InferenceEngineCaffeTest, Inference)
                return;
        }
 
+
+       std::map<std::string, int>::iterator type = BackendTypes.find(backend_name);
+       if (type == BackendTypes.end()) {
+               ASSERT_TRUE(0);
+               return;
+       }
+
+       config.backend_type = type->second;
+
        ret = engine->BindBackend(&config);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -518,6 +545,14 @@ TEST_P(InferenceEngineDldtTest, Inference)
                return;
        }
 
+       std::map<std::string, int>::iterator type = BackendTypes.find(backend_name);
+       if (type == BackendTypes.end()) {
+               ASSERT_TRUE(0);
+               return;
+       }
+
+       config.backend_type = type->second;
+
        ret = engine->BindBackend(&config);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -742,6 +777,111 @@ INSTANTIATE_TEST_CASE_P(
                                // mobilenet based image classification test
                                // TFLITE.
                                ParamType_Infer(
+                                               "tflite-native", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/image_classification.bin" }, 224,
+                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+                                               { 3 }),
+                               ParamType_Infer(
+                                               "tflite-native", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/image_classification.bin" }, 224,
+                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+                                               { 3 }),
+                               // quantized mobilenet based image classification test
+                               ParamType_Infer(
+                                               "tflite-native", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
+                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
+                                               224, 3, { "input" },
+                                               { "MobilenetV1/Predictions/Reshape_1" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+                                               { 955 }),
+                               ParamType_Infer(
+                                               "tflite-native", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
+                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
+                                               224, 3, { "input" },
+                                               { "MobilenetV1/Predictions/Reshape_1" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+                                               { 955 }),
+                               // object detection test
+                               ParamType_Infer(
+                                               "tflite-native", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+                                               { 451, 474, 714, 969 }),
+                               ParamType_Infer(
+                                               "tflite-native", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+                                               { 451, 474, 714, 969 }),
+                               // face detection test
+                               ParamType_Infer(
+                                               "tflite-native", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+                                               { 727, 225, 960, 555 }),
+                               ParamType_Infer(
+                                               "tflite-native", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+                                               { 727, 225, 960, 555 }),
+                               // pose estimation test
+                               ParamType_Infer(
+                                               "tflite-native", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+                                               { "image" },
+                                               { "Convolutional_Pose_Machine/stage_5_out" },
+                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
+                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
+                               ParamType_Infer(
+                                               "tflite-native", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+                                               { "image" },
+                                               { "Convolutional_Pose_Machine/stage_5_out" },
+                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
+                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
+                               // parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, expected result
+                               // mobilenet based image classification test
+                               // TFLITE (through MLAPI).
+                               ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_CPU,
                                                TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
@@ -750,7 +890,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
                                                { 3 }),
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU,
+                                               "tflite", INFERENCE_TARGET_CPU,
                                                TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/image_classification.bin" }, 224,
@@ -768,7 +908,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
                                                { 955 }),
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU,
+                                               "tflite", INFERENCE_TARGET_CPU,
                                                TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
@@ -789,7 +929,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
                                                { 451, 474, 714, 969 }),
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+                                               "tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
                                                10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -812,7 +952,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
                                                { 727, 225, 960, 555 }),
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
                                                { "normalized_input_image_tensor" },
@@ -834,7 +974,7 @@ INSTANTIATE_TEST_CASE_P(
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 }),
                                ParamType_Infer(
-                                               "tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+                                               "tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
                                                10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
                                                { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
                                                { "image" },
index 337e83e..30fc1fc 100644
@@ -36,6 +36,16 @@ enum
        INFERENCE_ENGINE_PROFILER_MAX
 };
 
+static std::map<std::string, int> BackendTypes
+       = {
+               { "opencv", INFERENCE_BACKEND_OPENCV },
+               { "tflite-native", INFERENCE_BACKEND_TFLITE_NATIVE },
+               { "armnn", INFERENCE_BACKEND_ARMNN },
+               { "tflite", INFERENCE_BACKEND_TFLITE },
+               { "one", INFERENCE_BACKEND_ONE },
+               { "vivante", INFERENCE_BACKEND_VIVANTE }
+       };
+
 typedef std::tuple<std::string> ParamType_One;
 typedef std::tuple<std::string, int> ParamType_Two;
 typedef std::tuple<std::string, int, std::vector<std::string> > ParamType_Three;
@@ -80,14 +90,13 @@ static auto InferenceEngineInit_Two_Params = [](InferenceEngineCommon *engine,
                                                                                                int &target_devices) -> int {
        inference_engine_config config = { backend_name, 0, target_devices };
 
-       // backend_type is valid only in case backend_name is "mlapi".
-       if (!backend_name.compare("mlapi")) {
-               if (!(target_devices & INFERENCE_TARGET_CUSTOM))
-                       config.backend_type = INFERENCE_BACKEND_ONE;
-               else
-                       config.backend_type = INFERENCE_BACKEND_MLAPI;
+       std::map<std::string, int>::iterator type = BackendTypes.find(backend_name);
+       if (type == BackendTypes.end()) {
+               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
        }
 
+       config.backend_type = type->second;
+
        int ret = engine->BindBackend(&config);
        if (ret != INFERENCE_ENGINE_ERROR_NONE)
                return ret;
@@ -691,7 +700,7 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G1,
                                                                // OPENCV
                                                                ParamType_One("opencv"),
                                                                // ML Single API for NNStreamer.
-                                                               ParamType_One("mlapi")
+                                                               ParamType_One("one")
                                                                /* TODO */
                                                                ));
 
@@ -705,7 +714,7 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G2,
                                                                // OPENCV.
                                                                ParamType_Two("opencv", INFERENCE_TARGET_CPU),
                                                                // ML Single API for NNStreamer.
-                                                               ParamType_Two("mlapi", INFERENCE_TARGET_CPU)
+                                                               ParamType_Two("one", INFERENCE_TARGET_CPU)
                                                                /* TODO */
                                                                ));
 
@@ -729,7 +738,7 @@ INSTANTIATE_TEST_CASE_P(
                                                  "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }),
                                // ML Single API for NNStreamer.
                                ParamType_Three(
-                                               "mlapi", INFERENCE_TARGET_CPU,
+                                               "one", INFERENCE_TARGET_CPU,
                                                { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite" })
                                /* TODO */
                                ));
@@ -749,7 +758,7 @@ INSTANTIATE_TEST_CASE_P(
                                ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
                                                          224, 3, { "test_name" }),
                                // ML Single API for NNStreamer.
-                               ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+                               ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
                                                          224, 3, { "test_name" })
                                /* TODO */
                                ));
@@ -790,14 +799,14 @@ INSTANTIATE_TEST_CASE_P(
                                ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
                                                          224, 3, { "" }),
                                // ML Single API for NNStreamer.
-                               ParamType_Six("mlapi", -1, 224, 224, 3, { "test_name" }),
-                               ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 0,
+                               ParamType_Six("one", -1, 224, 224, 3, { "test_name" }),
+                               ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 0,
                                                          224, 3, { "test_name" }),
-                               ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+                               ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
                                                          0, 3, { "test_name" }),
-                               ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+                               ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
                                                          224, 0, { "test_name" }),
-                               ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+                               ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
                                                          224, 3, { "" })
                                /* TODO */
                                ));
@@ -837,7 +846,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 281 }),
                                // ML Single API for NNStreamer.
                                ParamType_Many(
-                                               "mlapi", INFERENCE_ENGINE_PROFILER_OFF,
+                                               "one", INFERENCE_ENGINE_PROFILER_OFF,
                                                INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },
@@ -874,7 +883,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 281 }),
                                // ML Single API for NNStreamer.
                                ParamType_Many(
-                                               "mlapi", INFERENCE_ENGINE_PROFILER_FILE,
+                                               "one", INFERENCE_ENGINE_PROFILER_FILE,
                                                INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },
@@ -911,7 +920,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { 281 }),
                                // ML Single API for NNStreamer.
                                ParamType_Many(
-                                               "mlapi", INFERENCE_ENGINE_PROFILER_CONSOLE,
+                                               "one", INFERENCE_ENGINE_PROFILER_CONSOLE,
                                                INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },
@@ -933,7 +942,7 @@ INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G7,
                                                                // ML Single API for NNStreamer with On-device Neural Engine tensor filter.
                                                                ParamType_One_Int(INFERENCE_BACKEND_ONE),
                                                               // ML Single API for NNStreamer with Vivante NPU.
-                                                               ParamType_One_Int(INFERENCE_BACKEND_MLAPI)
+                                                               ParamType_One_Int(INFERENCE_BACKEND_VIVANTE)
                                                                /* TODO */
                                                                ));
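
The name-to-type lookup added before each BindBackend() call above is repeated
verbatim in four places across the two test files. A small helper along these
lines (hypothetical, not part of the patch) could consolidate it:

    #include <map>
    #include <string>

    // Hypothetical helper for the repeated lookup in the tests: translate a
    // backend name into its inference_backend_type_e value, returning false
    // for unknown names so callers can ASSERT on the result.
    static bool GetBackendType(const std::map<std::string, int> &table,
                               const std::string &backend_name,
                               int &backend_type)
    {
            std::map<std::string, int>::const_iterator type =
                            table.find(backend_name);
            if (type == table.end())
                    return false;

            backend_type = type->second;
            return true;
    }

Usage in a test body would then be:

    int backend_type;
    ASSERT_TRUE(GetBackendType(BackendTypes, backend_name, backend_type));
    config.backend_type = backend_type;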