Introduce default tensor filter type support for MLAPI 51/246251/2
authorInki Dae <inki.dae@samsung.com>
Tue, 27 Oct 2020 08:34:27 +0000 (17:34 +0900)
committerInki Dae <inki.dae@samsung.com>
Wed, 28 Oct 2020 07:30:11 +0000 (16:30 +0900)
This patch introduces default tensor filter type support for MLAPI.

When a user requests an inference with the INFERENCE_BACKEND_MLAPI type,
the inference engine MLAPI backend delegates the inference to the tensor
filter type corresponding to the given device type - CPU, GPU or NPU:

1. If the device type is CPU or GPU then it requests the inference from the
   MLAPI backend with the tensor filter type stored in mDefault_MLAPI_Backend[0].
2. If the device type is NPU then it requests the inference from the
   MLAPI backend with the tensor filter type stored in mDefault_MLAPI_Backend[1].
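
With this change, a caller is expected to load the configuration before
binding a backend. A minimal sketch based on the updated test code below
(error handling omitted):

    auto engine = std::make_unique<InferenceEngineCommon>();

    // -1 tells the framework to resolve backend_type from backend_name.
    inference_engine_config config = { "mlapi", -1, INFERENCE_TARGET_CPU };

    engine->LoadConfigFile();    // load the default tensor filter types from the ini file.
    engine->BindBackend(&config);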

Change-Id: I898a993c15ec24504b663b49ba5ac48ac4a25ff4
Signed-off-by: Inki Dae <inki.dae@samsung.com>
include/inference_engine_common_impl.h
include/inference_engine_private_type.h [new file with mode: 0644]
include/inference_engine_type.h
src/inference_engine_common_impl.cpp
test/src/inference_engine_profiler.cpp
test/src/inference_engine_tc.cpp

index 9c60d79..3af4c6c 100644 (file)
@@ -38,6 +38,13 @@ namespace Common
                ~InferenceEngineCommon();
 
                /**
+                * @brief Load configuration information from an ini file.
+                *        The ini file provides the default tensor filter types for
+                *        the MLAPI backend. Refer to the /etc/inference/inference_engine_mlapi_backend.ini file.
+                */
+               int LoadConfigFile(void);
+
+               /**
                 * @brief Load a backend engine library with a given backend name.
                 * @details This callback loads a backend engine library with a given backend name.
                 *          In order to find a backend engine library corresponding to the given backend name,
@@ -61,7 +68,7 @@ namespace Common
                 * @since_tizen 6.0
                 * @param[in] backend_type An enumeration value which indicates one of the backend types - refer to inference_backend_type_e.
                 */
-               int BindBackend(int backend_type);
+               int BindBackend(int backend_type, int device_type);
 
                /**
                 * @brief Unload a backend engine library.
@@ -234,7 +241,7 @@ namespace Common
 
        private:
                int InitBackendEngine(const std::string &backend_path,
-                                                         int backend_type);
+                                                         int backend_type, int device_type);
                int CheckTensorBuffers(
                                std::vector<inference_engine_tensor_buffer> &buffers);
                int CheckLayerProperty(inference_engine_layer_property &property);
@@ -246,6 +253,9 @@ namespace Common
                // By default, we use the profiler.
                bool mUseProfiler;
                unsigned int mProfilerDumpType;
+               // 0 : default tensor filter type for MLAPI with CPU and GPU.
+               // 1 : default tensor filter type for MLAPI with NPU.
+               uint32_t mDefault_MLAPI_Backend[2];
 
        protected:
                void *mBackendModule;
diff --git a/include/inference_engine_private_type.h b/include/inference_engine_private_type.h
new file mode 100644 (file)
index 0000000..671c749
--- /dev/null
@@ -0,0 +1,27 @@
+/**
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INFERENCE_ENGINE_PRIVATE_TYPE_H__
+#define __INFERENCE_ENGINE_PRIVATE_TYPE_H__
+
+typedef enum {
+       INFERENCE_BACKEND_NPU_NONE = -1,
+       INFERENCE_BACKEND_NPU_VIVANTE,  /**< Vivante NPU. */
+       INFERENCE_BACKEND_NPU_TRIV2,    /**< TRIV2 NPU. */
+       INFERENCE_BACKEND_NPU_MAX
+} inference_backend_npu_type_e;
+
+#endif /* __INFERENCE_ENGINE_PRIVATE_TYPE_H__ */
index c2f78a0..0e42792 100644 (file)
@@ -139,7 +139,7 @@ extern "C"
         */
        typedef struct _inference_engine_config {
                std::string backend_name; /**< a backend name which could be one among supported backends(tflite, opencv, armnn, dldt, nnstreamer) */
-               int backend_type; /**< a tensor filter plugin type for NNStreamer if a backend is NNStreamer. */
+               int backend_type; /**< a tensor filter plugin type for MLAPI if the backend is MLAPI. Note: if backend_type is -1 then the inference engine framework internally updates backend_type according to the given backend_name. */
                int target_devices; /**< which device or devices to be targeted for inference. (Please, refer to inference_target_type_e) */
                // TODO.
        } inference_engine_config;
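
For instance, a configuration that defers the backend type decision to the
framework could be declared as follows (illustrative values):

    inference_engine_config config = {
            .backend_name = "armnn",
            .backend_type = -1,  /* resolved from backend_name internally */
            .target_devices = INFERENCE_TARGET_GPU
    };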
index d27be80..66a7e90 100644 (file)
@@ -16,6 +16,7 @@
 
 #include "inference_engine_error.h"
 #include "inference_engine_common_impl.h"
+#include "inference_engine_private_type.h"
 #include "inference_engine_ini.h"
 #include <fstream>
 #include <iostream>
@@ -23,6 +24,7 @@
 #include <time.h>
 #include <dlfcn.h>
 #include <experimental/filesystem>
+#include <iniparser.h>
 
 extern "C"
 {
@@ -46,6 +48,9 @@ namespace InferenceEngineInterface
 {
 namespace Common
 {
+       const char *INFERENCE_MLAPI_INI_FILENAME =
+                               "/etc/inference/inference_engine_mlapi_backend.ini";
+
        InferenceEngineCommon::InferenceEngineCommon() :
                        mSelectedBackendEngine(INFERENCE_BACKEND_NONE),
                        mProfiler(),
@@ -55,6 +60,7 @@ namespace Common
                        mBackendHandle()
        {
                LOGI("ENTER");
+
                LOGI("LEAVE");
        }
 
@@ -70,6 +76,68 @@ namespace Common
                LOGW("LEAVE");
        }
 
+       int InferenceEngineCommon::LoadConfigFile(void)
+       {
+               int ret = INFERENCE_ENGINE_ERROR_NONE;
+               int npu_type = -1, cpu_and_gpu_type = -1;
+               char *default_type_for_npu = NULL, *default_type_for_cpu_gpu = NULL;
+
+               dictionary *dict = iniparser_load(INFERENCE_MLAPI_INI_FILENAME);
+               if (dict == NULL) {
+                       LOGE("Fail to load %s file.\n", INFERENCE_MLAPI_INI_FILENAME);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               default_type_for_npu = (char *)iniparser_getstring(dict,
+                                       "inference mlapi npu backend:default tensor filter type",
+                                       NULL);
+               if (default_type_for_npu == NULL) {
+                       LOGE("Fail to load default tensor filter type for MLAPI with NPU.");
+                       ret = INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       goto out;
+               }
+
+               default_type_for_cpu_gpu = (char *)iniparser_getstring(dict,
+                                       "inference mlapi cpu and gpu backend:default tensor filter type",
+                                       NULL);
+               if (default_type_for_cpu_gpu == NULL) {
+                       LOGE("Fail to load default tensor filter type for MLAPI with CPU and GPU.");
+                       ret = INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       goto out;
+               }
+
+               npu_type = atoi(default_type_for_npu);
+               cpu_and_gpu_type = atoi(default_type_for_cpu_gpu);
+
+               // Check if loaded configuration value is valid or not.
+               if (npu_type != INFERENCE_BACKEND_NPU_VIVANTE &&
+                       npu_type != INFERENCE_BACKEND_NPU_TRIV2) {
+                       LOGE("Invalid tensor filter type for MLAPI with NPU.");
+                       ret = INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       goto out;
+               }
+
+               if (cpu_and_gpu_type != INFERENCE_BACKEND_TFLITE) {
+                       LOGE("Invalid tensor filter type for MLAPI with CPU and GPU.");
+                       ret = INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       goto out;
+               }
+
+               LOGI("npu = %d, cpu and gpu = %d", npu_type, cpu_and_gpu_type);
+
+               // Update mDefault_MLAPI_Backend with the tensor filter types loaded from the ini config file.
+               // 0 : default tensor filter type for MLAPI with CPU and GPU.
+               // 1 : default tensor filter type for MLAPI with NPU.
+               mDefault_MLAPI_Backend[0] = cpu_and_gpu_type;
+               mDefault_MLAPI_Backend[1] = npu_type;
+
+out:
+               iniparser_freedict(dict);
+
+               return ret;
+       }
+
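
For reference, the section and key names parsed above imply an ini layout
like the following. This is a hypothetical sample: the NPU values follow
inference_backend_npu_type_e, while the numeric value of
INFERENCE_BACKEND_TFLITE for the CPU and GPU entry is an assumption.

    [inference mlapi cpu and gpu backend]
    ; must be the INFERENCE_BACKEND_TFLITE value (assumed to be 1 here).
    default tensor filter type = 1

    [inference mlapi npu backend]
    ; INFERENCE_BACKEND_NPU_VIVANTE (0) or INFERENCE_BACKEND_NPU_TRIV2 (1).
    default tensor filter type = 0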
        int InferenceEngineCommon::CheckTensorBuffers(
                        std::vector<inference_engine_tensor_buffer> &buffers)
        {
@@ -180,7 +248,7 @@ namespace Common
 
        int
        InferenceEngineCommon::InitBackendEngine(const std::string &backend_path,
-                                                                                        int backend_type)
+                                                                                        int backend_type, int device_type)
        {
                LOGI("lib: %s", backend_path.c_str());
                mBackendModule = dlopen(backend_path.c_str(), RTLD_NOW);
@@ -210,14 +278,22 @@ namespace Common
                        return INFERENCE_ENGINE_ERROR_INTERNAL;
                }
 
-               // If a backend is ML Single API of NNStreamer or ONE then set a tensor filter plugin type.
-               if (backend_type == INFERENCE_BACKEND_ONE ||
-                       backend_type == INFERENCE_BACKEND_MLAPI) {
-                       int ret = mBackendHandle->SetPrivateData(&backend_type);
-                       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                               LOGE("Failed to set a tensor filter plugin.");
-                               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
-                       }
+               LOGI("backend_type = %d, device_type = %d", backend_type, device_type);
+
+               // Update tensor filter type for MLAPI if a given backend type is MLAPI.
+               if (backend_type == INFERENCE_BACKEND_MLAPI) {
+                       if (device_type & (INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU))
+                               backend_type = mDefault_MLAPI_Backend[0];
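+                       // An NPU is requested as INFERENCE_TARGET_CUSTOM, so use the NPU default.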
+                       if (device_type & INFERENCE_TARGET_CUSTOM)
+                               backend_type = mDefault_MLAPI_Backend[1];
+
+                       LOGI("tensor filter type is %d\n", backend_type);
+               }
+
+               int ret = mBackendHandle->SetPrivateData(&backend_type);
+               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+                       LOGE("Failed to set a tensor filter plugin type for MLAPI.");
+                       return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                }
 
                return INFERENCE_ENGINE_ERROR_NONE;
@@ -245,7 +321,19 @@ namespace Common
                std::string backendLibName =
                                "libinference-engine-" + config->backend_name + ".so";
 
-               int ret = InitBackendEngine(backendLibName, config->backend_type);
+               // If backend_type of config is -1 then update it according to backend_name.
+               if (config->backend_type == -1) {
+                       std::map<std::string, int> BackendTable;
+
+                       BackendTable.insert(std::make_pair("tflite", INFERENCE_BACKEND_TFLITE));
+                       BackendTable.insert(std::make_pair("armnn", INFERENCE_BACKEND_ARMNN));
+                       BackendTable.insert(std::make_pair("opencv", INFERENCE_BACKEND_OPENCV));
+                       BackendTable.insert(std::make_pair("mlapi", INFERENCE_BACKEND_MLAPI));
+
+                       // Reject an unknown backend name. Dereferencing the result of
+                       // find() without this check is undefined behavior on a miss.
+                       auto iter = BackendTable.find(config->backend_name);
+                       if (iter == BackendTable.end()) {
+                               LOGE("Invalid backend name, %s.", config->backend_name.c_str());
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       }
+
+                       config->backend_type = iter->second;
+               }
+
+               int ret = InitBackendEngine(backendLibName, config->backend_type, config->target_devices);
                if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                        return ret;
                }
@@ -259,7 +347,7 @@ namespace Common
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
-       int InferenceEngineCommon::BindBackend(int backend_type)
+       int InferenceEngineCommon::BindBackend(int backend_type, int device_type)
        {
                LOGI("ENTER");
 
@@ -290,7 +378,7 @@ namespace Common
                std::string backendLibName =
                                "libinference-engine-" + backendNameTable[backend_type] + ".so";
 
-               int ret = InitBackendEngine(backendLibName, backend_type);
+               int ret = InitBackendEngine(backendLibName, backend_type, device_type);
                if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                        return ret;
                }
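
With the new signature, binding by type also takes the device type. A usage
sketch matching the updated tests, which pass 0 when no device type applies:

    ret = engine->BindBackend(INFERENCE_BACKEND_TFLITE, 0);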
index be6da43..c499e77 100644 (file)
@@ -104,7 +104,7 @@ TEST_P(InferenceEngineTfliteTest, Inference)
                          << ", target device = " << Target_Formats[target_devices]
                          << std::endl;
        inference_engine_config config = { .backend_name = backend_name,
-                                                                          .backend_type = 0,
+                                                                          .backend_type = -1,
                                                                           .target_devices = target_devices };
 
        auto engine = std::make_unique<InferenceEngineCommon>();
@@ -126,6 +126,9 @@ TEST_P(InferenceEngineTfliteTest, Inference)
                return;
        }
 
+       ret = engine->LoadConfigFile();
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
        ret = engine->BindBackend(&config);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -299,7 +302,7 @@ TEST_P(InferenceEngineCaffeTest, Inference)
                          << ", target device = " << Target_Formats[target_devices]
                          << std::endl;
        inference_engine_config config = { .backend_name = backend_name,
-                                                                          .backend_type = 0,
+                                                                          .backend_type = -1,
                                                                           .target_devices = target_devices };
 
        auto engine = std::make_unique<InferenceEngineCommon>();
@@ -321,6 +324,9 @@ TEST_P(InferenceEngineCaffeTest, Inference)
                return;
        }
 
+       ret = engine->LoadConfigFile();
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
        ret = engine->BindBackend(&config);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -496,7 +502,7 @@ TEST_P(InferenceEngineDldtTest, Inference)
                          << ", target device = " << Target_Formats[target_devices]
                          << std::endl;
        inference_engine_config config = { .backend_name = backend_name,
-                                                                          .backend_type = 0,
+                                                                          .backend_type = -1,
                                                                           .target_devices = target_devices };
 
        auto engine = std::make_unique<InferenceEngineCommon>();
@@ -518,6 +524,9 @@ TEST_P(InferenceEngineDldtTest, Inference)
                return;
        }
 
+       ret = engine->LoadConfigFile();
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
        ret = engine->BindBackend(&config);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
@@ -660,7 +669,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
                                                { "MobilenetV1/Predictions/Reshape_1" },
-                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
                                                { 955 }),
                                ParamType_Infer(
                                                "armnn", INFERENCE_TARGET_GPU,
@@ -669,7 +678,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
                                                { "MobilenetV1/Predictions/Reshape_1" },
-                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
@@ -765,7 +774,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
                                                { "MobilenetV1/Predictions/Reshape_1" },
-                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
                                                { 955 }),
                                ParamType_Infer(
                                                "tflite", INFERENCE_TARGET_GPU,
@@ -774,7 +783,7 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/opt/usr/images/image_classification_q.bin" }, 224,
                                                224, 3, { "input" },
                                                { "MobilenetV1/Predictions/Reshape_1" },
-                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
                                                { 955 }),
                                // object detection test
                                ParamType_Infer(
@@ -842,6 +851,109 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
                                                { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
                                                  351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
+                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
+                               // TFLITE via MLAPI.
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/image_classification.bin" }, 224,
+                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+                                               { 3 }),
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_GPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/image_classification.bin" }, 224,
+                                               224, 3, { "input_2" }, { "dense_3/Softmax" },
+                                               { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+                                               { 3 }),
+                               // quantized mobilenet based image classification test
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_CPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
+                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
+                                               224, 3, { "input" },
+                                               { "MobilenetV1/Predictions/Reshape_1" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+                                               { 955 }),
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_GPU,
+                                               TEST_IMAGE_CLASSIFICATION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_UINT8,
+                                               { "/opt/usr/images/image_classification_q.bin" }, 224,
+                                               224, 3, { "input" },
+                                               { "MobilenetV1/Predictions/Reshape_1" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+                                               { 955 }),
+                               // object detection test
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+                                               { 451, 474, 714, 969 }),
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+                                               { 451, 474, 714, 969 }),
+                               // face detection test
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+                                               { 727, 225, 960, 555 }),
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+                                               { "normalized_input_image_tensor" },
+                                               { "TFLite_Detection_PostProcess",
+                                                 "TFLite_Detection_PostProcess:1",
+                                                 "TFLite_Detection_PostProcess:2",
+                                                 "TFLite_Detection_PostProcess:3" },
+                                               { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+                                               { 727, 225, 960, 555 }),
+                               // pose estimation test
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+                                               { "image" },
+                                               { "Convolutional_Pose_Machine/stage_5_out" },
+                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
+                                                 123, 99,  287, 381, 451, 287, 381, 475 }),
+                               ParamType_Infer(
+                                               "mlapi", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+                                               10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+                                               { "image" },
+                                               { "Convolutional_Pose_Machine/stage_5_out" },
+                                               { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+                                               { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+                                                 351, 382, 382, 382, 76,  146, 170, 193, 216, 146,
                                                  123, 99,  287, 381, 451, 287, 381, 475 })
                                /* TODO */
                                ));
index e6e2635..15fdfcf 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "inference_engine_error.h"
 #include "inference_engine_common_impl.h"
+#include "inference_engine_private_type.h"
 #include "inference_engine_test_common.h"
 
 enum
@@ -70,7 +71,11 @@ class InferenceEngineTestCase_G8
 
 static auto InferenceEngineInit_One_Param =
                [](InferenceEngineCommon *engine, std::string &backend_name) -> int {
-       inference_engine_config config = { backend_name, 0, 0 };
+       inference_engine_config config = { backend_name, -1, 0 };
+
+       int ret = engine->LoadConfigFile();
+       if (ret != INFERENCE_ENGINE_ERROR_NONE)
+               return ret;
 
        return engine->BindBackend(&config);
 };
@@ -78,17 +83,13 @@ static auto InferenceEngineInit_One_Param =
 static auto InferenceEngineInit_Two_Params = [](InferenceEngineCommon *engine,
                                                                                                std::string &backend_name,
                                                                                                int &target_devices) -> int {
-       inference_engine_config config = { backend_name, 0, target_devices };
-
-       // backend_type is valid only in case backend_name is "mlapi".
-       if (!backend_name.compare("mlapi")) {
-               if (!(target_devices & INFERENCE_TARGET_CUSTOM))
-                       config.backend_type = INFERENCE_BACKEND_ONE;
-               else
-                       config.backend_type = INFERENCE_BACKEND_MLAPI;
-       }
+       inference_engine_config config = { backend_name, -1, target_devices };
+
+       int ret = engine->LoadConfigFile();
+       if (ret != INFERENCE_ENGINE_ERROR_NONE)
+               return ret;
 
-       int ret = engine->BindBackend(&config);
+       ret = engine->BindBackend(&config);
        if (ret != INFERENCE_ENGINE_ERROR_NONE)
                return ret;
 
@@ -662,7 +663,10 @@ TEST_P(InferenceEngineTestCase_G7, Bind_P)
        auto engine = std::make_unique<InferenceEngineCommon>();
        ASSERT_TRUE(engine);
 
-       int ret = engine->BindBackend(backend_type);
+       int ret = engine->LoadConfigFile();
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       ret = engine->BindBackend(backend_type, 0);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
 
        engine->UnbindBackend();
@@ -677,7 +681,10 @@ TEST_P(InferenceEngineTestCase_G8, Bind_N)
        auto engine = std::make_unique<InferenceEngineCommon>();
        ASSERT_TRUE(engine);
 
-       int ret = engine->BindBackend(backend_type);
+       int ret = engine->LoadConfigFile();
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       ret = engine->BindBackend(backend_type, 0);
        ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
 }
 
@@ -730,7 +737,7 @@ INSTANTIATE_TEST_CASE_P(
                                // ML Single API for NNStreamer.
                                ParamType_Three(
                                                "mlapi", INFERENCE_TARGET_CPU,
-                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite" })
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" })
                                /* TODO */
                                ));
 
@@ -835,14 +842,14 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
                                                  "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
                                                { 281 }),
-                               // ML Single API for NNStreamer.
+                               // ONE.
                                ParamType_Many(
                                                "mlapi", INFERENCE_ENGINE_PROFILER_OFF,
                                                INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },
                                                224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" },
-                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
                                                { 955 }),
                                // ARMNN.
                                ParamType_Many(
@@ -872,14 +879,14 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
                                                  "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
                                                { 281 }),
-                               // ML Single API for NNStreamer.
+                               // ONE.
                                ParamType_Many(
                                                "mlapi", INFERENCE_ENGINE_PROFILER_FILE,
                                                INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },
                                                224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" },
-                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
                                                { 955 }),
                                // ARMNN.
                                ParamType_Many(
@@ -909,14 +916,14 @@ INSTANTIATE_TEST_CASE_P(
                                                { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
                                                  "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
                                                { 281 }),
-                               // ML Single API for NNStreamer.
+                               // ONE.
                                ParamType_Many(
                                                "mlapi", INFERENCE_ENGINE_PROFILER_CONSOLE,
                                                INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
                                                INFERENCE_TENSOR_DATA_TYPE_UINT8,
                                                { "/opt/usr/images/image_classification_q.bin" },
                                                224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" },
-                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite" },
+                                               { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
                                                { 955 })
                                /* TODO */
                                ));