This patch introduces default tensor filter type support for MLAPI.
When a user requests an inference with the INFERENCE_BACKEND_MLAPI type,
the inference engine MLAPI backend forwards the request to the tensor
filter type that corresponds to the given device type - CPU, GPU or NPU:
1. If the device type is CPU or GPU, it requests the inference from the
MLAPI backend with the tensor filter type stored in mDefault_MLAPI_Backend[0].
2. If the device type is NPU, it requests the inference from the
MLAPI backend with the tensor filter type stored in mDefault_MLAPI_Backend[1].
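For reference, the ini file is expected to look roughly as below. The
section and key names match what LoadConfigFile() parses; the numeric
values are illustrative and must equal the corresponding enumeration
values.

    [inference mlapi cpu and gpu backend]
    ; must equal the integer value of INFERENCE_BACKEND_TFLITE
    default tensor filter type = 1

    [inference mlapi npu backend]
    ; INFERENCE_BACKEND_NPU_VIVANTE (0) or INFERENCE_BACKEND_NPU_TRIV2 (1)
    default tensor filter type = 0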
Change-Id: I898a993c15ec24504b663b49ba5ac48ac4a25ff4
Signed-off-by: Inki Dae <inki.dae@samsung.com>
~InferenceEngineCommon();
/**
+ * @brief Load configuration information from an ini file.
+ * The ini file provides the default tensor filter types for the
+ * MLAPI backend. Please refer to the /etc/inference/inference_engine_mlapi_backend.ini file.
+ */
+ int LoadConfigFile(void);
+
+ /**
* @brief Load a backend engine library with a given backend name.
* @details This callback loads a backend engine library with a given backend name.
* In order to find a backend engine library corresponding to the given backend name,
* @since_tizen 6.0
* @param[in] backend_type An enumeration value which indicates one of the backend types - refer to inference_backend_type_e.
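+ * @param[in] device_type A device type used to pick the default tensor filter type when backend_type is INFERENCE_BACKEND_MLAPI - refer to inference_target_type_e.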
*/
- int BindBackend(int backend_type);
+ int BindBackend(int backend_type, int device_type);
/**
* @brief Unload a backend engine library.
private:
int InitBackendEngine(const std::string &backend_path,
- int backend_type);
+ int backend_type, int device_type);
int CheckTensorBuffers(
std::vector<inference_engine_tensor_buffer> &buffers);
int CheckLayerProperty(inference_engine_layer_property &property);
// In default, we use profiler.
bool mUseProfiler;
unsigned int mProfilerDumpType;
+ // 0 : default tensor filter type for MLAPI with CPU and GPU.
+ // 1 : default tensor filter type for MLAPI with NPU.
+ uint32_t mDefault_MLAPI_Backend[2];
protected:
void *mBackendModule;
--- /dev/null
+/**
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INFERENCE_ENGINE_PRIVATE_TYPE_H__
+#define __INFERENCE_ENGINE_PRIVATE_TYPE_H__
+
+typedef enum {
+ INFERENCE_BACKEND_NPU_NONE = -1,
+ INFERENCE_BACKEND_NPU_VIVANTE, /**< Vivante NPU. */
+ INFERENCE_BACKEND_NPU_TRIV2, /**< TRIV2 NPU. */
+ INFERENCE_BACKEND_NPU_MAX
+} inference_backend_npu_type_e;
+
+#endif /* __INFERENCE_ENGINE_PRIVATE_TYPE_H__ */
*/
typedef struct _inference_engine_config {
std::string backend_name; /**< a backend name which could be one among supported backends(tflite, opencv, armnn, dldt, nnstreamer) */
- int backend_type; /**< a tensor filter plugin type for NNStreamer if a backend is NNStreamer. */
+ int backend_type; /**< a tensor filter plugin type for MLAPI if a backend is MLAPI. Note: if backend_type is -1 then the inference engine framework updates backend_type internally according to the given backend_name. */
int target_devices; /**< which device or devices to be targeted for inference. (Please, refer to inference_target_type_e) */
// TODO.
} inference_engine_config;
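With this change, a caller may leave backend_type unresolved. A minimal
sketch, assuming the MLAPI backend on CPU:

    // backend_type = -1 lets the framework resolve the tensor filter
    // type from backend_name internally (see BindBackend()).
    inference_engine_config config = {
        .backend_name = "mlapi",
        .backend_type = -1,
        .target_devices = INFERENCE_TARGET_CPU,
    };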
#include "inference_engine_error.h"
#include "inference_engine_common_impl.h"
+#include "inference_engine_private_type.h"
#include "inference_engine_ini.h"
#include <fstream>
#include <iostream>
#include <time.h>
#include <dlfcn.h>
#include <experimental/filesystem>
+#include <iniparser.h>
extern "C"
{
{
namespace Common
{
+ const char *INFERENCE_MLAPI_INI_FILENAME =
+ "/etc/inference/inference_engine_mlapi_backend.ini";
+
InferenceEngineCommon::InferenceEngineCommon() :
mSelectedBackendEngine(INFERENCE_BACKEND_NONE),
mProfiler(),
mBackendHandle()
{
LOGI("ENTER");
LOGI("LEAVE");
}
LOGW("LEAVE");
}
+ int InferenceEngineCommon::LoadConfigFile(void)
+ {
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
+ int npu_type = -1, cpu_and_gpu_type = -1;
+ char *default_type_for_npu = NULL, *default_type_for_cpu_gpu = NULL;
+
+ dictionary *dict = iniparser_load(INFERENCE_MLAPI_INI_FILENAME);
+ if (dict == NULL) {
+ LOGE("Fail to load %s file.\n", INFERENCE_MLAPI_INI_FILENAME);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ default_type_for_npu = (char *)iniparser_getstring(dict,
+ "inference mlapi npu backend:default tensor filter type",
+ NULL);
+ if (default_type_for_npu == NULL) {
+ LOGE("Fail to load default tensor filter type for MLAPI with NPU.");
+ ret = INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ goto out;
+ }
+
+ default_type_for_cpu_gpu = (char *)iniparser_getstring(dict,
+ "inference mlapi cpu and gpu backend:default tensor filter type",
+ NULL);
+ if (default_type_for_cpu_gpu == NULL) {
+ LOGE("Fail to load default tensor filter type for MLAPI with CPU and GPU.");
+ ret = INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ goto out;
+ }
+
+ npu_type = atoi(default_type_for_npu);
+ cpu_and_gpu_type = atoi(default_type_for_cpu_gpu);
+
+ // Check if the loaded configuration values are valid.
+ if (npu_type != INFERENCE_BACKEND_NPU_VIVANTE &&
+ npu_type != INFERENCE_BACKEND_NPU_TRIV2) {
+ LOGE("Invalid tensor filter type for MLAPI with NPU.");
+ ret = INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ goto out;
+ }
+
+ if (cpu_and_gpu_type != INFERENCE_BACKEND_TFLITE) {
+ LOGE("Invalid tensor filter type for MLAPI with CPU and GPU.");
+ ret = INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ goto out;
+ }
+
+ LOGI("npu = %d, gpu = %d", npu_type, cpu_and_gpu_type);
+
+ // Update mDefault_MLAPI_Backend with the values from the ini config file.
+ // 0 : default tensor filter type for MLAPI with CPU and GPU.
+ // 1 : default tensor filter type for MLAPI with NPU.
+ mDefault_MLAPI_Backend[0] = cpu_and_gpu_type;
+ mDefault_MLAPI_Backend[1] = npu_type;
+
+out:
+ iniparser_freedict(dict);
+
+ return ret;
+ }
+
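For clarity, a minimal sketch of the new call order, mirroring the unit
tests updated below (backend name and target device are illustrative):

    auto engine = std::make_unique<InferenceEngineCommon>();

    // LoadConfigFile() must run before BindBackend() so that
    // mDefault_MLAPI_Backend[] is populated from the ini file.
    int ret = engine->LoadConfigFile();
    if (ret != INFERENCE_ENGINE_ERROR_NONE)
        return ret;

    inference_engine_config config = { "mlapi", -1, INFERENCE_TARGET_CPU };
    ret = engine->BindBackend(&config);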
int InferenceEngineCommon::CheckTensorBuffers(
std::vector<inference_engine_tensor_buffer> &buffers)
{
int
InferenceEngineCommon::InitBackendEngine(const std::string &backend_path,
- int backend_type)
+ int backend_type, int device_type)
{
LOGI("lib: %s", backend_path.c_str());
mBackendModule = dlopen(backend_path.c_str(), RTLD_NOW);
return INFERENCE_ENGINE_ERROR_INTERNAL;
}
- // If a backend is ML Single API of NNStreamer or ONE then set a tensor filter plugin type.
- if (backend_type == INFERENCE_BACKEND_ONE ||
- backend_type == INFERENCE_BACKEND_MLAPI) {
- int ret = mBackendHandle->SetPrivateData(&backend_type);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Failed to set a tensor filter plugin.");
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
- }
+ LOGI("backend_type = %d, device_type = %d", backend_type, device_type);
+
+ // Update the tensor filter type if the given backend type is MLAPI.
+ if (backend_type == INFERENCE_BACKEND_MLAPI) {
+ if (device_type & (INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU))
+ backend_type = mDefault_MLAPI_Backend[0];
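+ // INFERENCE_TARGET_CUSTOM indicates an NPU device; it takes precedence over CPU/GPU.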
+ if (device_type & INFERENCE_TARGET_CUSTOM)
+ backend_type = mDefault_MLAPI_Backend[1];
+
+ LOGI("tensor filter type is %d\n", backend_type);
+ }
+
+ int ret = mBackendHandle->SetPrivateData(&backend_type);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Failed to set a tensor filter plugin type for MLAPI.");
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
}
return INFERENCE_ENGINE_ERROR_NONE;
std::string backendLibName =
"libinference-engine-" + config->backend_name + ".so";
- int ret = InitBackendEngine(backendLibName, config->backend_type);
+ // If backend_type of config is -1 then resolve it from backend_name.
+ if (config->backend_type == -1) {
+ std::map<std::string, int> BackendTable;
+
+ BackendTable.insert(std::make_pair("tflite", INFERENCE_BACKEND_TFLITE));
+ BackendTable.insert(std::make_pair("armnn", INFERENCE_BACKEND_ARMNN));
+ BackendTable.insert(std::make_pair("opencv", INFERENCE_BACKEND_OPENCV));
+ BackendTable.insert(std::make_pair("mlapi", INFERENCE_BACKEND_MLAPI));
+
+ // Guard against an unknown backend name; find() may return end().
+ auto iter = BackendTable.find(config->backend_name);
+ if (iter == BackendTable.end()) {
+ LOGE("Invalid backend name, %s.", config->backend_name.c_str());
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ config->backend_type = iter->second;
+ }
+
+ int ret = InitBackendEngine(backendLibName, config->backend_type, config->target_devices);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
return ret;
}
return INFERENCE_ENGINE_ERROR_NONE;
}
- int InferenceEngineCommon::BindBackend(int backend_type)
+ int InferenceEngineCommon::BindBackend(int backend_type, int device_type)
{
LOGI("ENTER");
std::string backendLibName =
"libinference-engine-" + backendNameTable[backend_type] + ".so";
- int ret = InitBackendEngine(backendLibName, backend_type);
+ int ret = InitBackendEngine(backendLibName, backend_type, device_type);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
return ret;
}
<< ", target device = " << Target_Formats[target_devices]
<< std::endl;
inference_engine_config config = { .backend_name = backend_name,
- .backend_type = 0,
+ .backend_type = -1,
.target_devices = target_devices };
auto engine = std::make_unique<InferenceEngineCommon>();
return;
}
+ ret = engine->LoadConfigFile();
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
ret = engine->BindBackend(&config);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
<< ", target device = " << Target_Formats[target_devices]
<< std::endl;
inference_engine_config config = { .backend_name = backend_name,
- .backend_type = 0,
+ .backend_type = -1,
.target_devices = target_devices };
auto engine = std::make_unique<InferenceEngineCommon>();
return;
}
+ ret = engine->LoadConfigFile();
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
ret = engine->BindBackend(&config);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
<< ", target device = " << Target_Formats[target_devices]
<< std::endl;
inference_engine_config config = { .backend_name = backend_name,
- .backend_type = 0,
+ .backend_type = -1,
.target_devices = target_devices };
auto engine = std::make_unique<InferenceEngineCommon>();
return;
}
+ ret = engine->LoadConfigFile();
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
ret = engine->BindBackend(&config);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
{ "MobilenetV1/Predictions/Reshape_1" },
- { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
{ 955 }),
ParamType_Infer(
"armnn", INFERENCE_TARGET_GPU,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
{ "MobilenetV1/Predictions/Reshape_1" },
- { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
{ 955 }),
// object detection test
ParamType_Infer(
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
{ "MobilenetV1/Predictions/Reshape_1" },
- { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
{ 955 }),
ParamType_Infer(
"tflite", INFERENCE_TARGET_GPU,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
{ "MobilenetV1/Predictions/Reshape_1" },
- { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
{ 955 }),
// object detection test
ParamType_Infer(
{ "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ // TFLITE via MLAPI.
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ // object detection test
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ // face detection test
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ // pose estimation test
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 })
/* TODO */
));
#include "inference_engine_error.h"
#include "inference_engine_common_impl.h"
+#include "inference_engine_private_type.h"
#include "inference_engine_test_common.h"
enum
static auto InferenceEngineInit_One_Param =
[](InferenceEngineCommon *engine, std::string &backend_name) -> int {
- inference_engine_config config = { backend_name, 0, 0 };
+ inference_engine_config config = { backend_name, -1, 0 };
+
+ int ret = engine->LoadConfigFile();
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ return ret;
return engine->BindBackend(&config);
};
static auto InferenceEngineInit_Two_Params = [](InferenceEngineCommon *engine,
std::string &backend_name,
int &target_devices) -> int {
- inference_engine_config config = { backend_name, 0, target_devices };
-
- // backend_type is valid only in case backend_name is "mlapi".
- if (!backend_name.compare("mlapi")) {
- if (!(target_devices & INFERENCE_TARGET_CUSTOM))
- config.backend_type = INFERENCE_BACKEND_ONE;
- else
- config.backend_type = INFERENCE_BACKEND_MLAPI;
- }
+ inference_engine_config config = { backend_name, -1, target_devices };
+
+ int ret = engine->LoadConfigFile();
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ return ret;
- int ret = engine->BindBackend(&config);
+ ret = engine->BindBackend(&config);
if (ret != INFERENCE_ENGINE_ERROR_NONE)
return ret;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
- int ret = engine->BindBackend(backend_type);
+ int ret = engine->LoadConfigFile();
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->BindBackend(backend_type, 0);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
engine->UnbindBackend();
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
- int ret = engine->BindBackend(backend_type);
+ int ret = engine->LoadConfigFile();
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->BindBackend(backend_type, 0);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
}
// ML Single API for NNStreamer.
ParamType_Three(
"mlapi", INFERENCE_TARGET_CPU,
- { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite" })
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" })
/* TODO */
));
{ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
"/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
{ 281 }),
- // ML Single API for NNStreamer.
+ // ONE.
ParamType_Many(
"mlapi", INFERENCE_ENGINE_PROFILER_OFF,
INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" },
224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" },
- { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
{ 955 }),
// ARMNN.
ParamType_Many(
{ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
"/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
{ 281 }),
- // ML Single API for NNStreamer.
+ // ONE.
ParamType_Many(
"mlapi", INFERENCE_ENGINE_PROFILER_FILE,
INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" },
224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" },
- { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
{ 955 }),
// ARMNN.
ParamType_Many(
{ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
"/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
{ 281 }),
- // ML Single API for NNStreamer.
+ // ONE.
ParamType_Many(
"mlapi", INFERENCE_ENGINE_PROFILER_CONSOLE,
INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" },
224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" },
- { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_model.tflite" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
{ 955 })
/* TODO */
));