return ret;
}
-int InferenceEngineCommon::Load(std::vector<std::string> model_paths, unsigned int num_of_models)
+int InferenceEngineCommon::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
{
LOGI("ENTER");
- // TODO. Consider multiple models.
-
- std::string model = model_paths.front();
- LOGI("Load %s model.", model.c_str());
-
- int ret = engine->Load(model_paths, num_of_models);
+ int ret = engine->Load(model_paths, model_format);
if (ret != INFERENCE_ENGINE_ERROR_NONE)
LOGE("Fail to load InferenceEngineVision");
*
* @since_tizen 6.0
*/
- virtual int Load(std::vector<std::string> model_paths, unsigned int num_of_models) { return 0; }
+ virtual int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) { return 0; }
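A backend adopts the new signature by overriding this hook. A minimal sketch, assuming the interface class is named IInferenceEngineCommon and that an INFERENCE_ENGINE_ERROR_INVALID_PARAMETER code exists alongside INFERENCE_ENGINE_ERROR_NONE (both of those names are assumptions here):

    class TFLiteEngine : public IInferenceEngineCommon {
    public:
        // Accept only the TFLite format; other formats belong to other backends.
        int Load(std::vector<std::string> model_paths,
                 inference_model_format_e model_format) override
        {
            if (model_format != INFERENCE_MODEL_TFLITE)
                return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;  // assumed error code
            // model_paths[0] holds the single .tflite file for this format.
            return INFERENCE_ENGINE_ERROR_NONE;
        }
    };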
/**
* @brief Create a memory. Deprecated.
*
* @since_tizen 6.0
*/
- int Load(std::vector<std::string> model_paths, unsigned int num_of_models);
+ int Load(std::vector<std::string> model_paths, inference_model_format_e model_format);
/**
* @brief Get an input layer's type such as float32, float16, and so on. Deprecated.
INFERENCE_TARGET_MAX = 1 << 3,
} inference_target_type_e;
+typedef enum {
+ INFERENCE_MODEL_NONE = 0,
+ INFERENCE_MODEL_CAFFE, /**< CAFFE. *.prototxt config file is needed. */
+ INFERENCE_MODEL_TF, /**< Tensorflow. *.pbtxt config file is needed. */
+ INFERENCE_MODEL_TFLITE, /**< Tensorflow-Lite. */
+ INFERENCE_MODEL_TORCH, /**< Torch. */
+ INFERENCE_MODEL_DARKNET, /**< Darknet. *.cfg config file is needed. */
+ INFERENCE_MODEL_DLDT, /**< DLDT. *.xml config file is needed. */
+ INFERENCE_MODEL_ONNX, /**< ONNX. */
+ INFERENCE_MODEL_MAX
+} inference_model_format_e;
+
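With the format enum in place, a caller states what it is loading rather than how many files it passes. A minimal usage sketch against an engine handle (the paths below are hypothetical):

    std::vector<std::string> model_paths = {
        "/path/to/model.tflite",  // hypothetical model file
        "/path/to/label.txt"      // label file; index 1 for TFLite
    };
    int ret = engine->Load(model_paths, INFERENCE_MODEL_TFLITE);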
/**
* @brief Enumeration for tensor shape type.
*
*
* @since_tizen 6.0
*/
- int Load(std::vector<std::string> model_paths, unsigned int num_of_models);
+ int Load(std::vector<std::string> model_paths, inference_model_format_e model_format);
/**
* @brief Get capacity from a given backend engine.
return ret;
}
-int InferenceEngineVision::Load(std::vector<std::string> model_paths, unsigned int num_of_models)
+int InferenceEngineVision::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
{
LOGI("ENTER");
- // TODO. Consider 'num_of_models > 1' case.
- // Load label data.
- std::string label_file = model_paths[1];
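+ // TFLite and Torch models come as a single file, so the label file follows
+ // at index 1; the other formats pass an extra config file in model_paths,
+ // which shifts the label file to index 2.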
+ std::string label_file;
+ if (model_format == INFERENCE_MODEL_TFLITE || model_format == INFERENCE_MODEL_TORCH) {
+ label_file = model_paths[1];
+ } else {
+ label_file = model_paths[2];
+ }
+
size_t userFileLength = label_file.length();
if (userFileLength > 0 && access(label_file.c_str(), F_OK)) {
LOGE("Label file path in [%s] ", label_file.c_str());
}
// Load model files.
- ret = mCommonEngine->Load(model_paths, num_of_models);
+ ret = mCommonEngine->Load(model_paths, model_format);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
LOGE("Fail to load InferenceEngineVision");
return ret;