From: Inki Dae
Date: Tue, 11 Feb 2020 06:25:53 +0000 (+0900)
Subject: Pass model file format to backend engine
X-Git-Tag: submit/tizen/20200423.063253~61
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=b353e086596b57f9ab798f762e31120ba20fad0d;p=platform%2Fcore%2Fmultimedia%2Finference-engine-interface.git

Pass model file format to backend engine

This patch passes the model file format coming from the Inference layer
down to the backend engine. To support this, it adds enumeration values
that indicate the various model file formats.

Change-Id: I11b141399498c73cda6a34350a6f6fd3eac74a17
Signed-off-by: Inki Dae
---

diff --git a/common/inference_engine_common_impl.cpp b/common/inference_engine_common_impl.cpp
index 0501c6c..503ecc1 100755
--- a/common/inference_engine_common_impl.cpp
+++ b/common/inference_engine_common_impl.cpp
@@ -162,16 +162,11 @@ int InferenceEngineCommon::SetTargetDevices(int types)
     return ret;
 }
 
-int InferenceEngineCommon::Load(std::vector<std::string> model_paths, unsigned int num_of_models)
+int InferenceEngineCommon::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
 {
     LOGI("ENTER");
 
-    // TODO. Consider multiple models.
-
-    std::string model = model_paths.front();
-    LOGI("Load %s model.", model.c_str());
-
-    int ret = engine->Load(model_paths, num_of_models);
+    int ret = engine->Load(model_paths, model_format);
     if (ret != INFERENCE_ENGINE_ERROR_NONE)
         LOGE("Fail to load InferenceEngineVision");
 
diff --git a/include/inference_engine_common.h b/include/inference_engine_common.h
index 168acf1..886d431 100755
--- a/include/inference_engine_common.h
+++ b/include/inference_engine_common.h
@@ -57,7 +57,7 @@ public:
      *
      * @since_tizen 6.0
      */
-    virtual int Load(std::vector<std::string> model_paths, unsigned int num_of_models) { return 0; }
+    virtual int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) { return 0; }
 
     /**
      * @brief Create a memory. Deprecated.
diff --git a/include/inference_engine_common_impl.h b/include/inference_engine_common_impl.h
index cbd0fb0..a299205 100755
--- a/include/inference_engine_common_impl.h
+++ b/include/inference_engine_common_impl.h
@@ -86,7 +86,7 @@ public:
      *
      * @since_tizen 6.0
      */
-    int Load(std::vector<std::string> model_paths, unsigned int num_of_models);
+    int Load(std::vector<std::string> model_paths, inference_model_format_e model_format);
 
     /**
      * @brief Get an input layer's type such as float32, float16, and so on. Deprecated.
diff --git a/include/inference_engine_type.h b/include/inference_engine_type.h
index c2bd4b2..925605d 100644
--- a/include/inference_engine_type.h
+++ b/include/inference_engine_type.h
@@ -55,6 +55,18 @@ typedef enum {
     INFERENCE_TARGET_MAX = 1 << 3,
 } inference_target_type_e;
 
+typedef enum {
+    INFERENCE_MODEL_NONE = 0,
+    INFERENCE_MODEL_CAFFE,   /**< CAFFE. *.prototxt config file is needed. */
+    INFERENCE_MODEL_TF,      /**< Tensorflow. *.pbtxt config file is needed. */
+    INFERENCE_MODEL_TFLITE,  /**< Tensorflow-Lite. */
+    INFERENCE_MODEL_TORCH,   /**< Torch */
+    INFERENCE_MODEL_DARKNET, /**< Darknet. *.cfg config file is needed. */
+    INFERENCE_MODEL_DLDT,    /**< DLDT. *.xml config file is needed. */
+    INFERENCE_MODEL_ONNX,    /**< ONNX */
+    INFERENCE_MODEL_MAX
+} inference_model_format_e;
+
 /**
  * @brief Enumeration for tensor shape type.
  *
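For reference, the sketch below (not part of the patch) shows how a caller might
pick one of the new inference_model_format_e values from a model file's
extension. The helper name GetModelFormatFromPath and the extension-to-format
mapping are illustrative assumptions, not an API defined by this repository.

// Hypothetical helper, for illustration only: derive the model format
// added by this patch from a model file's extension. The mapping below
// is a guess based on common file extensions, not project policy.
#include <string>

#include "inference_engine_type.h"

static inference_model_format_e GetModelFormatFromPath(const std::string &path)
{
    size_t pos = path.rfind('.');
    if (pos == std::string::npos)
        return INFERENCE_MODEL_NONE;

    const std::string ext = path.substr(pos + 1);

    if (ext == "tflite")     return INFERENCE_MODEL_TFLITE;
    if (ext == "pb")         return INFERENCE_MODEL_TF;
    if (ext == "caffemodel") return INFERENCE_MODEL_CAFFE;
    if (ext == "pt")         return INFERENCE_MODEL_TORCH;
    if (ext == "weights")    return INFERENCE_MODEL_DARKNET;
    if (ext == "bin")        return INFERENCE_MODEL_DLDT;
    if (ext == "onnx")       return INFERENCE_MODEL_ONNX;

    return INFERENCE_MODEL_NONE;
}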
diff --git a/include/inference_engine_vision_impl.h b/include/inference_engine_vision_impl.h
index f231fe4..ff327ae 100755
--- a/include/inference_engine_vision_impl.h
+++ b/include/inference_engine_vision_impl.h
@@ -151,7 +151,7 @@ public:
      *
      * @since_tizen 6.0
      */
-    int Load(std::vector<std::string> model_paths, unsigned int num_of_models);
+    int Load(std::vector<std::string> model_paths, inference_model_format_e model_format);
 
     /**
      * @brief Get capacity from a given backend engine.
diff --git a/vision/inference_engine_vision_impl.cpp b/vision/inference_engine_vision_impl.cpp
index 04b75fc..b6100c6 100755
--- a/vision/inference_engine_vision_impl.cpp
+++ b/vision/inference_engine_vision_impl.cpp
@@ -250,13 +250,17 @@ int InferenceEngineVision::SetOutputTensorParamNodes(std::vector<std::string> no
     return ret;
 }
 
-int InferenceEngineVision::Load(std::vector<std::string> model_paths, unsigned int num_of_models)
+int InferenceEngineVision::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
 {
     LOGI("ENTER");
 
-    // TODO. Consider 'num_of_models > 1' case.
-    // Load label data.
-    std::string label_file = model_paths[1];
+    std::string label_file;
+    if (model_format == INFERENCE_MODEL_TFLITE || model_format == INFERENCE_MODEL_TORCH) {
+        label_file = model_paths[1];
+    } else {
+        label_file = model_paths[2];
+    }
+
     size_t userFileLength = label_file.length();
     if (userFileLength > 0 && access(label_file.c_str(), F_OK)) {
         LOGE("Label file path in [%s] ", label_file.c_str());
@@ -270,7 +274,7 @@ int InferenceEngineVision::Load(std::vector<std::string> model_paths, unsigned i
     }
 
     // Load model files.
-    ret = mCommonEngine->Load(model_paths, num_of_models);
+    ret = mCommonEngine->Load(model_paths, model_format);
     if (ret != INFERENCE_ENGINE_ERROR_NONE) {
         LOGE("Fail to load InferenceEngineVision");
         return ret;
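For context, a hedged usage sketch of the Load() contract this patch
establishes: TFLITE and TORCH models carry no separate config file, so the
label file sits at model_paths[1]; other formats also pass a config file,
pushing the label file to model_paths[2]. The wrapper functions, the
placeholder paths, and the assumption that the config file sits at index 1
are illustrative only; construction of the vision engine is elided, and any
namespace qualification of InferenceEngineVision is omitted here.

// Usage sketch under the assumptions stated above.
#include <string>
#include <vector>

#include "inference_engine_type.h"
#include "inference_engine_vision_impl.h"

int LoadTfliteModel(InferenceEngineVision &vision)
{
    // TFLITE/TORCH: model file at [0], label file at [1].
    std::vector<std::string> paths = {
        "/path/to/model.tflite", // placeholder path
        "/path/to/labels.txt"    // placeholder path
    };
    return vision.Load(paths, INFERENCE_MODEL_TFLITE);
}

int LoadCaffeModel(InferenceEngineVision &vision)
{
    // Formats with a separate config file: label file moves to [2].
    // Placing the config file at [1] is an assumption for illustration.
    std::vector<std::string> paths = {
        "/path/to/model.caffemodel", // placeholder path
        "/path/to/deploy.prototxt",  // placeholder path
        "/path/to/labels.txt"        // placeholder path
    };
    return vision.Load(paths, INFERENCE_MODEL_CAFFE);
}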