From: Inki Dae
Date: Wed, 5 Jan 2022 04:13:58 +0000 (+0900)
Subject: add NNTRAINER backend support
X-Git-Tag: submit/tizen/20220105.080154~1
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=refs%2Fchanges%2F37%2F268937%2F3;p=platform%2Fcore%2Fmultimedia%2Finference-engine-mlapi.git

add NNTRAINER backend support

[Version] : 0.4.0-0
[Issue type] : new feature

Added NNTRAINER backend support. NNTRAINER is a training engine that
also provides inference for its internally trained models.

Change-Id: If20ccbf8b709f0af6ac6b71f53c5995b6ec05a4c
Signed-off-by: Inki Dae
---

diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index 97e668c..6be3452 100644
--- a/packaging/inference-engine-mlapi.spec
+++ b/packaging/inference-engine-mlapi.spec
@@ -1,6 +1,6 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.3.2
+Version:    0.4.0
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index e119e57..c079ee3 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -233,6 +233,10 @@ namespace MLAPIImpl
 			LOGI("SNPE tensor filter will be used.");
 			return std::make_tuple(ML_NNFW_TYPE_SNPE, ML_NNFW_HW_ANY);
 
+		case INFERENCE_BACKEND_NNTRAINER:
+			LOGI("NNTRAINER tensor filter will be used.");
+			return std::make_tuple(ML_NNFW_TYPE_NNTR_INF, ML_NNFW_HW_ANY);
+
 		default:
 			LOGE("Invalid plugin type.");
 			throw std::invalid_argument("invalid tensor type.");
@@ -268,6 +272,8 @@ namespace MLAPIImpl
 		case INFERENCE_BACKEND_TFLITE:
 			/* fall through */
 		case INFERENCE_BACKEND_SNPE:
+			/* fall through */
+		case INFERENCE_BACKEND_NNTRAINER:
 			if (!IsFileReadable(model_paths[0]))
 				throw std::runtime_error("invalid path");
 			return model_paths[0];
@@ -326,7 +332,7 @@ namespace MLAPIImpl
 		int err = ml_single_open_full(&mSingle, model_str.c_str(), in_info,
 					      out_info, nnfw_type, nnfw_hw, GetCustomProp());
 		if (err != ML_ERROR_NONE) {
-			LOGE("Failed to request ml_single_open(%d).", err);
+			LOGE("Failed to request ml_single_open_full(%d).", err);
			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 		}
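
For context, a minimal standalone sketch (not part of this change) of the call
path this commit enables: an NNTRAINER model is opened through the same
ml_single_open_full() entry point, with the (ML_NNFW_TYPE_NNTR_INF,
ML_NNFW_HW_ANY) pair returned by the new INFERENCE_BACKEND_NNTRAINER case.
The model path "model.ini" is a hypothetical placeholder, and passing NULL
tensor info assumes the tensor filter can derive it from the model; real
callers go through this backend rather than calling the API directly.

	/* Hedged sketch; headers and link flags come from the ml-api
	 * (NNStreamer single-shot) pkg-config on Tizen. */
	#include <nnstreamer.h>
	#include <nnstreamer-single.h>
	#include <cstdio>

	int main()
	{
		ml_single_h single_h = nullptr;

		/* "model.ini" is a hypothetical NNTRAINER model path.
		 * NULL in/out tensor info assumes the model describes its
		 * own tensors; NULL custom properties mirrors an empty
		 * GetCustomProp() in the backend above. */
		int err = ml_single_open_full(&single_h, "model.ini",
					      nullptr, nullptr,
					      ML_NNFW_TYPE_NNTR_INF,
					      ML_NNFW_HW_ANY, nullptr);
		if (err != ML_ERROR_NONE) {
			fprintf(stderr,
				"Failed to request ml_single_open_full(%d).\n",
				err);
			return 1;
		}

		ml_single_close(single_h);
		return 0;
	}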