From: Inki Dae
Date: Tue, 8 Jun 2021 03:49:48 +0000 (+0900)
Subject: Consider user-given property info first
X-Git-Tag: submit/tizen/20220105.080154~7
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8f5f3b5c447fa0889a9f03d68c8a9df7c0c24bba;p=platform%2Fcore%2Fmultimedia%2Finference-engine-mlapi.git

Consider user-given property info first

[Version] : 0.0.2-2
[Issue type] : bug fix

Consider user-given property info first if it exists.

In case of ONERT, the backend doesn't provide input and output tensor
names, so we have to use the names given by the user instead. This
patch checks whether user-given property information exists and, if
so, takes the tensor names from that property information.

Change-Id: If2903026fe15dc3664591c0e4e472cf5cb2991e4
Signed-off-by: Inki Dae
---

diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index 1ac601c..f6f46a2 100644
--- a/packaging/inference-engine-mlapi.spec
+++ b/packaging/inference-engine-mlapi.spec
@@ -1,6 +1,6 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.0.1
+Version:    0.0.2
 Release:    2
 Group:      Multimedia/Libraries
 License:    Apache-2.0
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index 397a029..39b24e9 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -220,6 +220,28 @@ namespace MLAPIImpl
 			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 		}
 
+		if (mInputInfoHandle) {
+			ml_tensors_info_destroy(mInputInfoHandle);
+			mInputInfoHandle = NULL;
+		}
+
+		err = ml_single_get_input_info(mSingle, &mInputInfoHandle);
+		if (err != ML_ERROR_NONE) {
+			LOGE("Failed to request ml_single_get_input_info(%d).", err);
+			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+		}
+
+		if (mOutputInfoHandle) {
+			ml_tensors_info_destroy(mOutputInfoHandle);
+			mOutputInfoHandle = NULL;
+		}
+
+		err = ml_single_get_output_info(mSingle, &mOutputInfoHandle);
+		if (err != ML_ERROR_NONE) {
+			LOGE("Failed to request ml_single_get_output_info(%d).", err);
+			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+		}
+
 		err = UpdateTensorsInfo();
 		if (err != INFERENCE_ENGINE_ERROR_NONE) {
 			ml_single_close(mSingle);
@@ -599,84 +621,83 @@ namespace MLAPIImpl
 			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 		}
 
-		if (mInputInfoHandle) {
-			ml_tensors_info_destroy(mInputInfoHandle);
-			mInputInfoHandle = NULL;
-		}
-
-		if (mOutputInfoHandle) {
-			ml_tensors_info_destroy(mOutputInfoHandle);
-			mOutputInfoHandle = NULL;
-		}
+		unsigned int input_tensor_cnt = 0;
 
-		int ret = ml_single_get_input_info(mSingle, &mInputInfoHandle);
-		if (ret != ML_ERROR_NONE) {
-			LOGE("Failed to request ml_single_get_input_info(%d).", ret);
-			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-		}
-
-		unsigned int cnt = 0;
-		ret = ml_tensors_info_get_count(mInputInfoHandle, &cnt);
-		if (ret != ML_ERROR_NONE || !cnt) {
-			LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
-			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-		}
-
-		LOGI("input tensor count = %u", cnt);
-		mDesignated_inputs.clear();
-		std::map<std::string, int>().swap(mDesignated_inputs);
-		for (unsigned int index = 0; index < cnt; ++index) {
-			char *in_name = NULL;
-			ret = ml_tensors_info_get_tensor_name(mInputInfoHandle, index, &in_name);
-			LOGI("index:%d with name %s", index, in_name);
-			if (ret != ML_ERROR_NONE) {
-				LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
-					 ret);
+		// If user-given input layer information exists then use it.
+		if (!mInputProperty.layers.empty()) {
+			for (auto& iter : mInputProperty.layers) {
+				LOGI("index:%d with name %s", input_tensor_cnt, iter.first.c_str());
+				mDesignated_inputs.insert(std::make_pair(iter.first, input_tensor_cnt));
+				input_tensor_cnt++;
+			}
+		// Otherwise, request input layer information to tensor filter.
+		} else {
+			int ret = ml_tensors_info_get_count(mInputInfoHandle, &input_tensor_cnt);
+			if (ret != ML_ERROR_NONE || !input_tensor_cnt) {
+				LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 			}
 
-			if (in_name == NULL)
-				continue;
+			for (unsigned int index = 0; index < input_tensor_cnt; ++index) {
+				char *in_name = NULL;
+				ret = ml_tensors_info_get_tensor_name(mInputInfoHandle, index, &in_name);
+				LOGI("index:%d with name %s", index, in_name);
+				if (ret != ML_ERROR_NONE) {
+					LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).", ret);
+					return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+				}
 
-			mDesignated_inputs.insert(std::make_pair(std::string(in_name), index));
-			free(in_name);
-		}
+				if (in_name == NULL)
+					continue;
 
-		ret = ml_single_get_output_info(mSingle, &mOutputInfoHandle);
-		if (ret != ML_ERROR_NONE) {
-			LOGE("Failed to request ml_single_get_output_info(%d).", ret);
-			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+				mDesignated_inputs.insert(std::make_pair(std::string(in_name), index));
+				free(in_name);
+			}
 		}
 
-		cnt = 0;
-		ret = ml_tensors_info_get_count(mOutputInfoHandle, &cnt);
-		if (ret != ML_ERROR_NONE || cnt == 0) {
-			LOGE("Failed to request ml_tensors_info_get_count(%d) with cnt %u.", ret, cnt);
-			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-		}
+		LOGI("input tensor count = %u", input_tensor_cnt);
 
-		LOGI("output tensor count = %u", cnt);
-		mDesignated_outputs.clear();
-		std::map<std::string, int>().swap(mDesignated_outputs);
-		for (unsigned int index = 0; index < cnt; ++index) {
-			char *out_name = NULL;
-			ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, index, &out_name);
-			LOGI("index:%u with name %s", index, out_name);
-			if (ret != ML_ERROR_NONE) {
-				LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
-					 ret);
+		unsigned int output_tensor_cnt = 0;
+
+		// If user-given output layer information exists then use it.
+		if (!mOutputProperty.layers.empty()) {
+			int index = 0;
+			for (auto& iter : mOutputProperty.layers) {
+				LOGI("index:%d with name %s", index, iter.first.c_str());
+				mDesignated_outputs.insert(std::make_pair(iter.first, index));
+				index++;
+			}
+
+			output_tensor_cnt = index;
+		// Otherwise, request output layer information to tensor filter.
+		} else {
+			int ret = ml_tensors_info_get_count(mOutputInfoHandle, &output_tensor_cnt);
+			if (ret != ML_ERROR_NONE || output_tensor_cnt == 0) {
+				LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 			}
 
-			if (out_name == NULL)
-				continue;
+			for (unsigned int index = 0; index < output_tensor_cnt; ++index) {
+				char *out_name = NULL;
 
-			mDesignated_outputs.insert(std::make_pair(std::string(out_name), index));
-			if (out_name != NULL){
+				ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, index, &out_name);
+				LOGI("index:%u with name %s", index, out_name);
+				if (ret != ML_ERROR_NONE) {
+					LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+						 ret);
+					return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+				}
+
+				if (out_name == NULL)
+					continue;
+
+				mDesignated_outputs.insert(std::make_pair(std::string(out_name), index));
 				free(out_name);
 			}
 		}
 
+		LOGI("output tensor count = %u", output_tensor_cnt);
+
 		LOGI("LEAVE");
 
 		return INFERENCE_ENGINE_ERROR_NONE;
 	}
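
The heart of this patch is the lookup order in UpdateTensorsInfo(): layer
names from the user-given property (mInputProperty.layers /
mOutputProperty.layers) are consulted first, and the tensor filter is queried
only when no names were supplied. The standalone C++ sketch below distills
that order; BuildLayerMap(), user_layers and query_filter are hypothetical
stand-ins for the backend's members and for the
ml_tensors_info_get_count()/ml_tensors_info_get_tensor_name() fallback path,
not code from this repository.

// Standalone sketch of the name-resolution order introduced by this patch.
// BuildLayerMap(), user_layers and query_filter are hypothetical stand-ins
// for mInputProperty.layers/mOutputProperty.layers and for the
// ml_tensors_info_get_tensor_name() fallback; they are not repository code.
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

using LayerMap = std::map<std::string, unsigned int>;

static LayerMap BuildLayerMap(const std::vector<std::string> &user_layers,
			      const std::function<std::vector<std::string>()> &query_filter)
{
	LayerMap designated;
	unsigned int index = 0;

	if (!user_layers.empty()) {
		// User-given property info takes priority.
		for (const auto &name : user_layers)
			designated.insert(std::make_pair(name, index++));
	} else {
		// Otherwise fall back to the names reported by the tensor filter.
		for (const auto &name : query_filter())
			designated.insert(std::make_pair(name, index++));
	}

	return designated;
}

int main(void)
{
	// ONERT-like case: the filter reports no tensor names, so the
	// user-given name must provide the name-to-index mapping.
	auto filter_names = [] { return std::vector<std::string>(); };
	LayerMap inputs = BuildLayerMap({ "input_tensor" }, filter_names);

	for (const auto &entry : inputs)
		std::cout << entry.second << " -> " << entry.first << '\n';

	return 0;
}

With this ordering, a backend such as ONERT whose tensor filter reports no
tensor names still ends up with a fully populated name-to-index map, provided
the caller supplied layer names through the property info.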