From 2d1356841e56eece0f52305dcd4023f214e08ac1 Mon Sep 17 00:00:00 2001
From: Inki Dae
Date: Fri, 12 Aug 2022 15:34:17 +0900
Subject: [PATCH] drop code duplication

[Version] : 0.4.5-0
[Issue type] : code cleanup

Drop the code duplication for cleanup.

Change-Id: Ic41991cb335b12bd21a6caba159b5b2bc5881576
Signed-off-by: Inki Dae
---
 packaging/inference-engine-mlapi.spec |   2 +-
 src/inference_engine_mlapi.cpp        | 110 ++++++++++++----------------
 src/inference_engine_mlapi_private.h  |   3 +
 3 files changed, 43 insertions(+), 72 deletions(-)

diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index 1bf2368..d37f2c9 100644
--- a/packaging/inference-engine-mlapi.spec
+++ b/packaging/inference-engine-mlapi.spec
@@ -1,6 +1,6 @@
 Name: inference-engine-mlapi
 Summary: ML Single API backend of NNStreamer for MediaVision
-Version: 0.4.4
+Version: 0.4.5
 Release: 0
 Group: Multimedia/Libraries
 License: Apache-2.0
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index b0ee2f5..b23ffe3 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -387,54 +387,29 @@ namespace MLAPIImpl
 		return err;
 	}
 
-	int InferenceMLAPI::GetInputTensorBuffers(
-			std::map &buffers)
+	int InferenceMLAPI::GetTensorInfo(std::map& designated_layers,
+			std::map &buffers,
+			ml_tensors_data_h& dataHandle, ml_tensors_info_h& infoHandle)
 	{
-		LOGI("ENTER");
-
-		// TODO. Implement this function according to a given ML Single API backend properly.
-
-		// ML Single API will always provide internal tensor buffers so
-		// get the tensor buffers back to Mediavision framework so that
-		// Mediavision framework doesn't allocate the tensor buffers internally.
-
-		buffers.clear();
-
-		int ret = INFERENCE_ENGINE_ERROR_NONE;
-
-		// TODO. Below is test code, should we allocate new buffer for every inference?
-		if (mInputDataHandle == NULL) {
-			ret = ml_tensors_data_create(mInputInfoHandle, &mInputDataHandle);
-			if (ret != ML_ERROR_NONE) {
-				LOGE("Failed to request ml_tensors_data_create(%d).", ret);
-				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-			}
-		}
-
-		// TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()
-		for (auto& input : mDesignated_inputs) {
+		for (auto& layer : designated_layers) {
 			inference_engine_tensor_buffer in_buffer;
 			ml_tensor_type_e in_type;
 
-			ret = ml_tensors_data_get_tensor_data(mInputDataHandle, input.second, &in_buffer.buffer, &in_buffer.size);
+			int ret = ml_tensors_data_get_tensor_data(dataHandle, layer.second, &in_buffer.buffer, &in_buffer.size);
 			if (ret != ML_ERROR_NONE) {
 				LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
-				ml_tensors_data_destroy(mInputDataHandle);
-
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 			}
 
 			LOGE("buffer = %p, size = %zu\n", in_buffer.buffer, in_buffer.size);
 
-			ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, input.second, &in_type);
+			ret = ml_tensors_info_get_tensor_type(infoHandle, layer.second, &in_type);
 			if (ret != ML_ERROR_NONE) {
 				LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
-				ml_tensors_data_destroy(mInputDataHandle);
-
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 			}
 
-			LOGI("input tensor type = %d", in_type);
+			LOGI("tensor type = %d", in_type);
 
 			int type = 0;
 
@@ -442,28 +417,24 @@ namespace MLAPIImpl
 				type = ConvertTensorTypeToInternal(in_type);
 			} catch (const std::invalid_argument& ex) {
 				LOGE("Error (%s) (%d)", ex.what(), in_type);
-				ml_tensors_data_destroy(mInputDataHandle);
-
 				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
 			}
 
 			in_buffer.data_type = static_cast(type);
 			in_buffer.owner_is_backend = 1;
 
-			buffers.insert(std::make_pair(input.first, in_buffer));
+			buffers.insert(std::make_pair(layer.first, in_buffer));
 		}
 
-		LOGI("LEAVE");
-
 		return INFERENCE_ENGINE_ERROR_NONE;
 	}
 
-	int InferenceMLAPI::GetOutputTensorBuffers(
+	int InferenceMLAPI::GetInputTensorBuffers(
 			std::map &buffers)
 	{
 		LOGI("ENTER");
 
-		// TODO. Need to check if model file loading is done.
+		// TODO. Implement this function according to a given ML Single API backend properly.
 
 		// ML Single API will always provide internal tensor buffers so
 		// get the tensor buffers back to Mediavision framework so that
 		// Mediavision framework doesn't allocate the tensor buffers internally.
@@ -474,8 +445,8 @@ namespace MLAPIImpl
 		int ret = INFERENCE_ENGINE_ERROR_NONE;
 
 		// TODO. Below is test code, should we allocate new buffer for every inference?
-		if (mOutputDataHandle == NULL) {
-			ret = ml_tensors_data_create(mOutputInfoHandle, &mOutputDataHandle);
+		if (mInputDataHandle == NULL) {
+			ret = ml_tensors_data_create(mInputInfoHandle, &mInputDataHandle);
 			if (ret != ML_ERROR_NONE) {
 				LOGE("Failed to request ml_tensors_data_create(%d).", ret);
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -483,47 +454,44 @@
 		}
 
 		// TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()
-		for (auto& output : mDesignated_outputs) {
-			inference_engine_tensor_buffer out_buffer;
-			ml_tensor_type_e out_type;
-
-			ret = ml_tensors_data_get_tensor_data(mOutputDataHandle, output.second, &out_buffer.buffer, &out_buffer.size);
-			if (ret != ML_ERROR_NONE) {
-				LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
-				ml_tensors_data_destroy(mOutputDataHandle);
+		ret = GetTensorInfo(mDesignated_inputs, buffers, mInputDataHandle, mInputInfoHandle);
+		if (ret != INFERENCE_ENGINE_ERROR_NONE)
+			ml_tensors_data_destroy(mInputDataHandle);
 
-			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-			}
+		LOGI("LEAVE");
 
-			LOGE("buffer = %p, size = %zu\n", out_buffer.buffer, out_buffer.size);
+		return INFERENCE_ENGINE_ERROR_NONE;
+	}
 
-			ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
-			if (ret != ML_ERROR_NONE) {
-				LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
-				ml_tensors_data_destroy(mOutputDataHandle);
+	int InferenceMLAPI::GetOutputTensorBuffers(
+			std::map &buffers)
+	{
+		LOGI("ENTER");
 
-			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-			}
+		// TODO. Need to check if model file loading is done.
 
-			LOGI("output tensor type = %d", out_type);
+		// ML Single API will always provide internal tensor buffers so
+		// get the tensor buffers back to Mediavision framework so that
+		// Mediavision framework doesn't allocate the tensor buffers internally.
 
-			int type = 0;
+		buffers.clear();
 
-			try {
-				type = ConvertTensorTypeToInternal(out_type);
-			} catch (const std::invalid_argument& ex) {
-				LOGE("Error (%s) (%d)", ex.what(), out_type);
-				ml_tensors_data_destroy(mOutputDataHandle);
+		int ret = INFERENCE_ENGINE_ERROR_NONE;
 
-			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+		// TODO. Below is test code, should we allocate new buffer for every inference?
+		if (mOutputDataHandle == NULL) {
+			ret = ml_tensors_data_create(mOutputInfoHandle, &mOutputDataHandle);
+			if (ret != ML_ERROR_NONE) {
+				LOGE("Failed to request ml_tensors_data_create(%d).", ret);
+				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 			}
-
-			out_buffer.data_type = static_cast(type);
-			out_buffer.owner_is_backend = 1;
-
-			buffers.insert(std::make_pair(output.first, out_buffer));
 		}
 
+		// TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()
+		ret = GetTensorInfo(mDesignated_outputs, buffers, mOutputDataHandle, mOutputInfoHandle);
+		if (ret != INFERENCE_ENGINE_ERROR_NONE)
+			ml_tensors_data_destroy(mOutputDataHandle);
+
 		LOGI("LEAVE");
 
 		return INFERENCE_ENGINE_ERROR_NONE;
diff --git a/src/inference_engine_mlapi_private.h b/src/inference_engine_mlapi_private.h
index be74a6c..60d2570 100644
--- a/src/inference_engine_mlapi_private.h
+++ b/src/inference_engine_mlapi_private.h
@@ -91,6 +91,9 @@ namespace MLAPIImpl
 		std::tuple GetNNFWInfo();
 		std::string GetModelPath(const std::vector& model_paths);
 		const char *GetCustomProp();
+		int GetTensorInfo(std::map& designated_layers,
+				std::map &buffers,
+				ml_tensors_data_h& dataHandle, ml_tensors_info_h& infoHandle);
 
 		int mPluginType;
 		int mTargetDevice;
-- 
2.7.4
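
Note (reviewer aid, not part of the patch itself): the change above folds the two nearly identical per-layer loops in GetInputTensorBuffers() and GetOutputTensorBuffers() into the single helper GetTensorInfo(), parameterized by the designated layer map plus the data/info handles it reads from, while keeping the error-path ml_tensors_data_destroy() at each call site. Below is a minimal, self-contained C++ sketch of the same deduplication pattern; every name in it (TensorBuffer, QueryTensor, CollectBuffers) is an illustrative stand-in, not an actual inference-engine-mlapi or ML Single API symbol.

#include <cstddef>
#include <iostream>
#include <map>
#include <string>

// Illustrative stand-in for inference_engine_tensor_buffer.
struct TensorBuffer {
	void *buffer = nullptr;
	size_t size = 0;
};

// Stand-in for the per-tensor queries that both loops used to duplicate.
static int QueryTensor(const std::string &name, int index, TensorBuffer &out)
{
	out.buffer = nullptr;            // a real backend would hand back its internal buffer
	out.size = 16 * (index + 1);     // dummy size, just for the example
	std::cout << "queried " << name << " (index " << index << ")\n";
	return 0;
}

// The shared helper: one loop body reused for inputs and outputs,
// mirroring the role GetTensorInfo() plays in the patch.
static int CollectBuffers(const std::map<std::string, int> &layers,
			  std::map<std::string, TensorBuffer> &buffers)
{
	for (const auto &layer : layers) {
		TensorBuffer buf;

		int ret = QueryTensor(layer.first, layer.second, buf);
		if (ret != 0)
			return ret;

		buffers.insert(std::make_pair(layer.first, buf));
	}

	return 0;
}

int main()
{
	std::map<std::string, int> inputs = { { "input_0", 0 } };
	std::map<std::string, int> outputs = { { "output_0", 0 }, { "output_1", 1 } };
	std::map<std::string, TensorBuffer> in_buffers, out_buffers;

	// Both call sites now delegate to the same helper instead of carrying
	// their own copy of the loop.
	CollectBuffers(inputs, in_buffers);
	CollectBuffers(outputs, out_buffers);

	return 0;
}

Keeping cleanup of the data handle at the call sites, as the patch does, leaves ownership with the function that created the handle; the helper only fills the buffer map and reports failure.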