From b172770839391647fb014703ef20520b9cc91e2b Mon Sep 17 00:00:00 2001
From: Tae-Young Chung
Date: Wed, 10 Mar 2021 18:12:57 +0900
Subject: [PATCH] Change members of inference_engine_layer_property structure,
 and change vector to map

This is based on
https://review.tizen.org/gerrit/#/c/platform/core/multimedia/inference-engine-interface/+/254892/
https://review.tizen.org/gerrit/#/c/platform/core/api/mediavision/+/254953/

Change-Id: I93eaa87c9ed5492bb308cb1ec0a35e86fd5b06dd
Signed-off-by: Tae-Young Chung
---
 packaging/inference-engine-mlapi.spec |   2 +-
 src/inference_engine_mlapi.cpp        | 196 ++++++++++++++++------------------
 src/inference_engine_mlapi_private.h  |  16 +--
 3 files changed, 104 insertions(+), 110 deletions(-)

diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index 02818ae..1ac601c 100644
--- a/packaging/inference-engine-mlapi.spec
+++ b/packaging/inference-engine-mlapi.spec
@@ -1,7 +1,7 @@
 Name:        inference-engine-mlapi
 Summary:     ML Single API backend of NNStreamer for MediaVision
 Version:     0.0.1
-Release:     1
+Release:     2
 Group:       Multimedia/Libraries
 License:     Apache-2.0
 ExclusiveArch: %{arm} aarch64
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index c11a337..213bc8d 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -55,10 +55,10 @@ namespace MLAPIImpl
 	InferenceMLAPI::~InferenceMLAPI()
 	{
 		mDesignated_inputs.clear();
-		std::vector<std::string>().swap(mDesignated_inputs);
+		std::map<std::string, int>().swap(mDesignated_inputs);
 
 		mDesignated_outputs.clear();
-		std::vector<std::string>().swap(mDesignated_outputs);
+		std::map<std::string, int>().swap(mDesignated_outputs);
 
 		ml_single_close(mSingle);
 
@@ -94,7 +94,7 @@ namespace MLAPIImpl
 		}
 
 		mPluginType = type;
-
+		LOGI("backend type.(%d)", type);
 		LOGI("LEAVE");
 
 		return INFERENCE_ENGINE_ERROR_NONE;
@@ -232,7 +232,7 @@ namespace MLAPIImpl
 	}
 
 	int InferenceMLAPI::GetInputTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &buffers)
+			std::map<std::string, inference_engine_tensor_buffer> &buffers)
 	{
 		LOGI("ENTER");
 
@@ -244,16 +244,7 @@ namespace MLAPIImpl
 
 		buffers.clear();
 
-		int ret;
-		unsigned int cnt;
-
-		ret = ml_tensors_info_get_count(mInputInfoHandle, &cnt);
-		if (ret != ML_ERROR_NONE) {
-			LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
-			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-		}
-
-		LOGI("input tensor count = %u", cnt);
+		int ret = INFERENCE_ENGINE_ERROR_NONE;
 
 		// TODO. Below is test code, should we allocate new buffer for every inference?
 		if (mInputDataHandle == NULL) {
@@ -265,11 +256,11 @@ namespace MLAPIImpl
 		}
 
 		// TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()
-		for (unsigned int i = 0; i < cnt; ++i) {
+		for (auto& input : mDesignated_inputs) {
 			inference_engine_tensor_buffer in_buffer;
 			ml_tensor_type_e in_type;
 
-			ret = ml_tensors_data_get_tensor_data(mInputDataHandle, i, &in_buffer.buffer, &in_buffer.size);
+			ret = ml_tensors_data_get_tensor_data(mInputDataHandle, input.second, &in_buffer.buffer, &in_buffer.size);
 			if (ret != ML_ERROR_NONE) {
 				LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -277,7 +268,7 @@ namespace MLAPIImpl
 
 			LOGE("buffer = %p, size = %zu\n", in_buffer.buffer, in_buffer.size);
 
-			ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, i, &in_type);
+			ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, input.second, &in_type);
 			if (ret != ML_ERROR_NONE) {
 				LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -293,7 +284,7 @@ namespace MLAPIImpl
 			in_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
 			in_buffer.owner_is_backend = 1;
 
-			buffers.push_back(in_buffer);
+			buffers.insert(std::make_pair(input.first, in_buffer));
 		}
 
 		LOGI("LEAVE");
@@ -302,7 +293,7 @@ namespace MLAPIImpl
 	}
 
 	int InferenceMLAPI::GetOutputTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &buffers)
+			std::map<std::string, inference_engine_tensor_buffer> &buffers)
 	{
 		LOGI("ENTER");
 
@@ -314,16 +305,7 @@ namespace MLAPIImpl
 
 		buffers.clear();
 
-		int ret;
-		unsigned int cnt;
-
-		ret = ml_tensors_info_get_count(mOutputInfoHandle, &cnt);
-		if (ret != ML_ERROR_NONE) {
-			LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
-			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-		}
-
-		LOGI("output tensor count = %u", cnt);
+		int ret = INFERENCE_ENGINE_ERROR_NONE;
 
 		// TODO. Below is test code, should we allocate new buffer for every inference?
 		if (mOutputDataHandle == NULL) {
@@ -335,11 +317,11 @@ namespace MLAPIImpl
 		}
 
 		// TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()
-		for (unsigned int i = 0; i < cnt; ++i) {
+		for (auto& output : mDesignated_outputs) {
 			inference_engine_tensor_buffer out_buffer;
 			ml_tensor_type_e out_type;
 
-			ret = ml_tensors_data_get_tensor_data(mOutputDataHandle, i, &out_buffer.buffer, &out_buffer.size);
+			ret = ml_tensors_data_get_tensor_data(mOutputDataHandle, output.second, &out_buffer.buffer, &out_buffer.size);
 			if (ret != ML_ERROR_NONE) {
 				LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -347,7 +329,7 @@ namespace MLAPIImpl
 
 			LOGE("buffer = %p, size = %zu\n", out_buffer.buffer, out_buffer.size);
 
-			ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, i, &out_type);
+			ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
 			if (ret != ML_ERROR_NONE) {
 				LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
 				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -363,7 +345,7 @@ namespace MLAPIImpl
 			out_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
 			out_buffer.owner_is_backend = 1;
 
-			buffers.push_back(out_buffer);
+			buffers.insert(std::make_pair(output.first, out_buffer));
 		}
 
 		LOGI("LEAVE");
@@ -377,25 +359,15 @@ namespace MLAPIImpl
 		LOGI("ENTER");
 
 		// TODO. Need to check if model file loading is done.
-		int ret;
-		unsigned int cnt;
+		int ret = INFERENCE_ENGINE_ERROR_NONE;
 
-		ret = ml_tensors_info_get_count(mInputInfoHandle, &cnt);
-		if (ret != ML_ERROR_NONE) {
-			LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
-			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-		}
-
-		LOGI("input tensor count = %u", cnt);
-
-		for (unsigned int i = 0; i < cnt; ++i) {
+		for (auto& input : mDesignated_inputs) {
 			inference_engine_tensor_info tensor_info;
 			ml_tensor_type_e in_type;
 			ml_tensor_dimension in_dim;
-			char *in_name = NULL;
 			size_t in_size = 1;
 
-			ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, i, &in_type);
+			ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, input.second, &in_type);
 			if (ret != ML_ERROR_NONE) {
 				LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
 					ret);
@@ -409,7 +381,7 @@ namespace MLAPIImpl
 				return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
 			}
 
-			ret = ml_tensors_info_get_tensor_dimension(mInputInfoHandle, i, in_dim);
+			ret = ml_tensors_info_get_tensor_dimension(mInputInfoHandle, input.second, in_dim);
 			if (ret != ML_ERROR_NONE) {
 				LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).",
 					ret);
@@ -425,25 +397,16 @@ namespace MLAPIImpl
 
 			LOGI("input tensor size = %zu", in_size);
 
-			ret = ml_tensors_info_get_tensor_name(mInputInfoHandle, i, &in_name);
-			if (ret != ML_ERROR_NONE) {
-				LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
-					ret);
-				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-			}
-
-			LOGI("input tensor name = %s", in_name);
+			LOGI("input tensor name = %s", input.first.c_str());
 
 			tensor_info.data_type = static_cast<inference_tensor_data_type_e>(type);
 			tensor_info.size = in_size;
 
-			property.tensor_infos.push_back(tensor_info);
+			property.layers.insert(std::make_pair(input.first, tensor_info));
 
 			// TODO. Compare tensor info from engine to one from a given property.
 		}
 
-		property.layer_names = mInputProperty.layer_names;
-
 		LOGI("LEAVE");
 
 		return INFERENCE_ENGINE_ERROR_NONE;
@@ -455,25 +418,15 @@ namespace MLAPIImpl
 		LOGI("ENTER");
 
 		// TODO. Need to check if model file loading is done.
-		int ret;
-		unsigned int cnt;
+		int ret = INFERENCE_ENGINE_ERROR_NONE;
 
-		ret = ml_tensors_info_get_count(mOutputInfoHandle, &cnt);
-		if (ret != ML_ERROR_NONE) {
-			LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
-			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-		}
-
-		LOGI("output tensor count = %u", cnt);
-
-		for (unsigned int i = 0; i < cnt; ++i) {
+		for (auto& output : mDesignated_outputs) {
 			inference_engine_tensor_info tensor_info;
 			ml_tensor_type_e out_type;
 			unsigned int out_dim[ML_TENSOR_RANK_LIMIT];
-			char *out_name = NULL;
 			size_t out_size = 1;
 
-			ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, i, &out_type);
+			ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
 			if (ret != ML_ERROR_NONE) {
 				LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
 					ret);
@@ -487,7 +440,7 @@ namespace MLAPIImpl
 				return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
 			}
 
-			ret = ml_tensors_info_get_tensor_dimension(mOutputInfoHandle, i, out_dim);
+			ret = ml_tensors_info_get_tensor_dimension(mOutputInfoHandle, output.second, out_dim);
 			if (ret != ML_ERROR_NONE) {
 				LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).",
 					ret);
@@ -518,25 +471,16 @@ namespace MLAPIImpl
 
 			LOGI("output tensor size = %zu", out_size);
 
-			ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, i, &out_name);
-			if (ret != ML_ERROR_NONE) {
-				LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
-					ret);
-				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-			}
-
-			LOGI("output tensor name = %s", out_name);
+			LOGI("output tensor name = %s", output.first.c_str());
 
 			tensor_info.data_type = static_cast<inference_tensor_data_type_e>(type);
 			tensor_info.size = out_size;
 
-			property.tensor_infos.push_back(tensor_info);
+			property.layers.insert(std::make_pair(output.first, tensor_info));
 
 			// TODO. Compare tensor info from engine to one from a given property.
 		}
 
-		property.layer_names = mOutputProperty.layer_names;
-
 		LOGI("LEAVE");
 
 		return INFERENCE_ENGINE_ERROR_NONE;
@@ -547,20 +491,17 @@ namespace MLAPIImpl
 	{
 		LOGI("ENTER");
 
-		std::vector<std::string>::iterator iter;
-		for (iter = property.layer_names.begin();
-			 iter != property.layer_names.end(); iter++) {
-			std::string name = *iter;
-			LOGI("input layer name = %s", name.c_str());
+		for (auto& layer : property.layers) {
+			LOGI("input layer name = %s", layer.first.c_str());
 		}
 
 		mDesignated_inputs.clear();
-		std::vector<std::string>().swap(mDesignated_inputs);
+		std::map<std::string, int>().swap(mDesignated_inputs);
 
 		// TODO. Request input property information to a given ML Single API of nnstreamer backend,
 		// and set it instead of user-given one,
 		// Call UpdateTensorsInfo() after requesting input info.
-		mDesignated_inputs = property.layer_names;
+		mInputProperty = property;
 
 		LOGI("LEAVE");
 
@@ -573,20 +514,17 @@ namespace MLAPIImpl
 	{
 		LOGI("ENTER");
 
-		std::vector<std::string>::iterator iter;
-		for (iter = property.layer_names.begin();
-			 iter != property.layer_names.end(); iter++) {
-			std::string name = *iter;
-			LOGI("output layer name = %s", name.c_str());
+		for (auto& layer : property.layers) {
+			LOGI("output layer name = %s", layer.first.c_str());
 		}
 
 		mDesignated_outputs.clear();
-		std::vector<std::string>().swap(mDesignated_outputs);
+		std::map<std::string, int>().swap(mDesignated_outputs);
 
 		// TODO. Request output property information to a given ML Single API of nnstreamer backend,
 		// and set it instead of user-given one,
 		// Call UpdateTensorsInfo() after requesting output info.
-		mDesignated_outputs = property.layer_names;
+		mOutputProperty = property;
 
 		LOGI("LEAVE");
 
@@ -617,8 +555,8 @@ namespace MLAPIImpl
 	}
 
 	int InferenceMLAPI::CheckTensorBuffers(
-			std::vector<inference_engine_tensor_buffer> &input_buffers,
-			std::vector<inference_engine_tensor_buffer> &output_buffers)
+			std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+			std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
 	{
 		LOGI("ENTER");
 
@@ -677,19 +615,75 @@ namespace MLAPIImpl
 			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 		}
 
+		unsigned int cnt = 0;
+		ret = ml_tensors_info_get_count(mInputInfoHandle, &cnt);
+		if (ret != ML_ERROR_NONE || !cnt) {
+			LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
+			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+		}
+
+		LOGI("input tensor count = %u", cnt);
+		mDesignated_inputs.clear();
+		std::map<std::string, int>().swap(mDesignated_inputs);
+		for(unsigned int index = 0; index < cnt; ++index) {
+			char *in_name = NULL;
+			ret = ml_tensors_info_get_tensor_name(mInputInfoHandle, index, &in_name);
+			LOGI("index:%d with name %s", index, in_name);
+			if (ret != ML_ERROR_NONE) {
+				LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+					ret);
+				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+			}
+
+			if (in_name == NULL)
+				continue;
+
+			mDesignated_inputs.insert(std::make_pair(std::string(in_name), index));
+			free(in_name);
+		}
+
 		ret = ml_single_get_output_info(mSingle, &mOutputInfoHandle);
 		if (ret != ML_ERROR_NONE) {
 			LOGE("Failed to request ml_single_get_output_info(%d).", ret);
 			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 		}
 
+		cnt = 0;
+		ret = ml_tensors_info_get_count(mOutputInfoHandle, &cnt);
+		if (ret != ML_ERROR_NONE || cnt == 0) {
+			LOGE("Failed to request ml_tensors_info_get_count(%d) with cnt %u.", ret, cnt);
+			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+		}
+
+		LOGI("output tensor count = %u", cnt);
+		mDesignated_outputs.clear();
+		std::map<std::string, int>().swap(mDesignated_outputs);
+		for (unsigned int index = 0; index < cnt; ++index) {
+			char *out_name = NULL;
+			ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, index, &out_name);
+			LOGI("index:%u with name %s", index, out_name);
+			if (ret != ML_ERROR_NONE) {
+				LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+					ret);
+				return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+			}
+
+			if (out_name == NULL)
+				continue;
+
+			mDesignated_outputs.insert(std::make_pair(std::string(out_name), index));
+			if (out_name != NULL){
+				free(out_name);
+			}
+		}
+
 		LOGI("LEAVE");
 
 		return INFERENCE_ENGINE_ERROR_NONE;
 	}
 
 	int InferenceMLAPI::Run(
-			std::vector<inference_engine_tensor_buffer> &input_buffers,
-			std::vector<inference_engine_tensor_buffer> &output_buffers)
+			std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+			std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
 	{
 		LOGI("ENTER");
diff --git a/src/inference_engine_mlapi_private.h b/src/inference_engine_mlapi_private.h
index 5f9fa32..fe39594 100644
--- a/src/inference_engine_mlapi_private.h
+++ b/src/inference_engine_mlapi_private.h
@@ -51,10 +51,10 @@
 				inference_model_format_e model_format) override;
 
 		int GetInputTensorBuffers(
-				std::vector<inference_engine_tensor_buffer> &buffers) override;
+				std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
 		int GetOutputTensorBuffers(
-				std::vector<inference_engine_tensor_buffer> &buffers) override;
+				std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
 
 		int GetInputLayerProperty(
 				inference_engine_layer_property &property) override;
@@ -70,14 +70,14 @@
 
 		int GetBackendCapacity(inference_engine_capacity *capacity) override;
 
-		int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-				std::vector<inference_engine_tensor_buffer> &output_buffers)
+		int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+				std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
 				override;
 
 	private:
 		int CheckTensorBuffers(
-				std::vector<inference_engine_tensor_buffer> &input_buffers,
-				std::vector<inference_engine_tensor_buffer> &output_buffers);
+				std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+				std::map<std::string, inference_engine_tensor_buffer> &output_buffers);
 		int ConvertTensorType(int tensor_type);
 		int UpdateTensorsInfo();
 
@@ -88,8 +88,8 @@
 		ml_tensors_info_h mOutputInfoHandle;
 		ml_tensors_data_h mInputDataHandle;
 		ml_tensors_data_h mOutputDataHandle;
-		std::vector<std::string> mDesignated_inputs;
-		std::vector<std::string> mDesignated_outputs;
+		std::map<std::string, int> mDesignated_inputs;
+		std::map<std::string, int> mDesignated_outputs;
 		inference_engine_layer_property mInputProperty;
 		inference_engine_layer_property mOutputProperty;
 	};
-- 
2.7.4
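
For reference only, and not part of the patch itself: a minimal, self-contained sketch of how a caller might consume tensor buffers keyed by layer name, which is the interface shape this change moves to. The struct below is an abbreviated stand-in for the real inference_engine_tensor_buffer, and DumpTensorBuffers plus the hand-built map are hypothetical illustration, not APIs of this backend.

	#include <cstddef>
	#include <cstdio>
	#include <map>
	#include <string>
	#include <utility>

	// Abbreviated stand-in for the inference-engine-interface buffer type;
	// the real header declares additional members (data type, ownership, ...).
	struct inference_engine_tensor_buffer {
		void *buffer;
		size_t size;
	};

	// With the map-based interface each buffer is keyed by its layer name,
	// so a caller can look a tensor up by name instead of depending on the
	// index order that the old std::vector signature implied.
	static void DumpTensorBuffers(
			const std::map<std::string, inference_engine_tensor_buffer> &buffers)
	{
		for (const auto &layer : buffers)
			printf("layer %s: %zu bytes\n", layer.first.c_str(), layer.second.size);
	}

	int main()
	{
		std::map<std::string, inference_engine_tensor_buffer> buffers;
		float data[4] = { 0.f, 1.f, 2.f, 3.f };

		// The backend would normally fill this map in GetInputTensorBuffers();
		// one entry is inserted by hand here just to exercise the lookup by name.
		buffers.insert(std::make_pair(std::string("input_tensor"),
				inference_engine_tensor_buffer{ data, sizeof(data) }));

		DumpTensorBuffers(buffers);
		return 0;
	}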