From: Inki Dae
Date: Tue, 7 Sep 2021 03:59:07 +0000 (+0900)
Subject: mv_machine_learning: introduce allocate function for TensorBuffer class
X-Git-Tag: submit/tizen/20210924.044804~9
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a80a0852ed96593b8aa149f40e3dcb784d760058;p=platform%2Fcore%2Fapi%2Fmediavision.git

mv_machine_learning: introduce allocate function for TensorBuffer class

Added an allocate function that allocates buffers for input and output
tensors. This patch drops code duplication - the same code was used to
allocate the input and output tensor buffers.

Change-Id: I0cfb46633a9a3fe74c734307ac9c15b5df064e63
Signed-off-by: Inki Dae
---

diff --git a/mv_machine_learning/mv_inference/inference/include/TensorBuffer.h b/mv_machine_learning/mv_inference/inference/include/TensorBuffer.h
index 9054ec7b..de4beb31 100644
--- a/mv_machine_learning/mv_inference/inference/include/TensorBuffer.h
+++ b/mv_machine_learning/mv_inference/inference/include/TensorBuffer.h
@@ -48,7 +48,9 @@ namespace inference

 		bool empty();
 		bool exist(std::string name);
-		void clear();
+		int allocate(inference_engine_tensor_buffer& tensor_buffer,
+					 const inference_engine_tensor_info& tensor_info);
+		void release();
 		size_t size();

 		IETensorBuffer& getAllTensorBuffer();
diff --git a/mv_machine_learning/mv_inference/inference/src/Inference.cpp b/mv_machine_learning/mv_inference/inference/src/Inference.cpp
index 835bc6fa..68db6054 100755
--- a/mv_machine_learning/mv_inference/inference/src/Inference.cpp
+++ b/mv_machine_learning/mv_inference/inference/src/Inference.cpp
@@ -637,11 +637,11 @@ namespace inference
 		LOGI("ENTER");

 		if (!mInputTensorBuffers.empty()) {
-			mInputTensorBuffers.clear();
+			mInputTensorBuffers.release();
 		}

 		if (!mOutputTensorBuffers.empty()) {
-			mOutputTensorBuffers.clear();
+			mOutputTensorBuffers.release();
 		}

 		LOGI("LEAVE");
@@ -685,34 +685,15 @@ namespace inference
 		// then allocate the buffers at here.
 		if (mInputTensorBuffers.empty()) {
 			for(auto& layer : mInputLayerProperty.layers) {
-				const inference_engine_tensor_info& tensor_info = layer.second;
 				inference_engine_tensor_buffer tensor_buffer;

-				if (tensor_info.data_type ==
-						INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
-					tensor_buffer.buffer = new float[tensor_info.size];
-					tensor_buffer.size = tensor_info.size;
-				} else if (tensor_info.data_type ==
-						INFERENCE_TENSOR_DATA_TYPE_UINT8) {
-					tensor_buffer.buffer = new unsigned char[tensor_info.size];
-					tensor_buffer.size = tensor_info.size;
-				} else if (tensor_info.data_type ==
-						INFERENCE_TENSOR_DATA_TYPE_UINT16) {
-					tensor_buffer.buffer = new unsigned short[tensor_info.size];
-					tensor_buffer.size = tensor_info.size;
-				} else {
-					LOGE("Invalid input tensor data type.");
-					return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-				}
-
-				if (tensor_buffer.buffer == NULL) {
-					LOGE("Fail to allocate input tensor buffer.");
-					return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+				ret = mInputTensorBuffers.allocate(tensor_buffer, layer.second);
+				if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+					LOGE("Fail to allocate tensor buffer.");
+					mInputTensorBuffers.release();
+					return ret;
 				}

-				LOGI("Allocated input tensor buffer(size = %zu, data type = %d)",
-					 tensor_info.size, tensor_info.data_type);
-				tensor_buffer.owner_is_backend = 0;
-				tensor_buffer.data_type = tensor_info.data_type;
 				mInputTensorBuffers.setTensorBuffer(layer.first, tensor_buffer);
 			}
 		}
@@ -737,45 +718,15 @@ namespace inference
 		// then allocate the buffers at here.
 		if (mOutputTensorBuffers.empty()) {
 			for (auto& layer : mOutputLayerProperty.layers) {
-				const inference_engine_tensor_info& tensor_info = layer.second;
 				inference_engine_tensor_buffer tensor_buffer;

-				if (tensor_info.data_type ==
-						INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
-					tensor_buffer.buffer = new float[tensor_info.size];
-					tensor_buffer.size = tensor_info.size;
-				} else if (tensor_info.data_type ==
-						INFERENCE_TENSOR_DATA_TYPE_INT64) {
-					tensor_buffer.buffer = new long long[tensor_info.size];
-					tensor_buffer.size = tensor_info.size;
-				} else if (tensor_info.data_type ==
-						INFERENCE_TENSOR_DATA_TYPE_UINT32) {
-					tensor_buffer.buffer = new unsigned int[tensor_info.size];
-					tensor_buffer.size = tensor_info.size;
-				} else if (tensor_info.data_type ==
-						INFERENCE_TENSOR_DATA_TYPE_UINT8) {
-					tensor_buffer.buffer = new char[tensor_info.size];
-					tensor_buffer.size = tensor_info.size;
-				} else if (tensor_info.data_type ==
-						INFERENCE_TENSOR_DATA_TYPE_UINT16) {
-					tensor_buffer.buffer = new unsigned short[tensor_info.size];
-					tensor_buffer.size = tensor_info.size;
-				} else {
-					LOGE("Invalid output tensor data type.");
-					CleanupTensorBuffers();
-					return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-				}
-
-				if (tensor_buffer.buffer == NULL) {
-					LOGE("Fail to allocate output tensor buffer.");
-					CleanupTensorBuffers();
-					return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+				ret = mInputTensorBuffers.allocate(tensor_buffer, layer.second);
+				if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+					LOGE("Fail to allocate tensor buffer.");
+					mInputTensorBuffers.release();
+					return ret;
 				}

-				LOGI("Allocated output tensor buffer(size = %zu, data type = %d)",
-					 tensor_info.size, tensor_info.data_type);
-
-				tensor_buffer.owner_is_backend = 0;
-				tensor_buffer.data_type = tensor_info.data_type;
 				mOutputTensorBuffers.setTensorBuffer(layer.first, tensor_buffer);
 			}
 		}
diff --git a/mv_machine_learning/mv_inference/inference/src/TensorBuffer.cpp b/mv_machine_learning/mv_inference/inference/src/TensorBuffer.cpp
index 6e4fc30c..383ba508 100644
--- a/mv_machine_learning/mv_inference/inference/src/TensorBuffer.cpp
+++ b/mv_machine_learning/mv_inference/inference/src/TensorBuffer.cpp
@@ -39,7 +39,44 @@ namespace inference
 		return getTensorBuffer(name) != nullptr;
 	}

-	void TensorBuffer::clear()
+	int TensorBuffer::allocate(inference_engine_tensor_buffer& tensor_buffer,
+							   const inference_engine_tensor_info& tensor_info)
+	{
+		if (tensor_info.data_type <= INFERENCE_TENSOR_DATA_TYPE_NONE ||
+			tensor_info.data_type >= INFERENCE_TENSOR_DATA_TYPE_MAX) {
+			LOGE("Invalid tensor data type.");
+			return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+		}
+
+		try {
+			if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
+				tensor_buffer.buffer = new float[tensor_info.size];
+			else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64)
+				tensor_buffer.buffer = new long long[tensor_info.size];
+			else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT64)
+				tensor_buffer.buffer = new unsigned long long[tensor_info.size];
+			else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT32)
+				tensor_buffer.buffer = new unsigned int[tensor_info.size];
+			else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8)
+				tensor_buffer.buffer = new char[tensor_info.size];
+			else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16)
+				tensor_buffer.buffer = new unsigned short[tensor_info.size];
+		} catch (const std::bad_alloc& e) {
+			LOGE("Fail to allocate tensor buffer.(%s)", e.what());
+			return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+		}
+
+		tensor_buffer.size = tensor_info.size;
+
+		LOGI("Allocated tensor buffer(size = %zu, data type = %d)",
+			 tensor_info.size, tensor_info.data_type);
+		tensor_buffer.owner_is_backend = 0;
+		tensor_buffer.data_type = tensor_info.data_type;
+
+		return MEDIA_VISION_ERROR_NONE;
+	}
+
+	void TensorBuffer::release()
 	{
 		for (auto& tensorBuffer : mTensorBuffer) {
 			auto& tBuffer = tensorBuffer.second;
@@ -85,8 +122,7 @@ namespace inference
 	bool TensorBuffer::setTensorBuffer(std::string name,
 									   inference_engine_tensor_buffer& buffer)
 	{
-		if (name.empty() ||
-			buffer.buffer == nullptr) {
+		if (name.empty() || buffer.buffer == nullptr) {
 			LOGE("Invalid parameters: %s, %p", name.c_str(), buffer.buffer);
 			return false;
 		}
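
For readers unfamiliar with the inference-engine structures, the following self-contained C++ sketch illustrates the allocation pattern that the new TensorBuffer::allocate() follows: validate the tensor data type, pick the element type for new[], rely on catching std::bad_alloc rather than checking for a null pointer, and fill in the buffer metadata before handing ownership to the caller. The struct fields, enum values, and numeric error codes below are simplified stand-ins rather than the actual mediavision/inference-engine definitions, and only two data types are handled for brevity.

// Standalone sketch of the allocate() pattern; types are simplified
// stand-ins for inference_engine_tensor_info / inference_engine_tensor_buffer.
#include <cstddef>
#include <cstdio>
#include <new>

enum TensorDataType { DATA_TYPE_NONE, DATA_TYPE_FLOAT32, DATA_TYPE_UINT8, DATA_TYPE_MAX };

struct TensorInfo {
	TensorDataType data_type;
	std::size_t size;          // number of elements
};

struct TensorBufferSketch {
	void *buffer = nullptr;
	std::size_t size = 0;
	TensorDataType data_type = DATA_TYPE_NONE;
	bool owner_is_backend = false;
};

// Returns 0 on success, -1 on invalid parameter, -2 on out-of-memory
// (stand-ins for the MEDIA_VISION_ERROR_* codes used by the patch).
int AllocateTensorBuffer(TensorBufferSketch &buf, const TensorInfo &info)
{
	if (info.data_type <= DATA_TYPE_NONE || info.data_type >= DATA_TYPE_MAX)
		return -1;

	try {
		// Dispatch on the element type so new[] reserves the right element size.
		if (info.data_type == DATA_TYPE_FLOAT32)
			buf.buffer = new float[info.size];
		else if (info.data_type == DATA_TYPE_UINT8)
			buf.buffer = new unsigned char[info.size];
	} catch (const std::bad_alloc &e) {
		std::fprintf(stderr, "Fail to allocate tensor buffer (%s)\n", e.what());
		return -2;
	}

	// Fill in the metadata once allocation has succeeded.
	buf.size = info.size;
	buf.data_type = info.data_type;
	buf.owner_is_backend = false;  // the caller, not the backend, owns this memory
	return 0;
}

int main()
{
	TensorInfo info = { DATA_TYPE_FLOAT32, 224 * 224 * 3 };
	TensorBufferSketch buf;

	if (AllocateTensorBuffer(buf, info) == 0) {
		std::printf("allocated %zu float elements\n", buf.size);
		delete[] static_cast<float *>(buf.buffer);  // a release step frees each buffer
	}
	return 0;
}

In the patch itself, each successfully allocated buffer is registered with setTensorBuffer() and later freed by TensorBuffer::release(), which replaces the old clear() method.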