mv_machine_learning: introduce allocate function for TensorBuffer class
author Inki Dae <inki.dae@samsung.com>
Tue, 7 Sep 2021 03:59:07 +0000 (12:59 +0900)
committer Inki Dae <inki.dae@samsung.com>
Thu, 9 Sep 2021 01:07:04 +0000 (10:07 +0900)
Added an allocate function that allocates the buffers for input and
output tensors. This patch removes code duplication: the same
allocation code was repeated for the input and output tensor buffers,
and both call sites now share TensorBuffer::allocate(). It also renames
TensorBuffer::clear() to release(), which better describes that the
function frees the underlying buffers.
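
With the helper in place, each allocation site in Inference.cpp reduces
to a single call. A minimal sketch of the resulting caller pattern,
mirroring the input-buffer loop of this patch (ret is the function-local
status variable):

    inference_engine_tensor_buffer tensor_buffer;

    ret = mInputTensorBuffers.allocate(tensor_buffer, layer.second);
    if (ret != MEDIA_VISION_ERROR_NONE) {
            LOGE("Fail to allocate input tensor buffer.");
            mInputTensorBuffers.release();
            return ret;
    }

    mInputTensorBuffers.setTensorBuffer(layer.first, tensor_buffer);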

Change-Id: I0cfb46633a9a3fe74c734307ac9c15b5df064e63
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_machine_learning/mv_inference/inference/include/TensorBuffer.h
mv_machine_learning/mv_inference/inference/src/Inference.cpp
mv_machine_learning/mv_inference/inference/src/TensorBuffer.cpp

index 9054ec7..de4beb3 100644 (file)
@@ -48,7 +48,9 @@ namespace inference
 
                bool empty();
                bool exist(std::string name);
-               void clear();
+               int allocate(inference_engine_tensor_buffer& tensor_buffer,
+                                        const inference_engine_tensor_info& tensor_info);
+               void release();
                size_t size();
 
                IETensorBuffer& getAllTensorBuffer();
index 835bc6f..68db605 100755 (executable)
@@ -637,11 +637,11 @@ namespace inference
                LOGI("ENTER");
 
                if (!mInputTensorBuffers.empty()) {
-                       mInputTensorBuffers.clear();
+                       mInputTensorBuffers.release();
                }
 
                if (!mOutputTensorBuffers.empty()) {
-                       mOutputTensorBuffers.clear();
+                       mOutputTensorBuffers.release();
                }
 
                LOGI("LEAVE");
@@ -685,34 +685,15 @@ namespace inference
                // then allocate the buffers at here.
                if (mInputTensorBuffers.empty()) {
                        for(auto& layer : mInputLayerProperty.layers) {
-                               const inference_engine_tensor_info& tensor_info = layer.second;
                                inference_engine_tensor_buffer tensor_buffer;
-                               if (tensor_info.data_type ==
-                                       INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
-                                       tensor_buffer.buffer = new float[tensor_info.size];
-                                       tensor_buffer.size = tensor_info.size;
-                               } else if (tensor_info.data_type ==
-                                                  INFERENCE_TENSOR_DATA_TYPE_UINT8) {
-                                       tensor_buffer.buffer = new unsigned char[tensor_info.size];
-                                       tensor_buffer.size = tensor_info.size;
-                               } else if (tensor_info.data_type ==
-                                                  INFERENCE_TENSOR_DATA_TYPE_UINT16) {
-                                       tensor_buffer.buffer = new unsigned short[tensor_info.size];
-                                       tensor_buffer.size = tensor_info.size;
-                               } else {
-                                       LOGE("Invalid input tensor data type.");
-                                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-                               }
 
-                               if (tensor_buffer.buffer == NULL) {
-                                       LOGE("Fail to allocate input tensor buffer.");
-                                       return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+                               ret = mInputTensorBuffers.allocate(tensor_buffer, layer.second);
+                               if (ret != MEDIA_VISION_ERROR_NONE) {
+                                       LOGE("Fail to allocate input tensor buffer.");
+                                       mInputTensorBuffers.release();
+                                       return ret;
                                }
 
-                               LOGI("Allocated input tensor buffer(size = %zu, data type = %d)",
-                                        tensor_info.size, tensor_info.data_type);
-                               tensor_buffer.owner_is_backend = 0;
-                               tensor_buffer.data_type = tensor_info.data_type;
                                mInputTensorBuffers.setTensorBuffer(layer.first, tensor_buffer);
                        }
                }
@@ -737,45 +718,15 @@ namespace inference
                // then allocate the buffers at here.
                if (mOutputTensorBuffers.empty()) {
                        for (auto& layer : mOutputLayerProperty.layers) {
-                               const inference_engine_tensor_info& tensor_info = layer.second;
                                inference_engine_tensor_buffer tensor_buffer;
-                               if (tensor_info.data_type ==
-                                       INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
-                                       tensor_buffer.buffer = new float[tensor_info.size];
-                                       tensor_buffer.size = tensor_info.size;
-                               } else if (tensor_info.data_type ==
-                                       INFERENCE_TENSOR_DATA_TYPE_INT64) {
-                                       tensor_buffer.buffer = new long long[tensor_info.size];
-                                       tensor_buffer.size = tensor_info.size;
-                               } else if (tensor_info.data_type ==
-                                       INFERENCE_TENSOR_DATA_TYPE_UINT32) {
-                                       tensor_buffer.buffer = new unsigned int[tensor_info.size];
-                                       tensor_buffer.size = tensor_info.size;
-                               } else if (tensor_info.data_type ==
-                                       INFERENCE_TENSOR_DATA_TYPE_UINT8) {
-                                       tensor_buffer.buffer = new char[tensor_info.size];
-                                       tensor_buffer.size = tensor_info.size;
-                               } else if (tensor_info.data_type ==
-                                                  INFERENCE_TENSOR_DATA_TYPE_UINT16) {
-                                       tensor_buffer.buffer = new unsigned short[tensor_info.size];
-                                       tensor_buffer.size = tensor_info.size;
-                               } else {
-                                       LOGE("Invalid output tensor data type.");
-                                       CleanupTensorBuffers();
-                                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-                               }
 
-                               if (tensor_buffer.buffer == NULL) {
-                                       LOGE("Fail to allocate output tensor buffer.");
-                                       CleanupTensorBuffers();
-                                       return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+                               ret = mOutputTensorBuffers.allocate(tensor_buffer, layer.second);
+                               if (ret != MEDIA_VISION_ERROR_NONE) {
+                                       LOGE("Fail to allocate output tensor buffer.");
+                                       mOutputTensorBuffers.release();
+                                       return ret;
                                }
 
-                               LOGI("Allocated output tensor buffer(size = %zu, data type = %d)",
-                                        tensor_info.size, tensor_info.data_type);
-
-                               tensor_buffer.owner_is_backend = 0;
-                               tensor_buffer.data_type = tensor_info.data_type;
                                mOutputTensorBuffers.setTensorBuffer(layer.first, tensor_buffer);
                        }
                }
index 6e4fc30..383ba50 100644 (file)
@@ -39,7 +39,48 @@ namespace inference
                return getTensorBuffer(name) != nullptr;
        }
 
-       void TensorBuffer::clear()
+       int TensorBuffer::allocate(inference_engine_tensor_buffer& tensor_buffer,
+                                                          const inference_engine_tensor_info& tensor_info)
+       {
+               if (tensor_info.data_type <= INFERENCE_TENSOR_DATA_TYPE_NONE ||
+                               tensor_info.data_type >= INFERENCE_TENSOR_DATA_TYPE_MAX) {
+                       LOGE("Invalid tensor data type.");
+                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+               }
+
+               try {
+                       if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
+                               tensor_buffer.buffer = new float[tensor_info.size];
+                       else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64)
+                               tensor_buffer.buffer = new long long[tensor_info.size];
+                       else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT64)
+                               tensor_buffer.buffer = new unsigned long long[tensor_info.size];
+                       else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT32)
+                               tensor_buffer.buffer = new unsigned int[tensor_info.size];
+                       else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8)
+                               tensor_buffer.buffer = new unsigned char[tensor_info.size];
+                       else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16)
+                               tensor_buffer.buffer = new unsigned short[tensor_info.size];
+                       else {
+                               LOGE("Unsupported tensor data type.");
+                               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+                       }
+               } catch (const std::bad_alloc& e) {
+                       LOGE("Fail to allocate tensor buffer.(%s)", e.what());
+                       return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+               }
+
+               tensor_buffer.size = tensor_info.size;
+
+               LOGI("Allocated tensor buffer(size = %zu, data type = %d)",
+                               tensor_info.size, tensor_info.data_type);
+               tensor_buffer.owner_is_backend = 0;
+               tensor_buffer.data_type = tensor_info.data_type;
+
+               return MEDIA_VISION_ERROR_NONE;
+       }
+
+       void TensorBuffer::release()
        {
                for (auto& tensorBuffer : mTensorBuffer) {
                        auto& tBuffer = tensorBuffer.second;
@@ -85,8 +122,7 @@ namespace inference
 
        bool TensorBuffer::setTensorBuffer(std::string name, inference_engine_tensor_buffer& buffer)
        {
-               if (name.empty() ||
-                       buffer.buffer == nullptr) {
+               if (name.empty() || buffer.buffer == nullptr) {
                        LOGE("Invalid parameters: %s, %p", name.c_str(), buffer.buffer);
                        return false;
                }
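
For reference, a minimal standalone sketch of the new TensorBuffer
interface. The enclosing function, layer name, and element count below
are hypothetical, and it assumes TensorBuffer is default-constructible
in the mediavision::inference namespace:

    #include "TensorBuffer.h"

    using namespace mediavision::inference;

    int PrepareBuffers(void)
    {
            TensorBuffer buffers;

            // Describe a hypothetical FLOAT32 tensor; tensor_info.size is the
            // element count, matching how allocate() sizes its new[] call.
            inference_engine_tensor_info info {};
            info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
            info.size = 224 * 224 * 3;

            inference_engine_tensor_buffer buffer {};
            int ret = buffers.allocate(buffer, info);
            if (ret != MEDIA_VISION_ERROR_NONE)
                    return ret;

            buffers.setTensorBuffer("input_layer", buffer);

            // ... feed buffers.getAllTensorBuffer() to the backend ...

            buffers.release();      // frees every buffer the map still owns

            return MEDIA_VISION_ERROR_NONE;
    }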