mv_machine_learning: fix memory leak issue 04/279504/2
author Inki Dae <inki.dae@samsung.com>
Wed, 10 Aug 2022 09:21:11 +0000 (18:21 +0900)
committer Inki Dae <inki.dae@samsung.com>
Wed, 10 Aug 2022 23:53:06 +0000 (08:53 +0900)
[Version] 0.23.13
[Issue type] Bug fix

Fixed a memory leak in non-meta file inference by introducing a
convertToFloat function to the TensorBuffer class. After replacing the
original tensor buffer with a newly allocated float one, the function
clears owner_is_backend so that the float buffer can be released
properly.

This patch also removes duplicated conversion code (a code smell) and
renames the existing setTensorBuffer function to addTensorBuffer.
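
For reference, a minimal sketch of the leak mechanism, using a
stand-in struct for inference_engine_tensor_buffer (the real type is
defined by inference-engine-interface, so the shape here is assumed):

    #include <cstddef>

    // Stand-in for inference_engine_tensor_buffer (assumed shape).
    struct Buffer {
        void *buffer;
        bool owner_is_backend; // true: the backend frees it; false: we do
    };

    // Teardown frees a buffer only when mv_machine_learning owns it.
    void teardown(Buffer &b)
    {
        if (!b.owner_is_backend)
            delete[] static_cast<float *>(b.buffer);
    }

    void convert(Buffer &b, size_t size)
    {
        void *old = b.buffer;
        b.buffer = new float[size]; // replacement we now own
        if (!b.owner_is_backend)
            delete[] static_cast<unsigned char *>(old);
        // Before this patch owner_is_backend kept its old value here, so
        // a backend-owned original left the flag true and teardown()
        // never freed the float replacement. The fix:
        b.owner_is_backend = false;
    }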

Change-Id: I062aa9cf05fc6ebb53e4cfd0c4ae6bb0d98d65fe
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_machine_learning/inference/include/TensorBuffer.h
mv_machine_learning/inference/src/Inference.cpp
mv_machine_learning/inference/src/TensorBuffer.cpp
packaging/capi-media-vision.spec

diff --git a/mv_machine_learning/inference/include/TensorBuffer.h b/mv_machine_learning/inference/include/TensorBuffer.h
index de4beb3..c7d4ba3 100644
@@ -55,7 +55,10 @@ namespace inference
 
                IETensorBuffer& getAllTensorBuffer();
                inference_engine_tensor_buffer* getTensorBuffer(std::string name);
-               bool setTensorBuffer(std::string name, inference_engine_tensor_buffer& buffer);
+               bool addTensorBuffer(std::string name, inference_engine_tensor_buffer& buffer);
+
+               template <typename T>
+               int convertToFloat(inference_engine_tensor_buffer *tensorBuffer, const inference_engine_tensor_info& tensor_info);
 
                template <typename T>
                T getValue(std::string name, int idx);
diff --git a/mv_machine_learning/inference/src/Inference.cpp b/mv_machine_learning/inference/src/Inference.cpp
index 67af6b1..1616ace 100755
@@ -691,7 +691,7 @@ namespace inference
                                        return ret;
                                }
 
-                               mInputTensorBuffers.setTensorBuffer(layer.first, tensor_buffer);
+                               mInputTensorBuffers.addTensorBuffer(layer.first, tensor_buffer);
                        }
                }
 
@@ -724,7 +724,7 @@ namespace inference
                                        return ret;
                                }
 
-                               mOutputTensorBuffers.setTensorBuffer(layer.first, tensor_buffer);
+                               mOutputTensorBuffers.addTensorBuffer(layer.first, tensor_buffer);
                        }
                }
 
@@ -745,57 +745,31 @@ namespace inference
 
                        outputData.dimInfo.push_back(tmpDimInfo);
 
-                       inference_engine_tensor_buffer* tensorBuffers =
+                       inference_engine_tensor_buffer* tensorBuffer =
                                                                mOutputTensorBuffers.getTensorBuffer(layer.first);
-                       if (tensorBuffers == NULL) {
+                       if (tensorBuffer == NULL) {
                                LOGE("Fail to getTensorBuffer with name %s", layer.first.c_str());
                                return MEDIA_VISION_ERROR_INVALID_OPERATION;
                        }
 
                        // Normalize output tensor data converting it to float type in case of quantized model.
                        if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
-                               float *new_buf = new float[tensor_info.size];
-                               if (new_buf == NULL) {
-                                       LOGE("Fail to allocate a new output tensor buffer.");
-                                       return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
-                               }
-
-                               auto *ori_buf = static_cast<unsigned char *>(
-                                               tensorBuffers->buffer);
-
-                               for (size_t j = 0; j < tensor_info.size; j++) {
-                                       new_buf[j] = static_cast<float>(ori_buf[j]) / 255.0f;
+                               int ret = mOutputTensorBuffers.convertToFloat<unsigned char>(tensorBuffer, tensor_info);
+                               if (ret != MEDIA_VISION_ERROR_NONE) {
+                                       LOGE("Fail to convert tensor data to float type.");
+                                       return ret;
                                }
-
-                               // replace original buffer with new one, and release origin one.
-                               tensorBuffers->buffer = new_buf;
-
-                               if (!tensorBuffers->owner_is_backend)
-                                       delete[] ori_buf;
                        }
 
                        if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16) {
-                               float *new_buf = new float[tensor_info.size];
-                               if (new_buf == NULL) {
-                                       LOGE("Fail to allocate a new output tensor buffer.");
-                                       return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
-                               }
-
-                               auto *ori_buf =
-                                               static_cast<short *>(tensorBuffers->buffer);
-
-                               for (size_t j = 0; j < tensor_info.size; j++) {
-                                       new_buf[j] = static_cast<float>(ori_buf[j]);
+                               int ret = mOutputTensorBuffers.convertToFloat<unsigned short>(tensorBuffer, tensor_info);
+                               if (ret != MEDIA_VISION_ERROR_NONE) {
+                                       LOGE("Fail to convert tensor data to float type.");
+                                       return ret;
                                }
-
-                               // replace original buffer with new one, and release origin one.
-                               tensorBuffers->buffer = new_buf;
-
-                               if (!tensorBuffers->owner_is_backend)
-                                       delete[] ori_buf;
                        }
 
-                       outputData.data.push_back(static_cast<void *>(tensorBuffers->buffer));
+                       outputData.data.push_back(static_cast<void *>(tensorBuffer->buffer));
                }
 
                return MEDIA_VISION_ERROR_NONE;
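
Note that unifying the two branches also changes the UINT16 path: the
removed code only casted each value to float, while the shared
convertToFloat divides by 255.0f for UINT8 and UINT16 alike. If
per-type scaling is ever needed again, one option is a scale
parameter; the following sketch is hypothetical and not part of this
patch:

    #include <cstddef>
    #include <new>

    // Hypothetical scale-aware variant: a UINT8 caller would pass 255.0f,
    // a UINT16 caller 1.0f (the old cast-only behavior) or 65535.0f (a
    // true 16-bit normalization). Returns nullptr on allocation failure.
    template <typename T>
    float *convert_to_float(const T *src, size_t size, float scale)
    {
        float *dst = new (std::nothrow) float[size];
        if (dst == nullptr)
            return nullptr;

        for (size_t i = 0; i < size; i++)
            dst[i] = static_cast<float>(src[i]) / scale;

        return dst;
    }
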
diff --git a/mv_machine_learning/inference/src/TensorBuffer.cpp b/mv_machine_learning/inference/src/TensorBuffer.cpp
index 383ba50..e08e5d8 100644
@@ -120,7 +120,7 @@ namespace inference
                return &mTensorBuffer[name];
        }
 
-       bool TensorBuffer::setTensorBuffer(std::string name, inference_engine_tensor_buffer& buffer)
+       bool TensorBuffer::addTensorBuffer(std::string name, inference_engine_tensor_buffer& buffer)
        {
                if (name.empty() || buffer.buffer == nullptr) {
                        LOGE("Invalid parameters: %s, %p", name.c_str(), buffer.buffer);
@@ -137,6 +137,32 @@ namespace inference
        }
 
        template <typename T>
+       int TensorBuffer::convertToFloat(inference_engine_tensor_buffer *tensorBuffer, const inference_engine_tensor_info& tensor_info)
+       {
+               float *new_buf = new(std::nothrow) float[tensor_info.size];
+               if (new_buf == NULL) {
+                       LOGE("Fail to allocate a new output tensor buffer.");
+                       return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+               }
+
+               auto ori_buf = static_cast<T *>(tensorBuffer->buffer);
+
+               for (size_t idx = 0; idx < tensor_info.size; idx++)
+                       new_buf[idx] = static_cast<float>(ori_buf[idx]) / 255.0f;
+
+               // Replace the original buffer with the new one and release the original.
+               tensorBuffer->buffer = new_buf;
+
+               if (!tensorBuffer->owner_is_backend)
+                       delete[] ori_buf;
+
+               tensorBuffer->data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+               tensorBuffer->owner_is_backend = false;
+
+               return MEDIA_VISION_ERROR_NONE;
+       }
+
+       template <typename T>
        T TensorBuffer::getValue(std::string name, int idx)
        {
                inference_engine_tensor_buffer* tBuffer =
@@ -169,5 +195,8 @@ namespace inference
 
        template float TensorBuffer::getValue<float>(std::string, int);
        template int TensorBuffer::getValue<int>(std::string, int);
+       template int TensorBuffer::convertToFloat<unsigned char>(_inference_engine_tensor_buffer*, _inference_engine_tensor_info const&);
+       template int TensorBuffer::convertToFloat<unsigned short>(_inference_engine_tensor_buffer*, _inference_engine_tensor_info const&);
+
 } /* Inference */
 } /* MediaVision */
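
The two explicit instantiations added above are needed because
convertToFloat is defined in TensorBuffer.cpp rather than in the
header, so callers in other translation units (here Inference.cpp) can
only use the instantiated types. A minimal illustration of the
pattern, with hypothetical names:

    // twice.h
    template <typename T> T twice(T v);

    // twice.cpp
    template <typename T> T twice(T v) { return v + v; }
    template int twice<int>(int);       // twice<int> usable elsewhere
    template float twice<float>(float); // likewise; other types won't link
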
diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec
index 79838c2..871bd15 100644
@@ -1,6 +1,6 @@
 Name:        capi-media-vision
 Summary:     Media Vision library for Tizen Native API
-Version:     0.23.12
+Version:     0.23.13
 Release:     0
 Group:       Multimedia/Framework
 License:     Apache-2.0 and BSD-3-Clause