mv_machine_learning: clear allocated tensor buffer 67/319067/3
author: Inki Dae <inki.dae@samsung.com>
Mon, 3 Feb 2025 05:15:26 +0000 (14:15 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Mon, 10 Feb 2025 08:15:07 +0000 (17:15 +0900)
Clear the allocated tensor buffer: switch each `new T[size]` to `new T[size]()`
so the array is value-initialized (zero-filled) instead of holding
indeterminate contents.

Change-Id: I6e00e4d9c9d17790cd09ae5aeb7f9f3e28382b4f
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_machine_learning/inference/src/TensorBuffer.cpp

index accf0939de8efd3871890cf73a876f86ed7ded0b..6cc62c6c808c221d5aed39200066e5f06849b5a7 100644 (file)
@@ -49,17 +49,17 @@ int TensorBuffer::allocate(inference_engine_tensor_buffer &tensor_buffer,
 
        try {
                if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
-                       tensor_buffer.buffer = new float[tensor_info.size];
+                       tensor_buffer.buffer = new float[tensor_info.size]();
                else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64)
-                       tensor_buffer.buffer = new long long[tensor_info.size];
+                       tensor_buffer.buffer = new long long[tensor_info.size]();
                else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT64)
-                       tensor_buffer.buffer = new unsigned long long[tensor_info.size];
+                       tensor_buffer.buffer = new unsigned long long[tensor_info.size]();
                else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT32)
-                       tensor_buffer.buffer = new unsigned int[tensor_info.size];
+                       tensor_buffer.buffer = new unsigned int[tensor_info.size]();
                else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8)
-                       tensor_buffer.buffer = new char[tensor_info.size];
+                       tensor_buffer.buffer = new char[tensor_info.size]();
                else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16)
-                       tensor_buffer.buffer = new unsigned short[tensor_info.size];
+                       tensor_buffer.buffer = new unsigned short[tensor_info.size]();
        } catch (const std::bad_alloc &e) {
                LOGE("Fail to allocate tensor buffer.(%s)", e.what());
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;