mv_machine_learning: correct tensor buffer size 26/287926/3
author Inki Dae <inki.dae@samsung.com>
Wed, 8 Feb 2023 01:25:26 +0000 (10:25 +0900)
committer Inki Dae <inki.dae@samsung.com>
Wed, 8 Feb 2023 04:36:59 +0000 (13:36 +0900)
[Version] : 0.26.7
[Issue type] : bug fix

Correct output tensor buffer size.

In the original code, we set the size of inference_engine_tensor_buffer to
that of inference_engine_tensor_info, and this incurs a segmentation fault
when a non-float data type is used. These two sizes differ from each other
as shown below,

Size of inference_engine_tensor_info : tensor element count
Size of inference_engine_tensor_buffer : tensor element count * bytes per
element.

So this patch calculates the size of inference_engine_tensor_buffer correctly
by checking the actual number of bytes per tensor element.

Change-Id: I21733fb341e93f325f6d4a3bb4df66ff69e15413
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_machine_learning/inference/include/TensorBuffer.h
mv_machine_learning/inference/src/TensorBuffer.cpp
packaging/capi-media-vision.spec

index 05bbb20..c4654cd 100644 (file)
@@ -41,6 +41,8 @@ class TensorBuffer
 private:
        IETensorBuffer _tensorBuffer;
 
+       size_t getSizeOf(inference_tensor_data_type_e data_type);
+
 public:
        TensorBuffer() = default;
        ~TensorBuffer() = default;
index 2d48f52..2510549 100644 (file)
@@ -65,12 +65,12 @@ int TensorBuffer::allocate(inference_engine_tensor_buffer &tensor_buffer,
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
 
-       tensor_buffer.size = tensor_info.size;
-
-       LOGI("Allocated tensor buffer(size = %zu, data type = %d)", tensor_info.size, tensor_info.data_type);
+       tensor_buffer.size = tensor_info.size * getSizeOf(tensor_info.data_type);
        tensor_buffer.owner_is_backend = 0;
        tensor_buffer.data_type = tensor_info.data_type;
 
+       LOGI("Allocated tensor buffer(size = %zu, data type = %d)", tensor_buffer.size, tensor_buffer.data_type);
+
        return MEDIA_VISION_ERROR_NONE;
 }
 
@@ -99,6 +99,23 @@ void TensorBuffer::release()
        IETensorBuffer().swap(_tensorBuffer);
 }
 
+size_t TensorBuffer::getSizeOf(inference_tensor_data_type_e data_type)
+{
+       switch (data_type) {
+       case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
+               return sizeof(float);
+       case INFERENCE_TENSOR_DATA_TYPE_INT64:
+       case INFERENCE_TENSOR_DATA_TYPE_UINT64:
+               return sizeof(int64_t);
+       case INFERENCE_TENSOR_DATA_TYPE_UINT32:
+               return sizeof(uint32_t);
+       case INFERENCE_TENSOR_DATA_TYPE_UINT16:
+               return sizeof(u_int16_t);
+       default:
+               return 1;
+       }
+}
+
 size_t TensorBuffer::size()
 {
        return _tensorBuffer.size();
@@ -161,7 +178,8 @@ int TensorBuffer::GetTensorInfo(inference_engine_layer_property &layerProperty,
 
 template<typename T> int TensorBuffer::convertToFloat(inference_engine_tensor_buffer *tensorBuffer)
 {
-       float *new_buf = new (std::nothrow) float[tensorBuffer->size];
+       size_t element_cnt = tensorBuffer->size / getSizeOf(tensorBuffer->data_type);
+       float *new_buf = new (std::nothrow) float[element_cnt];
        if (new_buf == NULL) {
                LOGE("Fail to allocate a new output tensor buffer.");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
@@ -169,7 +187,7 @@ template<typename T> int TensorBuffer::convertToFloat(inference_engine_tensor_bu
 
        auto ori_buf = static_cast<T *>(tensorBuffer->buffer);
 
-       for (size_t idx = 0; idx < tensorBuffer->size; idx++)
+       for (size_t idx = 0; idx < element_cnt; idx++)
                new_buf[idx] = static_cast<float>(ori_buf[idx]) / 255.0f;
 
        // replace original buffer with new one, and release origin one.
@@ -181,6 +199,10 @@ template<typename T> int TensorBuffer::convertToFloat(inference_engine_tensor_bu
        tensorBuffer->data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
        tensorBuffer->owner_is_backend = false;
 
+       // tensorBuffer->buffer has been changed to new one which is converted to float data type.
+       // And its size is tensor element count * sizeof(float) so the size should be calculated again like below,
+       tensorBuffer->size = element_cnt * sizeof(float);
+
        return MEDIA_VISION_ERROR_NONE;
 }
 
index 217969c..619dcba 100644 (file)
@@ -1,6 +1,6 @@
 Name:        capi-media-vision
 Summary:     Media Vision library for Tizen Native API
-Version:     0.26.6
+Version:     0.26.7
 Release:     0
 Group:       Multimedia/Framework
 License:     Apache-2.0 and BSD-3-Clause