mv_inference: Update actual tensor buffer size
author Inki Dae <inki.dae@samsung.com>
Thu, 5 Mar 2020 01:05:12 +0000 (10:05 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:42:19 +0000 (09:42 +0900)
This patch stores the actual tensor buffer size (in bytes) in the size member
of the inference_engine_tensor_buffer structure so that the actual size can be
used by other components.

Change-Id: Ifa3688c33f45e105dd2f8c0c173b7003998c6432
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_inference/inference/src/Inference.cpp

index ffc36043f202660b0accf781223bbe5e515d34a9..03f395564b06645255f57d1265a1d28135c8cec9 100755 (executable)
@@ -506,8 +506,10 @@ int Inference::PrepareTenosrBuffers(void)
                        inference_engine_tensor_buffer tensor_buffer;
                        if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
                                tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
+                               tensor_buffer.size = tensor_info.size * 4;
                        } else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
                                tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
+                               tensor_buffer.size = tensor_info.size;
                        } else {
                                LOGE("Invalid input tensor data type.");
                                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -548,8 +550,10 @@ int Inference::PrepareTenosrBuffers(void)
                        inference_engine_tensor_buffer tensor_buffer;
                        if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
                                tensor_buffer.buffer = new float[tensor_info.size];
+                               tensor_buffer.size = tensor_info.size * 4;
                        } else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
                                tensor_buffer.buffer = new char[tensor_info.size];
+                               tensor_buffer.size = tensor_info.size;
                        } else {
                                LOGE("Invalid output tensor data type.");
                                CleanupTensorBuffers();
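
For illustration, below is a minimal sketch of the allocation logic the hunks above modify, assuming tensor_info.size holds the tensor's element count as in the diff. The helper AllocateTensorBuffer and the simplified struct are hypothetical and only mirror the shape of the real inference_engine_tensor_buffer; they are not part of this patch. Using sizeof(float) instead of the literal 4 keeps the byte-size computation self-documenting.

    // Minimal sketch, not the actual Inference.cpp code.
    // Assumes element_count is the number of elements, matching tensor_info.size above.
    #include <cstddef>

    enum tensor_data_type { TENSOR_DATA_TYPE_FLOAT32, TENSOR_DATA_TYPE_UINT8 };

    struct tensor_buffer_sketch {
        void *buffer;   // raw allocation holding the tensor data
        size_t size;    // actual buffer size in bytes, as set by this patch
    };

    // Hypothetical helper: allocate a buffer and record its size in bytes.
    static bool AllocateTensorBuffer(tensor_data_type type, size_t element_count,
                                     tensor_buffer_sketch &out)
    {
        if (type == TENSOR_DATA_TYPE_FLOAT32) {
            out.buffer = new float[element_count];
            out.size = element_count * sizeof(float);          // 4 bytes per element
        } else if (type == TENSOR_DATA_TYPE_UINT8) {
            out.buffer = new unsigned char[element_count];
            out.size = element_count * sizeof(unsigned char);  // 1 byte per element
        } else {
            return false;   // unsupported data type
        }
        return true;
    }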