mv_inference: Add uint64 output tensor support
author     Inki Dae <inki.dae@samsung.com>
           Fri, 29 May 2020 06:15:57 +0000 (15:15 +0900)
committer  Inki Dae <inki.dae@samsung.com>
           Fri, 29 May 2020 06:15:57 +0000 (15:15 +0900)
Change-Id: Ieda8df82ae27cf0120dd8a463bbed7aa436f900d
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_inference/inference/src/Inference.cpp

index 3b425563f9234e0497bee6b85d3684223e43af02..c7b5d2f280f035be9221e53a70b2266277222bd1 100755
@@ -524,6 +524,8 @@ void Inference::CleanupTensorBuffers(void)
                                delete[] (float *)tensor_buffer.buffer;
                        else if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT32)
                                delete[] (unsigned int *)tensor_buffer.buffer;
+                       else if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT64)
+                               delete[] (unsigned long long *)tensor_buffer.buffer;
                        else
                                delete[] (unsigned char *)tensor_buffer.buffer;
                }
@@ -619,6 +621,9 @@ int Inference::PrepareTenosrBuffers(void)
                        if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
                                tensor_buffer.buffer = new float[tensor_info.size];
                                tensor_buffer.size = tensor_info.size * 4;
+                       } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT64) {
+                               tensor_buffer.buffer = new unsigned long long[tensor_info.size];
+                               tensor_buffer.size = tensor_info.size * 8;
                        } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT32) {
                                tensor_buffer.buffer = new unsigned int[tensor_info.size];
                                tensor_buffer.size = tensor_info.size * 4;