mv_inference: Fix tensor data type
author: Inki Dae <inki.dae@samsung.com>
Mon, 15 Jun 2020 08:23:37 +0000 (17:23 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Thu, 25 Jun 2020 23:58:22 +0000 (08:58 +0900)
The output tensor type of the Inception v3 model converted for the Vivante NPU
is UINT16, so check for UINT16 instead of FLOAT16 when allocating tensor buffers.

Change-Id: Ic9b568aa42b6e77e63d404688efc59cfa8038266
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_inference/inference/src/Inference.cpp

index 62262ad..6a032a3 100644 (file)
@@ -611,7 +611,7 @@ namespace inference
                                        tensor_buffer.buffer = new unsigned char[tensor_info.size];
                                        tensor_buffer.size = tensor_info.size;
                                } else if (tensor_info.data_type ==
-                                                  INFERENCE_TENSOR_DATA_TYPE_FLOAT16) {
+                                                  INFERENCE_TENSOR_DATA_TYPE_UINT16) {
                                        tensor_buffer.buffer = new short[tensor_info.size];
                                        tensor_buffer.size = tensor_info.size;
                                } else {
@@ -663,7 +663,7 @@ namespace inference
                                        tensor_buffer.buffer = new char[tensor_info.size];
                                        tensor_buffer.size = tensor_info.size;
                                } else if (tensor_info.data_type ==
-                                                  INFERENCE_TENSOR_DATA_TYPE_FLOAT16) {
+                                                  INFERENCE_TENSOR_DATA_TYPE_UINT16) {
                                        tensor_buffer.buffer = new short[tensor_info.size];
                                        tensor_buffer.size = tensor_info.size;
                                } else {
@@ -728,7 +728,7 @@ namespace inference
                                        delete[] ori_buf;
                        }
 
-                       if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT16) {
+                       if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16) {
                                float *new_buf = new float[tensor_info.size];
                                if (new_buf == NULL) {
                                        LOGE("Fail to allocate a new output tensor buffer.");