mv_inference: drop data types not supported
author Inki Dae <inki.dae@samsung.com>
Mon, 13 Apr 2020 07:40:40 +0000 (16:40 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:42:19 +0000 (09:42 +0900)
Mediavision doesn't support the float16, uint16, or uint32 model data
types, so drop them. They can be added back later when an actual use
case arises.

Change-Id: Iebe7511326f45c39b09a4429f8de361ca8fe354c
Signed-off-by: Inki Dae <inki.dae@samsung.com>
include/mv_inference_type.h
mv_inference/inference/src/Inference.cpp

index f31a900..58e2faa 100644 (file)
@@ -68,11 +68,8 @@ typedef enum {
  *
  */
 typedef enum {
-       MV_INFERENCE_DATA_FLOAT16   = 0,
-       MV_INFERENCE_DATA_FLOAT32,
-       MV_INFERENCE_DATA_UINT8,
-       MV_INFERENCE_DATA_UINT16,
-       MV_INFERENCE_DATA_UINT32
+       MV_INFERENCE_DATA_FLOAT32 = 0,
+       MV_INFERENCE_DATA_UINT8
 } mv_inference_data_type_e;
 
 /**
index 55d6e59..bec4efd 100755 (executable)
@@ -236,21 +236,12 @@ inference_tensor_data_type_e Inference::ConvertToIE(int given_type)
        inference_tensor_data_type_e type = TENSOR_DATA_TYPE_FLOAT32;
 
        switch (given_type) {
-       case MV_INFERENCE_DATA_FLOAT16:
-               type = TENSOR_DATA_TYPE_FLOAT16;
-               break;
        case MV_INFERENCE_DATA_FLOAT32:
                type = TENSOR_DATA_TYPE_FLOAT32;
                break;
        case MV_INFERENCE_DATA_UINT8:
                type = TENSOR_DATA_TYPE_UINT8;
                break;
-       case MV_INFERENCE_DATA_UINT16:
-               type = TENSOR_DATA_TYPE_UINT16;
-               break;
-       case MV_INFERENCE_DATA_UINT32:
-               type = TENSOR_DATA_TYPE_UINT32;
-               break;
        default:
                LOGI("unknown data type so FLOAT32 data type will be used in default");
                break;