From a2db73447171efa72e20f3bf020761ca3ebe9af2 Mon Sep 17 00:00:00 2001
From: Inki Dae
Date: Fri, 17 Apr 2020 16:40:05 +0900
Subject: [PATCH] clean up enumeration prefix

Change-Id: I48bfa0d402dcf0f68f8f6f1afde34afb35ae84c2
Signed-off-by: Inki Dae
---
 include/inference_engine_type.h    | 20 ++++----
 test/src/inference_engine_test.cpp | 78 +++++++++++++++---------------
 2 files changed, 48 insertions(+), 50 deletions(-)

diff --git a/include/inference_engine_type.h b/include/inference_engine_type.h
index 5a3a653..ef7e74f 100644
--- a/include/inference_engine_type.h
+++ b/include/inference_engine_type.h
@@ -80,8 +80,8 @@ typedef enum {
  *
  */
 typedef enum {
-	TENSOR_SHAPE_NCHW = 0, /**< tensor order is batch size, a number of channels, height, width. */
-	TENSOR_SHAPE_NHWC, /**< tensor order is batch size, height, width, a number of channels. */
+	INFERENCE_TENSOR_SHAPE_NCHW = 0, /**< tensor order is batch size, a number of channels, height, width. */
+	INFERENCE_TENSOR_SHAPE_NHWC, /**< tensor order is batch size, height, width, a number of channels. */
 } inference_tensor_shape_type_e;
 
 /**
@@ -91,17 +91,15 @@ typedef enum {
  *
  */
 typedef enum {
-	TENSOR_DATA_TYPE_NONE = 0,
-	TENSOR_DATA_TYPE_FLOAT16,
-	TENSOR_DATA_TYPE_FLOAT32,
-	TENSOR_DATA_TYPE_UINT8,
-	TENSOR_DATA_TYPE_UINT16,
-	TENSOR_DATA_TYPE_UINT32,
-	TENSOR_DATA_TYPE_MAX
+	INFERENCE_TENSOR_DATA_TYPE_NONE = 0,
+	INFERENCE_TENSOR_DATA_TYPE_FLOAT16,
+	INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+	INFERENCE_TENSOR_DATA_TYPE_UINT8,
+	INFERENCE_TENSOR_DATA_TYPE_UINT16,
+	INFERENCE_TENSOR_DATA_TYPE_UINT32,
+	INFERENCE_TENSOR_DATA_TYPE_MAX
 } inference_tensor_data_type_e;
 
-#define INFERENCE_TARGET_MASK (INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU | INFERENCE_TARGET_CUSTOM)
-
 /**
  * @brief Tensor defined by the dimension and their corresponding data
  * @details @a dimInfo is the information
diff --git a/test/src/inference_engine_test.cpp b/test/src/inference_engine_test.cpp
index 1199a24..b0a457c 100644
--- a/test/src/inference_engine_test.cpp
+++ b/test/src/inference_engine_test.cpp
@@ -147,10 +147,10 @@ int PrepareTensorBuffers(InferenceEngineCommon *engine, std::vector &inputs, s
 			continue;
 		}
 
-		if (tensor_buffer.data_type == TENSOR_DATA_TYPE_FLOAT32)
+		if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
 			delete[] (float *)tensor_buffer.buffer;
 		else
 			delete[] (unsigned char *)tensor_buffer.buffer;
@@ -228,7 +228,7 @@ void CleanupTensorBuffers(std::vector &inputs, s
 			continue;
 		}
 
-		if (tensor_buffer.data_type == TENSOR_DATA_TYPE_FLOAT32)
+		if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
 			delete[] (float *)tensor_buffer.buffer;
 		else
 			delete[] (unsigned char *)tensor_buffer.buffer;
@@ -321,7 +321,7 @@ void FillOutputResult(InferenceEngineCommon *engine, std::vector