From 2399e58f09b7b3fb54bde746dee54eb02f49870b Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=98=A4=ED=98=95=EC=84=9D/On-Device=20Lab=28SR=29/Staff?= =?utf8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Wed, 10 Apr 2019 14:41:06 +0900 Subject: [PATCH] Remove tflite macro and type in cker library (#4966) Remove tflite macro TFLITE_DCHECK_XX in cker library Use uint8_t instead of tflite type uint8 Signed-off-by: Hyeongseok Oh --- libs/cker/include/cker/operation/Conv.h | 4 ++-- libs/cker/include/cker/operation/FullyConnected.h | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/libs/cker/include/cker/operation/Conv.h b/libs/cker/include/cker/operation/Conv.h index d232dbc..77768b7 100644 --- a/libs/cker/include/cker/operation/Conv.h +++ b/libs/cker/include/cker/operation/Conv.h @@ -143,7 +143,7 @@ inline void Conv(const ConvParams &params, const Shape &input_shape, const uint8 const int output_shift = params.output_shift; const int32_t output_activation_min = params.quantized_activation_min; const int32_t output_activation_max = params.quantized_activation_max; - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); + assert(output_activation_min <= output_activation_max); assert(input_shape.DimensionsCount() == 4); assert(filter_shape.DimensionsCount() == 4); @@ -202,7 +202,7 @@ inline void Conv(const ConvParams &params, const Shape &input_shape, const uint8 acc = std::max(acc, output_activation_min); acc = std::min(acc, output_activation_max); output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = - static_cast<uint8>(acc); + static_cast<uint8_t>(acc); } } } diff --git a/libs/cker/include/cker/operation/FullyConnected.h b/libs/cker/include/cker/operation/FullyConnected.h index 60218e3..54da0f7 100644 --- a/libs/cker/include/cker/operation/FullyConnected.h +++ b/libs/cker/include/cker/operation/FullyConnected.h @@ -95,10 +95,10 @@ inline void FullyConnected(const FullyConnectedParams &params, const Shape &inpu const int output_shift = 
params.output_shift; const int32_t output_activation_min = params.quantized_activation_min; const int32_t output_activation_max = params.quantized_activation_max; - TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2); - TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1); + assert(filter_shape.DimensionsCount() >= 2); + assert(output_shape.DimensionsCount() >= 1); - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); + assert(output_activation_min <= output_activation_max); // TODO(benoitjacob): This really should be: // const int batches = ArraySize(output_dims, 1); // but the current --variable_batch hack consists in overwriting the 3rd @@ -129,7 +129,7 @@ inline void FullyConnected(const FullyConnectedParams &params, const Shape &inpu acc += output_offset; acc = std::max(acc, output_activation_min); acc = std::min(acc, output_activation_max); - output_data[out_c + output_depth * b] = static_cast<uint8>(acc); + output_data[out_c + output_depth * b] = static_cast<uint8_t>(acc); } } } -- 2.7.4