From: 오형석/On-Device Lab(SR)/Staff Engineer/삼성전자
Date: Wed, 10 Apr 2019 05:41:06 +0000 (+0900)
Subject: Remove tflite macro and type in cker library (#4966)
X-Git-Tag: accepted/tizen/unified/20190430.113441~40
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=2399e58f09b7b3fb54bde746dee54eb02f49870b;p=platform%2Fcore%2Fml%2Fnnfw.git

Remove tflite macro and type in cker library (#4966)

Remove tflite macro TFLITE_DCHECK_XX in cker library
Use uint8_t instead of tflite type uint8

Signed-off-by: Hyeongseok Oh
---

diff --git a/libs/cker/include/cker/operation/Conv.h b/libs/cker/include/cker/operation/Conv.h
index d232dbc..77768b7 100644
--- a/libs/cker/include/cker/operation/Conv.h
+++ b/libs/cker/include/cker/operation/Conv.h
@@ -143,7 +143,7 @@ inline void Conv(const ConvParams &params, const Shape &input_shape, const uint8
   const int output_shift = params.output_shift;
   const int32_t output_activation_min = params.quantized_activation_min;
   const int32_t output_activation_max = params.quantized_activation_max;
-  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+  assert(output_activation_min <= output_activation_max);
 
   assert(input_shape.DimensionsCount() == 4);
   assert(filter_shape.DimensionsCount() == 4);
@@ -202,7 +202,7 @@ inline void Conv(const ConvParams &params, const Shape &input_shape, const uint8
           acc = std::max(acc, output_activation_min);
           acc = std::min(acc, output_activation_max);
           output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
-              static_cast<uint8>(acc);
+              static_cast<uint8_t>(acc);
         }
       }
     }
diff --git a/libs/cker/include/cker/operation/FullyConnected.h b/libs/cker/include/cker/operation/FullyConnected.h
index 60218e3..54da0f7 100644
--- a/libs/cker/include/cker/operation/FullyConnected.h
+++ b/libs/cker/include/cker/operation/FullyConnected.h
@@ -95,10 +95,10 @@ inline void FullyConnected(const FullyConnectedParams &params, const Shape &inpu
   const int output_shift = params.output_shift;
   const int32_t output_activation_min = params.quantized_activation_min;
   const int32_t output_activation_max = params.quantized_activation_max;
-  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
-  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
+  assert(filter_shape.DimensionsCount() >= 2);
+  assert(output_shape.DimensionsCount() >= 1);
 
-  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+  assert(output_activation_min <= output_activation_max);
   // TODO(benoitjacob): This really should be:
   //     const int batches = ArraySize(output_dims, 1);
   // but the current --variable_batch hack consists in overwriting the 3rd
@@ -129,7 +129,7 @@ inline void FullyConnected(const FullyConnectedParams &params, const Shape &inpu
       acc += output_offset;
       acc = std::max(acc, output_activation_min);
       acc = std::min(acc, output_activation_max);
-      output_data[out_c + output_depth * b] = static_cast<uint8>(acc);
+      output_data[out_c + output_depth * b] = static_cast<uint8_t>(acc);
     }
   }
 }
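
For reference, a minimal sketch (not part of the patch) of the substitution this commit applies: the TFLite-style debug macros TFLITE_DCHECK_LE / TFLITE_DCHECK_GE, which check "a <= b" / "a >= b", become plain <cassert> assertions, and the TFLite typedef uint8 becomes the standard uint8_t from <cstdint>. QuantizeClamp is a hypothetical helper used only for illustration, not a function from the cker library.

#include <algorithm>
#include <cassert>
#include <cstdint>

// Clamp a 32-bit accumulator to the quantized activation range and
// narrow it to an 8-bit output value, in the style of the patched code.
inline uint8_t QuantizeClamp(int32_t acc, int32_t output_activation_min,
                             int32_t output_activation_max)
{
  // Before: TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  assert(output_activation_min <= output_activation_max);
  acc = std::max(acc, output_activation_min);
  acc = std::min(acc, output_activation_max);
  // Before: static_cast<uint8>(acc) using the TFLite typedef.
  return static_cast<uint8_t>(acc); // standard fixed-width type
}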