Remove tflite macro and type in cker library (#4966)
author Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Wed, 10 Apr 2019 05:41:06 +0000 (14:41 +0900)
committer Saehie Park/On-Device Lab(SR)/Principal Engineer/Samsung Electronics <saehie.park@samsung.com>
Wed, 10 Apr 2019 05:41:06 +0000 (14:41 +0900)
Remove tflite TFLITE_DCHECK_XX macros in cker library
Use uint8_t instead of the tflite type uint8

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
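
For reference (not part of the patch): the removed TFLITE_DCHECK_* macros are debug-only checks in TFLite, so the plain <cassert> assert used below is an equivalent replacement; both compile to nothing when NDEBUG is defined. A minimal sketch of the mapping, with a hypothetical helper name used only for illustration:

#include <cassert>
#include <cstdint>

// TFLITE_DCHECK_LE(a, b)  ->  assert((a) <= (b))
// TFLITE_DCHECK_GE(a, b)  ->  assert((a) >= (b))
// Hypothetical helper, only to show the replacement pattern used in this patch.
inline void CheckQuantizedActivationRange(int32_t output_activation_min,
                                          int32_t output_activation_max)
{
  assert(output_activation_min <= output_activation_max);
}
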
libs/cker/include/cker/operation/Conv.h
libs/cker/include/cker/operation/FullyConnected.h

diff --git a/libs/cker/include/cker/operation/Conv.h b/libs/cker/include/cker/operation/Conv.h
index d232dbc..77768b7 100644
@@ -143,7 +143,7 @@ inline void Conv(const ConvParams &params, const Shape &input_shape, const uint8
   const int output_shift = params.output_shift;
   const int32_t output_activation_min = params.quantized_activation_min;
   const int32_t output_activation_max = params.quantized_activation_max;
-  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+  assert(output_activation_min <= output_activation_max);
 
   assert(input_shape.DimensionsCount() == 4);
   assert(filter_shape.DimensionsCount() == 4);
@@ -202,7 +202,7 @@ inline void Conv(const ConvParams &params, const Shape &input_shape, const uint8
           acc = std::max(acc, output_activation_min);
           acc = std::min(acc, output_activation_max);
           output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
-              static_cast<uint8>(acc);
+              static_cast<uint8_t>(acc);
         }
       }
     }
diff --git a/libs/cker/include/cker/operation/FullyConnected.h b/libs/cker/include/cker/operation/FullyConnected.h
index 60218e3..54da0f7 100644
@@ -95,10 +95,10 @@ inline void FullyConnected(const FullyConnectedParams &params, const Shape &inpu
   const int output_shift = params.output_shift;
   const int32_t output_activation_min = params.quantized_activation_min;
   const int32_t output_activation_max = params.quantized_activation_max;
-  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
-  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
+  assert(filter_shape.DimensionsCount() >= 2);
+  assert(output_shape.DimensionsCount() >= 1);
 
-  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+  assert(output_activation_min <= output_activation_max);
   // TODO(benoitjacob): This really should be:
   //     const int batches = ArraySize(output_dims, 1);
   // but the current --variable_batch hack consists in overwriting the 3rd
@@ -129,7 +129,7 @@ inline void FullyConnected(const FullyConnectedParams &params, const Shape &inpu
       acc += output_offset;
       acc = std::max(acc, output_activation_min);
       acc = std::min(acc, output_activation_max);
-      output_data[out_c + output_depth * b] = static_cast<uint8>(acc);
+      output_data[out_c + output_depth * b] = static_cast<uint8_t>(acc);
     }
   }
 }
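
As a usage note (a simplified sketch of the pattern above, not part of the patch): the clamp-then-cast sequence guarantees the accumulator is already inside the quantized activation range before the narrowing cast, so replacing the TFLite alias uint8 with the standard uint8_t changes nothing at runtime.

#include <algorithm>
#include <cstdint>

// Hypothetical helper, simplified from the Conv/FullyConnected inner loops
// above: clamp the 32-bit accumulator into
// [output_activation_min, output_activation_max] (a subset of [0, 255] for
// uint8 kernels), then narrow to the standard uint8_t.
inline uint8_t ClampAndQuantize(int32_t acc, int32_t output_activation_min,
                                int32_t output_activation_max)
{
  acc = std::max(acc, output_activation_min);
  acc = std::min(acc, output_activation_max);
  return static_cast<uint8_t>(acc);
}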