From e7ea87f97e03360719d132a71acc1eb2f93c249f Mon Sep 17 00:00:00 2001
From: Suharsh Sivakumar
Date: Sat, 7 Apr 2018 10:15:58 -0700
Subject: [PATCH] Automated g4 rollback of changelist 191938267

PiperOrigin-RevId: 192007784
---
 .../kernels/internal/optimized/optimized_ops.h |  28 ++---
 .../kernels/internal/reference/reference_ops.h |  13 +--
 tensorflow/contrib/lite/kernels/pad.cc         |  27 ++---
 tensorflow/contrib/lite/kernels/pad_test.cc    | 129 +++------------------
 4 files changed, 39 insertions(+), 158 deletions(-)

diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
index 7a383fb..9a27461 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
@@ -5067,7 +5067,7 @@ template <typename T>
 inline void Pad(const T* input_data, const Dims<4>& input_dims,
                 const std::vector<int>& left_paddings,
                 const std::vector<int>& right_paddings, T* output_data,
-                const Dims<4>& output_dims, const int32_t pad_value) {
+                const Dims<4>& output_dims) {
   gemmlowp::ScopedProfilingLabel label("Pad");
   const int output_batch = ArraySize(output_dims, 3);
   const int output_height = ArraySize(output_dims, 2);
@@ -5087,27 +5087,27 @@ inline void Pad(const T* input_data, const Dims<4>& input_dims,
   const int input_depth = ArraySize(input_dims, 0);
 
   if (left_b_padding != 0) {
-    memset(output_data, pad_value,
+    memset(output_data, 0,
            left_b_padding * output_height * output_width * output_depth *
                sizeof(T));
   }
   for (int out_b = left_b_padding; out_b < output_batch - right_b_padding;
        ++out_b) {
     if (left_h_padding != 0) {
-      memset(output_data + Offset(output_dims, 0, 0, 0, out_b), pad_value,
+      memset(output_data + Offset(output_dims, 0, 0, 0, out_b), 0,
              left_h_padding * output_width * output_depth * sizeof(T));
     }
     for (int out_h = left_h_padding; out_h < output_height - right_h_padding;
          ++out_h) {
       if (left_w_padding != 0) {
-        memset(output_data + Offset(output_dims, 0, 0, out_h, out_b), pad_value,
+        memset(output_data + Offset(output_dims, 0, 0, out_h, out_b), 0,
                left_w_padding * output_depth * sizeof(T));
       }
       for (int out_w = left_w_padding; out_w < output_width - right_w_padding;
            ++out_w) {
         if (left_d_padding != 0) {
-          memset(output_data + Offset(output_dims, 0, out_w, out_h, out_b),
-                 pad_value, left_d_padding * sizeof(T));
+          memset(output_data + Offset(output_dims, 0, out_w, out_h, out_b), 0,
+                 left_d_padding * sizeof(T));
         }
 
         T* out = output_data +
@@ -5121,21 +5121,20 @@ inline void Pad(const T* input_data, const Dims<4>& input_dims,
           memset(
               output_data + Offset(output_dims, output_depth - right_d_padding,
                                    out_w, out_h, out_b),
-              pad_value, right_d_padding * sizeof(T));
+              0, right_d_padding * sizeof(T));
         }
       }
       if (right_w_padding != 0) {
         memset(
             output_data + Offset(output_dims, 0, output_width - right_w_padding,
                                  out_h, out_b),
-            pad_value, right_w_padding * output_depth * sizeof(T));
+            0, right_w_padding * output_depth * sizeof(T));
       }
     }
     if (right_h_padding != 0) {
       memset(output_data + Offset(output_dims, 0, 0,
                                   output_height - right_h_padding, out_b),
-             pad_value,
-             right_h_padding * output_width * output_depth * sizeof(T));
+             0, right_h_padding * output_width * output_depth * sizeof(T));
     }
   }
   if (right_b_padding != 0) {
@@ -5148,15 +5147,6 @@ inline void Pad(const T* input_data, const Dims<4>& input_dims,
 }
 
 template <typename T>
-inline void Pad(const T* input_data, const Dims<4>& input_dims,
-                const std::vector<int>& left_paddings,
-                const std::vector<int>& right_paddings, T* output_data,
-                const Dims<4>& output_dims) {
-  Pad(input_data, input_dims, left_paddings, right_paddings, output_data,
-      output_dims, 0);
-}
-
-template <typename T>
 inline void StridedSlice(const T* input_data, const Dims<4>& input_dims,
                          int begin_mask, int end_mask,
                          const std::vector<int>& starts,
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index 3245bf6..31e190e 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -3086,7 +3086,7 @@ template <typename T>
 inline void Pad(const T* input_data, const Dims<4>& input_dims,
                 const std::vector<int>& left_paddings,
                 const std::vector<int>& right_paddings, T* output_data,
-                const Dims<4>& output_dims, const int32_t pad_value) {
+                const Dims<4>& output_dims) {
   const int output_batch = ArraySize(output_dims, 3);
   const int output_height = ArraySize(output_dims, 2);
   const int output_width = ArraySize(output_dims, 1);
@@ -3116,7 +3116,7 @@ inline void Pad(const T* input_data, const Dims<4>& input_dims,
               out_w >= output_width - right_w_padding ||
               out_d < left_d_padding ||
               out_d >= output_depth - right_d_padding) {
-            *out_ptr++ = static_cast<T>(pad_value);
+            *out_ptr++ = 0;
           } else {
             *out_ptr++ = *in_ptr++;
           }
@@ -3126,15 +3126,6 @@ inline void Pad(const T* input_data, const Dims<4>& input_dims,
   }
 }
 
-template <typename T>
-inline void Pad(const T* input_data, const Dims<4>& input_dims,
-                const std::vector<int>& left_paddings,
-                const std::vector<int>& right_paddings, T* output_data,
-                const Dims<4>& output_dims) {
-  Pad(input_data, input_dims, left_paddings, right_paddings, output_data,
-      output_dims, 0);
-}
-
 inline bool LoopCondition(int index, int stop, int stride) {
   return stride > 0 ? index < stop : index > stop;
 }
diff --git a/tensorflow/contrib/lite/kernels/pad.cc b/tensorflow/contrib/lite/kernels/pad.cc
index 4f9449a..c29da38 100644
--- a/tensorflow/contrib/lite/kernels/pad.cc
+++ b/tensorflow/contrib/lite/kernels/pad.cc
@@ -119,46 +119,39 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     after_padding.push_back(paddings_data[idx * 2 + 1]);
   }
 
-#define TF_LITE_PAD(type, scalar, pad_value)                                 \
+#define TF_LITE_PAD(type, scalar)                                            \
   type::Pad(GetTensorData<scalar>(op_context.input),                         \
             GetTensorDims(op_context.input), before_padding, after_padding,  \
             GetTensorData<scalar>(op_context.output),                        \
-            GetTensorDims(op_context.output), pad_value)
+            GetTensorDims(op_context.output))
 
   switch (op_context.input->type) {
     case kTfLiteFloat32:
       if (kernel_type == kReference) {
-        TF_LITE_PAD(reference_ops, float, 0);
+        TF_LITE_PAD(reference_ops, float);
       } else if (kernel_type == kGenericOptimized) {
-        TF_LITE_PAD(optimized_ops, float, 0);
+        TF_LITE_PAD(optimized_ops, float);
       }
       break;
     case kTfLiteUInt8:
-      // Quantized Pad requires that 0 is represented in the quantized range.
-      TF_LITE_ENSURE(context, op_context.output->params.zero_point >=
-                                  std::numeric_limits<uint8_t>::min());
-      TF_LITE_ENSURE(context, op_context.output->params.zero_point <=
-                                  std::numeric_limits<uint8_t>::max());
       if (kernel_type == kReference) {
-        TF_LITE_PAD(reference_ops, uint8_t,
-                    op_context.output->params.zero_point);
+        TF_LITE_PAD(reference_ops, uint8_t);
       } else if (kernel_type == kGenericOptimized) {
-        TF_LITE_PAD(optimized_ops, uint8_t,
-                    op_context.output->params.zero_point);
+        TF_LITE_PAD(optimized_ops, uint8_t);
       }
       break;
     case kTfLiteInt32:
      if (kernel_type == kReference) {
-        TF_LITE_PAD(reference_ops, int32_t, 0);
+        TF_LITE_PAD(reference_ops, int32_t);
      } else if (kernel_type == kGenericOptimized) {
-        TF_LITE_PAD(optimized_ops, int32_t, 0);
+        TF_LITE_PAD(optimized_ops, int32_t);
      }
      break;
    case kTfLiteInt64:
      if (kernel_type == kReference) {
-        TF_LITE_PAD(reference_ops, int64_t, 0);
+        TF_LITE_PAD(reference_ops, int64_t);
      } else if (kernel_type == kGenericOptimized) {
-        TF_LITE_PAD(optimized_ops, int64_t, 0);
+        TF_LITE_PAD(optimized_ops, int64_t);
      }
      break;
    default:
diff --git a/tensorflow/contrib/lite/kernels/pad_test.cc b/tensorflow/contrib/lite/kernels/pad_test.cc
index c06237e..28834ad 100644
--- a/tensorflow/contrib/lite/kernels/pad_test.cc
+++ b/tensorflow/contrib/lite/kernels/pad_test.cc
@@ -22,7 +22,6 @@ namespace tflite {
 namespace {
 
 using ::testing::ElementsAreArray;
-using ::testing::Matcher;
 
 class PadOpModel : public SingleOpModel {
  public:
@@ -30,10 +29,6 @@ class PadOpModel : public SingleOpModel {
     PopulateTensor<float>(input_, data);
   }
 
-  void SetQuantizedInput(std::initializer_list<float> data) {
-    QuantizeAndPopulate<uint8_t>(input_, data);
-  }
-
   void SetPaddings(std::initializer_list<int> paddings) {
     PopulateTensor<int>(paddings_, paddings);
   }
@@ -41,11 +36,6 @@ class PadOpModel : public SingleOpModel {
   std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
   std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
 
-  std::vector<float> GetDequantizedOutput() {
-    return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
-                               GetScale(output_), GetZeroPoint(output_));
-  }
-
  protected:
   int input_;
   int output_;
@@ -60,17 +50,16 @@ class PadOpModel : public SingleOpModel {
 //    m.Invoke();
 class PadOpConstModel : public PadOpModel {
  public:
-  PadOpConstModel(const TensorData& input,
+  PadOpConstModel(std::initializer_list<int> input_shape,
                   std::initializer_list<int> paddings_shape,
-                  std::initializer_list<int> paddings,
-                  const TensorData& output) {
-    input_ = AddInput(input);
+                  std::initializer_list<int> paddings) {
+    input_ = AddInput(TensorType_FLOAT32);
     paddings_ = AddConstInput(TensorType_INT32, paddings, paddings_shape);
-    output_ = AddOutput(output);
+    output_ = AddOutput(TensorType_FLOAT32);
 
     SetBuiltinOp(BuiltinOperator_PAD, BuiltinOptions_PadOptions,
                  CreatePadOptions(builder_).Union());
-    BuildInterpreter({input.shape});
+    BuildInterpreter({input_shape});
   }
 };
 
@@ -83,45 +72,40 @@ class PadOpConstModel : public PadOpModel {
 //    m.Invoke();
 class PadOpDynamicModel : public PadOpModel {
  public:
-  PadOpDynamicModel(const TensorData& input,
-                    std::initializer_list<int> paddings_shape,
-                    const TensorData& output) {
-    input_ = AddInput(input);
+  PadOpDynamicModel(std::initializer_list<int> input_shape,
+                    std::initializer_list<int> paddings_shape) {
+    input_ = AddInput(TensorType_FLOAT32);
     paddings_ = AddInput(TensorType_INT32);
-    output_ = AddOutput(output);
+    output_ = AddOutput(TensorType_FLOAT32);
 
     SetBuiltinOp(BuiltinOperator_PAD, BuiltinOptions_PadOptions,
                  CreatePadOptions(builder_).Union());
-    BuildInterpreter({input.shape, paddings_shape});
+    BuildInterpreter({input_shape, paddings_shape});
   }
 };
 
 TEST(PadOpTest, TooManyDimensions) {
   EXPECT_DEATH(
-      PadOpConstModel({TensorType_FLOAT32, {1, 2, 3, 4, 5, 6, 7, 8, 9}}, {9, 2},
-                      {1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9},
-                      {TensorType_FLOAT32}),
+      PadOpConstModel({1, 2, 3, 4, 5, 6, 7, 8, 9}, {9, 2},
+                      {1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9}),
       "dims != 4");
 }
 
 TEST(PadOpTest, UnequalDimensions) {
-  EXPECT_DEATH(PadOpConstModel({TensorType_FLOAT32, {1, 1, 2, 1}}, {3, 2},
-                               {1, 1, 2, 2, 3, 3}, {TensorType_FLOAT32}),
+  EXPECT_DEATH(PadOpConstModel({1, 1, 2, 1}, {3, 2}, {1, 1, 2, 2, 3, 3}),
                "3 != 4");
 }
 
 TEST(PadOpTest, InvalidPadValue) {
   EXPECT_DEATH(
-      PadOpConstModel({TensorType_FLOAT32, {1, 1, 2, 1}}, {4, 2},
-                      {0, 0, 1, -1, 2, -1, 0, 0}, {TensorType_FLOAT32}),
+      PadOpConstModel({1, 1, 2, 1}, {4, 2}, {0, 0, 1, -1, 2, -1, 0, 0}),
       "Pad value has to be greater than equal to 0.");
 }
 
 TEST(PadOpTest, SimpleConstTest) {
   // Padding is represented as four 2-D lists representing above padding and
   // below padding (i.e. {{0, 0}, {1, 1}, {1, 1}, {0, 0}}).
-  PadOpConstModel m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2},
-                    {0, 0, 1, 1, 1, 1, 0, 0}, {TensorType_FLOAT32});
+  PadOpConstModel m({1, 2, 2, 1}, {4, 2}, {0, 0, 1, 1, 1, 1, 0, 0});
   m.SetInput({1, 2, 3, 4});
   m.Invoke();
   EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4,
@@ -130,8 +114,7 @@ TEST(PadOpTest, SimpleConstTest) {
 }
 
 TEST(PadOpTest, SimpleDynamicTest) {
-  PadOpDynamicModel m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2},
-                      {TensorType_FLOAT32});
+  PadOpDynamicModel m({1, 2, 2, 1}, {4, 2});
   m.SetInput({1, 2, 3, 4});
   m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
   m.Invoke();
@@ -141,8 +124,7 @@ TEST(PadOpTest, SimpleDynamicTest) {
 }
 
 TEST(PadOpTest, AdvancedConstTest) {
-  PadOpConstModel m({TensorType_FLOAT32, {1, 2, 3, 1}}, {4, 2},
-                    {0, 0, 0, 2, 1, 3, 0, 0}, {TensorType_FLOAT32});
+  PadOpConstModel m({1, 2, 3, 1}, {4, 2}, {0, 0, 0, 2, 1, 3, 0, 0});
   m.SetInput({1, 2, 3, 4, 5, 6});
   m.Invoke();
   EXPECT_THAT(m.GetOutput(),
@@ -152,8 +134,7 @@ TEST(PadOpTest, AdvancedConstTest) {
 }
 
 TEST(PadOpTest, AdvancedDynamicTest) {
-  PadOpDynamicModel m({TensorType_FLOAT32, {1, 2, 3, 1}}, {4, 2},
-                      {TensorType_FLOAT32});
+  PadOpDynamicModel m({1, 2, 3, 1}, {4, 2});
   m.SetInput({1, 2, 3, 4, 5, 6});
   m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
   m.Invoke();
@@ -163,80 +144,6 @@ TEST(PadOpTest, AdvancedDynamicTest) {
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
 }
 
-class QuantizedPadOpTest : public ::testing::Test {
- protected:
-  std::vector<Matcher<float>> DequantizedArrayNear(
-      const std::vector<float>& values, const float min, const float max) {
-    const float quantization_tolerance = (max - min) / 255.0;
-    return ArrayFloatNear(values, quantization_tolerance);
-  }
-};
-
-TEST_F(QuantizedPadOpTest, ZeroNotInQuantizationRange) {
-  // The test_util and actual quantization code currently ensure that the range
-  // must include zero, but if that ever changes, this test will catch it.
-  EXPECT_DEATH(PadOpConstModel m({TensorType_UINT8, {1, 2, 2, 1}, 1.0, 2.0},
-                                 {4, 2}, {0, 0, 1, 1, 1, 1, 0, 0},
-                                 {TensorType_UINT8, {}, 1.0, 2.0}),
-               ".*Check failed: f_min <= 0.*");
-}
-
-TEST_F(QuantizedPadOpTest, SimpleConstTest) {
-  // Padding is represented as four 2-D lists representing above padding and
-  // below padding (i.e. {{0, 0}, {1, 1}, {1, 1}, {0, 0}}).
-  PadOpConstModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0}, {4, 2},
-                    {0, 0, 1, 1, 1, 1, 0, 0},
-                    {TensorType_UINT8, {}, -1.0, 1.0});
-  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7});
-  m.Invoke();
-  EXPECT_THAT(m.GetDequantizedOutput(),
-              ElementsAreArray(DequantizedArrayNear(
-                  {0, 0, 0, 0, 0, -0.8, 0.2, 0, 0, 0.9, 0.7, 0, 0, 0, 0, 0},
-                  -1.0, 1.0)));
-  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
-}
-
-TEST_F(QuantizedPadOpTest, SimpleDynamicTest) {
-  PadOpDynamicModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0}, {4, 2},
-                      {TensorType_UINT8, {}, -1.0, 1.0});
-  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7});
-  m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
-  m.Invoke();
-  EXPECT_THAT(m.GetDequantizedOutput(),
-              ElementsAreArray(DequantizedArrayNear(
-                  {0, 0, 0, 0, 0, -0.8, 0.2, 0, 0, 0.9, 0.7, 0, 0, 0, 0, 0},
-                  -1.0, 1.0)));
-  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
-}
-
-TEST_F(QuantizedPadOpTest, AdvancedConstTest) {
-  PadOpConstModel m({TensorType_UINT8, {1, 2, 3, 1}, -1.0, 1.0}, {4, 2},
-                    {0, 0, 0, 2, 1, 3, 0, 0},
-                    {TensorType_UINT8, {}, -1.0, 1.0});
-  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
-  m.Invoke();
-  EXPECT_THAT(m.GetDequantizedOutput(),
-              ElementsAreArray(DequantizedArrayNear(
-                  {0, -0.8, 0.2, 0.9, 0, 0, 0, 0, 0.7, 0.1, -0.3, 0, 0, 0,
-                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-                  -1.0, 1.0)));
-  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
-}
-
-TEST_F(QuantizedPadOpTest, AdvancedDynamicTest) {
-  PadOpDynamicModel m({TensorType_UINT8, {1, 2, 3, 1}, -1.0, 1.0}, {4, 2},
-                      {TensorType_UINT8, {}, -1.0, 1.0});
-  m.SetQuantizedInput({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
-  m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
-  m.Invoke();
-  EXPECT_THAT(m.GetDequantizedOutput(),
-              ElementsAreArray(DequantizedArrayNear(
-                  {0, -0.8, 0.2, 0.9, 0, 0, 0, 0, 0.7, 0.1, -0.3, 0, 0, 0,
-                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-                  -1.0, 1.0)));
-  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
-}
-
 }  // namespace
 }  // namespace tflite
-- 
2.7.4
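For reference (not part of the patch): after this rollback both the reference and optimized Pad kernels always fill the padded region with 0, instead of taking a caller-supplied pad_value (which the rolled-back change had used to pass the output zero point for uint8). The snippet below is a standalone sketch of that post-rollback zero-fill behaviour, written against plain std::vector/NHWC arrays rather than the TFLite Dims<4>/Offset helpers; the function name PadWithZeros and the flat indexing are illustrative assumptions, not TFLite API. It reproduces the expectation checked by PadOpTest.SimpleConstTest above.

#include <cassert>
#include <cstdio>
#include <vector>

// Zero-fill padding over an NHWC tensor, mirroring the reference loop in this
// patch: any output coordinate that falls in the pad region gets 0, everything
// else is copied from the input in order. Paddings are {before, after} pairs
// per dimension, in NHWC order.
std::vector<float> PadWithZeros(const std::vector<float>& input,
                                const int in_dims[4],
                                const int paddings[4][2]) {
  int out_dims[4];
  for (int i = 0; i < 4; ++i) {
    out_dims[i] = paddings[i][0] + in_dims[i] + paddings[i][1];
  }
  std::vector<float> output(out_dims[0] * out_dims[1] * out_dims[2] *
                            out_dims[3]);
  size_t in_index = 0;
  size_t out_index = 0;
  for (int b = 0; b < out_dims[0]; ++b) {
    for (int h = 0; h < out_dims[1]; ++h) {
      for (int w = 0; w < out_dims[2]; ++w) {
        for (int d = 0; d < out_dims[3]; ++d) {
          const bool in_pad_region =
              b < paddings[0][0] || b >= out_dims[0] - paddings[0][1] ||
              h < paddings[1][0] || h >= out_dims[1] - paddings[1][1] ||
              w < paddings[2][0] || w >= out_dims[2] - paddings[2][1] ||
              d < paddings[3][0] || d >= out_dims[3] - paddings[3][1];
          // Post-rollback behaviour: the pad region is always 0.
          output[out_index++] = in_pad_region ? 0.0f : input[in_index++];
        }
      }
    }
  }
  return output;
}

int main() {
  // Mirrors PadOpTest.SimpleConstTest: a 1x2x2x1 input padded by
  // {{0, 0}, {1, 1}, {1, 1}, {0, 0}} becomes a 1x4x4x1 output ringed with 0.
  const std::vector<float> input = {1, 2, 3, 4};
  const int in_dims[4] = {1, 2, 2, 1};
  const int paddings[4][2] = {{0, 0}, {1, 1}, {1, 1}, {0, 0}};
  const std::vector<float> expected = {0, 0, 0, 0, 0, 1, 2, 0,
                                       0, 3, 4, 0, 0, 0, 0, 0};
  assert(PadWithZeros(input, in_dims, paddings) == expected);
  std::printf("padded output matches the test expectation\n");
  return 0;
}

Compiling this with any C++11 compiler (e.g. g++ -std=c++11, file name of your choosing) and running it should print the confirmation line, matching the expected output in the float test above.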