From e1d5767e71d6567b78b75a57246d620d7d20edb6 Mon Sep 17 00:00:00 2001
From: Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics
Date: Thu, 14 Mar 2019 12:50:25 +0900
Subject: [PATCH] Revert same padding calculation (#4727)

Revert the same-padding calculation because the current padding calculation is incorrect.

Signed-off-by: Hyeongseok Oh
---
 .../neurun/src/backend/acl_cl/StageGenerator.cc   | 13 ++++++++-----
 runtimes/neurun/src/backend/cpu/StageGenerator.cc | 12 ++++++++----
 runtimes/neurun/src/util/Padding.cc               | 10 ++++------
 runtimes/neurun/src/util/Padding.h                |  3 ++-
 runtimes/pure_arm_compute/src/compilation.cc      | 22 ++++++++++------------
 5 files changed, 32 insertions(+), 28 deletions(-)

diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
index 5817c40..d4064cc 100644
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
@@ -231,6 +231,7 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
   const auto activation_index{node.param().activation_index};

   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
   const auto ker_shape = _ctx.at(ker_index).shape().asKernel();

   neurun::util::Stride stride;
@@ -274,7 +275,8 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
              (ANEURALNETWORKS_PADDING_VALID == padding_type));

       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(ifm_shape, stride, ker_shape.W, ker_shape.H)
+                 ? neurun::util::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W,
+                                              ker_shape.H)
                  : neurun::util::valid_padding();
     }
     else // explicit padding
@@ -376,7 +378,8 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
              (ANEURALNETWORKS_PADDING_VALID == padding_type));

       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), param.stride,
+                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                                              _ctx.at(ofm_index).shape().asFeature(), param.stride,
                                               ker_shape.W, ker_shape.H)
                  : neurun::util::valid_padding();
     }
@@ -481,7 +484,7 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
       VERBOSE(AvgPool2D) << "PAD: " << neurun::util::to_string(padding_type) << std::endl;

       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(ifm_shape, param.stride, kw, kh)
+                 ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                  : neurun::util::valid_padding();
     }
     else // explicit padding
@@ -596,7 +599,7 @@ void StageGenerator::visit(const model::operation::AvgPool2DNode &node)
       VERBOSE(AvgPool2D) << "PAD: " << neurun::util::to_string(padding_type) << std::endl;

       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(ifm_shape, param.stride, kw, kh)
+                 ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                 : neurun::util::valid_padding();
     }
     else // explicit padding
@@ -1950,8 +1953,8 @@ void StageGenerator::visit(const model::operation::L2Pool2DNode &node)
   const auto activation_index{node.param().activation_index};

-  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();

   const int32_t kh = _ctx.at(kh_index).asScalar();
   const int32_t kw = _ctx.at(kw_index).asScalar();

diff --git a/runtimes/neurun/src/backend/cpu/StageGenerator.cc b/runtimes/neurun/src/backend/cpu/StageGenerator.cc
index 1c6a5cd..db9d74f 100644
--- a/runtimes/neurun/src/backend/cpu/StageGenerator.cc
+++ b/runtimes/neurun/src/backend/cpu/StageGenerator.cc
@@ -118,7 +118,8 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
              (ANEURALNETWORKS_PADDING_VALID == padding_type));

       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), stride,
+                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                                              _ctx.at(ofm_index).shape().asFeature(), stride,
                                               _ctx.at(ker_index).shape().asKernel().W,
                                               _ctx.at(ker_index).shape().asKernel().H)
                  : neurun::util::valid_padding();
@@ -221,7 +222,8 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
   param.stride = stride;

   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(), stride,
+                      ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                                           _ctx.at(ofm_index).shape().asFeature(), stride,
                                            _ctx.at(ker_index).shape().asKernel().W,
                                            _ctx.at(ker_index).shape().asKernel().H)
                       : util::valid_padding();
@@ -313,7 +315,8 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
              (ANEURALNETWORKS_PADDING_VALID == padding_type));

       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), param.stride,
+                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                                              _ctx.at(ofm_index).shape().asFeature(), param.stride,
                                               kw, kh)
                  : neurun::util::valid_padding();
     }
@@ -413,7 +416,8 @@ void StageGenerator::visit(const model::operation::AvgPool2DNode &node)
              (ANEURALNETWORKS_PADDING_VALID == padding_type));

       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), param.stride,
+                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                                              _ctx.at(ofm_index).shape().asFeature(), param.stride,
                                               kw, kh)
                  : neurun::util::valid_padding();
     }
diff --git a/runtimes/neurun/src/util/Padding.cc b/runtimes/neurun/src/util/Padding.cc
index 631b13a..d1735b8 100644
--- a/runtimes/neurun/src/util/Padding.cc
+++ b/runtimes/neurun/src/util/Padding.cc
@@ -44,7 +44,8 @@ Padding valid_padding(void)
   return padding;
 }

-Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &stride, uint32_t kw,
+Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape,
+                     const nnfw::misc::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
                      uint32_t kh)
 {
   Padding padding;
@@ -56,13 +57,10 @@ Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &
   // padding_to_beginning = total_padding / 2
   // padding_to_end = (total_padding + 1)/2.
   //
-  const int32_t out_size_height = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
-  const int32_t out_size_width = (ifm_shape.W + stride.horizontal - 1) / stride.horizontal;
-
-  const int32_t vertical_needed_input = (out_size_height - 1) * stride.vertical + kh;
+  const int32_t vertical_needed_input = (ofm_shape.H - 1) * stride.vertical + kh;
   const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);

-  const int32_t horizontal_needed_input = (out_size_width - 1) * stride.horizontal + kw;
+  const int32_t horizontal_needed_input = (ofm_shape.W - 1) * stride.horizontal + kw;
   const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);

   padding.top = vertical_total_padding / 2;
diff --git a/runtimes/neurun/src/util/Padding.h b/runtimes/neurun/src/util/Padding.h
index 0107739..05a14eb 100644
--- a/runtimes/neurun/src/util/Padding.h
+++ b/runtimes/neurun/src/util/Padding.h
@@ -41,7 +41,8 @@ struct Stride
 };

 Padding valid_padding(void);
-Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &stride, uint32_t kw,
+Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape,
+                     const nnfw::misc::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
                      uint32_t kh);

 } // namespace util
diff --git a/runtimes/pure_arm_compute/src/compilation.cc b/runtimes/pure_arm_compute/src/compilation.cc
index 13c83a6..ac13fb8 100644
--- a/runtimes/pure_arm_compute/src/compilation.cc
+++ b/runtimes/pure_arm_compute/src/compilation.cc
@@ -143,7 +143,8 @@ Padding valid_padding(void)
   return padding;
 }

-Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &stride, uint32_t kw,
+Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape,
+                     const nnfw::misc::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
                      uint32_t kh)
 {
   Padding padding;
@@ -155,13 +156,10 @@ Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &
   // padding_to_beginning = total_padding / 2
   // padding_to_end = (total_padding + 1)/2.
   //
-  const int32_t out_size_height = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
-  const int32_t out_size_width = (ifm_shape.W + stride.horizontal - 1) / stride.horizontal;
-
-  const int32_t vertical_needed_input = (out_size_height - 1) * stride.vertical + kh;
+  const int32_t vertical_needed_input = (ofm_shape.H - 1) * stride.vertical + kh;
   const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);

-  const int32_t horizontal_needed_input = (out_size_width - 1) * stride.horizontal + kw;
+  const int32_t horizontal_needed_input = (ofm_shape.W - 1) * stride.horizontal + kw;
   const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);

   padding.top = vertical_total_padding / 2;
@@ -997,7 +995,7 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::Implicit::Node &node)
   param.stride = stride;

   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, stride, ker_shape.W, ker_shape.H)
+                      ? same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
                       : valid_padding();

   param.activation = static_cast(_ctx.at(activation_index).asScalar());
@@ -1277,7 +1275,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Nod
   param.stride = stride;

   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, stride, ker_shape.W, ker_shape.H)
+                      ? same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
                      : valid_padding();

   param.multipler = multiplier;
@@ -1640,7 +1638,7 @@ void Planner::visit(const ::internal::tflite::op::MaxPool2D::Implicit::Nod
   param.stride.horizontal = hstride;

   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, param.stride, kw, kh)
+                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                       : valid_padding();

   param.activation = static_cast(_ctx.at(activation_index).asScalar());
@@ -1871,7 +1869,7 @@ void Planner::visit(const ::internal::tflite::op::AvgPool2D::Implicit::Nod
   param.stride.horizontal = hstride;

   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, param.stride, kw, kh)
+                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                       : valid_padding();

   param.activation = static_cast(_ctx.at(activation_index).asScalar());
@@ -4087,7 +4085,7 @@ void Planner::visit(const ::internal::tflite::op::TransposeConv::Node &node)
   param.stride.vertical = vstride;

   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ofm_shape, param.stride, ker_shape.W, ker_shape.H)
+                      ? same_padding(ofm_shape, ifm_shape, param.stride, ker_shape.W, ker_shape.H)
                       : valid_padding();

   auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
@@ -4827,7 +4825,7 @@ void Planner::visit(const ::internal::tflite::op::L2Pool2D::Implicit::Node
   param.stride.horizontal = hstride;

   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, param.stride, kw, kh)
+                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                       : valid_padding();

   param.activation = static_cast(_ctx.at(activation_index).asScalar());
-- 
2.7.4
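
Note for reviewers: the sketch below is a minimal, self-contained C++ rendering of the same_padding() behaviour this patch restores, where SAME padding is derived from the operation's declared output shape (ofm_shape) instead of an output size recomputed as ceil(input / stride) (the removed out_size_height / out_size_width lines). The Shape, Stride and Padding structs are simplified stand-ins for the runtime's own types, and the bottom/left/right assignments follow the "padding_to_end = (total_padding + 1)/2" comment in Padding.cc; this is an illustration of the formula, not the actual runtime code.

#include <algorithm>
#include <cstdint>
#include <iostream>

// Simplified stand-ins for the runtime's feature shape, stride and padding types.
struct Shape   { int32_t H, W; };
struct Stride  { int32_t vertical, horizontal; };
struct Padding { int32_t top, bottom, left, right; };

// SAME padding derived from the declared output shape, mirroring the reverted
// runtimes/neurun/src/util/Padding.cc logic (illustrative only).
Padding same_padding(const Shape &ifm, const Shape &ofm, const Stride &stride, int32_t kw,
                     int32_t kh)
{
  // Input extent needed to produce the declared output; any shortfall becomes padding.
  const int32_t vertical_needed_input = (ofm.H - 1) * stride.vertical + kh;
  const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm.H);

  const int32_t horizontal_needed_input = (ofm.W - 1) * stride.horizontal + kw;
  const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm.W);

  Padding padding;
  padding.top = vertical_total_padding / 2;          // padding_to_beginning
  padding.bottom = (vertical_total_padding + 1) / 2; // padding_to_end
  padding.left = horizontal_total_padding / 2;
  padding.right = (horizontal_total_padding + 1) / 2;
  return padding;
}

int main()
{
  // 7x7 input, 3x3 kernel, stride 2, declared 4x4 output:
  // needed input = (4 - 1) * 2 + 3 = 9, total padding = 9 - 7 = 2, split 1/1.
  const Padding p = same_padding({7, 7}, {4, 4}, {2, 2}, 3, 3);
  std::cout << p.top << " " << p.bottom << " " << p.left << " " << p.right << std::endl;
  return 0;
}

Compiled and run as-is, this prints "1 1 1 1". The visible effect of the revert is that the padding tracks the output size the model actually declares rather than one re-derived from the input shape.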