Revert same padding calculation (#4727)
author Hyeongseok Oh / On-Device Lab (SR) / Staff Engineer / Samsung Electronics <hseok82.oh@samsung.com>
Thu, 14 Mar 2019 03:50:25 +0000 (12:50 +0900)
committer GitHub Enterprise <noreply-CODE@samsung.com>
Thu, 14 Mar 2019 03:50:25 +0000 (12:50 +0900)
Revert the SAME padding calculation to derive padding from the operation's declared output shape, because the current calculation, which recomputes the output size from the input shape, is incorrect.

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
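
For reference, the restored rule asks how much input a kernel with the given stride needs to produce the declared output, then pads the difference. Below is a minimal standalone sketch of that logic, using simplified stand-ins for the nnfw::misc::feature::Shape, neurun::util::Stride and neurun::util::Padding types (illustration only, not the actual implementation):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Simplified stand-ins for the real nnfw/neurun types (hypothetical).
struct Shape   { int32_t H, W; };
struct Stride  { int32_t vertical, horizontal; };
struct Padding { int32_t top, bottom, left, right; };

// SAME padding as restored by this commit: compute the input extent a
// (kh x kw) kernel with this stride needs to produce the declared output,
// pad the difference, giving the beginning total/2 and the end (total+1)/2.
Padding same_padding(const Shape &ifm, const Shape &ofm, const Stride &stride,
                     int32_t kw, int32_t kh)
{
  const int32_t vertical_needed_input = (ofm.H - 1) * stride.vertical + kh;
  const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm.H);

  const int32_t horizontal_needed_input = (ofm.W - 1) * stride.horizontal + kw;
  const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm.W);

  return Padding{vertical_total_padding / 2, (vertical_total_padding + 1) / 2,
                 horizontal_total_padding / 2, (horizontal_total_padding + 1) / 2};
}

int main()
{
  // 7x7 input, 3x3 kernel, stride 2, declared 4x4 output:
  // needed input = (4 - 1) * 2 + 3 = 9, total padding = 2 -> top = bottom = 1.
  const Padding p = same_padding({7, 7}, {4, 4}, {2, 2}, 3, 3);
  std::printf("top=%d bottom=%d left=%d right=%d\n", p.top, p.bottom, p.left, p.right);
  return 0;
}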
runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
runtimes/neurun/src/backend/cpu/StageGenerator.cc
runtimes/neurun/src/util/Padding.cc
runtimes/neurun/src/util/Padding.h
runtimes/pure_arm_compute/src/compilation.cc

diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
index 5817c40..d4064cc 100644
@@ -231,6 +231,7 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
   const auto activation_index{node.param().activation_index};
 
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
   const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
 
   neurun::util::Stride stride;
@@ -274,7 +275,8 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
              (ANEURALNETWORKS_PADDING_VALID == padding_type));
 
       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(ifm_shape, stride, ker_shape.W, ker_shape.H)
+                 ? neurun::util::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W,
+                                              ker_shape.H)
                  : neurun::util::valid_padding();
     }
     else // explicit padding
@@ -376,7 +378,8 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
              (ANEURALNETWORKS_PADDING_VALID == padding_type));
 
       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), param.stride,
+                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                                              _ctx.at(ofm_index).shape().asFeature(), param.stride,
                                               ker_shape.W, ker_shape.H)
                  : neurun::util::valid_padding();
     }
@@ -481,7 +484,7 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
       VERBOSE(AvgPool2D) << "PAD: " << neurun::util::to_string(padding_type) << std::endl;
 
       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(ifm_shape, param.stride, kw, kh)
+                 ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                  : neurun::util::valid_padding();
     }
     else // explicit padding
@@ -596,7 +599,7 @@ void StageGenerator::visit(const model::operation::AvgPool2DNode &node)
       VERBOSE(AvgPool2D) << "PAD: " << neurun::util::to_string(padding_type) << std::endl;
 
       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(ifm_shape, param.stride, kw, kh)
+                 ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                  : neurun::util::valid_padding();
     }
     else // explicit padding
@@ -1950,8 +1953,8 @@ void StageGenerator::visit(const model::operation::L2Pool2DNode &node)
 
   const auto activation_index{node.param().activation_index};
 
-  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
 
   const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
   const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
diff --git a/runtimes/neurun/src/backend/cpu/StageGenerator.cc b/runtimes/neurun/src/backend/cpu/StageGenerator.cc
index 1c6a5cd..db9d74f 100644
@@ -118,7 +118,8 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
              (ANEURALNETWORKS_PADDING_VALID == padding_type));
 
       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), stride,
+                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                                              _ctx.at(ofm_index).shape().asFeature(), stride,
                                               _ctx.at(ker_index).shape().asKernel().W,
                                               _ctx.at(ker_index).shape().asKernel().H)
                  : neurun::util::valid_padding();
@@ -221,7 +222,8 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
 
   param.stride = stride;
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(), stride,
+                      ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                                           _ctx.at(ofm_index).shape().asFeature(), stride,
                                            _ctx.at(ker_index).shape().asKernel().W,
                                            _ctx.at(ker_index).shape().asKernel().H)
                       : util::valid_padding();
@@ -313,7 +315,8 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
              (ANEURALNETWORKS_PADDING_VALID == padding_type));
 
       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), param.stride,
+                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                                              _ctx.at(ofm_index).shape().asFeature(), param.stride,
                                               kw, kh)
                  : neurun::util::valid_padding();
     }
@@ -413,7 +416,8 @@ void StageGenerator::visit(const model::operation::AvgPool2DNode &node)
              (ANEURALNETWORKS_PADDING_VALID == padding_type));
 
       return (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), param.stride,
+                 ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                                              _ctx.at(ofm_index).shape().asFeature(), param.stride,
                                               kw, kh)
                  : neurun::util::valid_padding();
     }
diff --git a/runtimes/neurun/src/util/Padding.cc b/runtimes/neurun/src/util/Padding.cc
index 631b13a..d1735b8 100644
@@ -44,7 +44,8 @@ Padding valid_padding(void)
   return padding;
 }
 
-Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &stride, uint32_t kw,
+Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape,
+                     const nnfw::misc::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
                      uint32_t kh)
 {
   Padding padding;
@@ -56,13 +57,10 @@ Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &
   // padding_to_beginning = total_padding / 2
   // padding_to_end = (total_padding + 1)/2.
   //
-  const int32_t out_size_height = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
-  const int32_t out_size_width = (ifm_shape.W + stride.horizontal - 1) / stride.horizontal;
-
-  const int32_t vertical_needed_input = (out_size_height - 1) * stride.vertical + kh;
+  const int32_t vertical_needed_input = (ofm_shape.H - 1) * stride.vertical + kh;
   const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
 
-  const int32_t horizontal_needed_input = (out_size_width - 1) * stride.horizontal + kw;
+  const int32_t horizontal_needed_input = (ofm_shape.W - 1) * stride.horizontal + kw;
   const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
 
   padding.top = vertical_total_padding / 2;
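
Note: the removed lines recomputed the output size from the input via ceiling division, out_size = (ifm + stride - 1) / stride, before deriving the needed input; the restored code instead uses the output shape the graph already declares. Worked example: with ifm.H = 7, stride.vertical = 2, kh = 3 and a declared ofm.H = 4, the needed input is (4 - 1) * 2 + 3 = 9, so vertical_total_padding = max(0, 9 - 7) = 2, split as top = 1 and bottom = 1. The two formulas agree only when the declared output size equals the ceil-division result; when a model declares a different output shape, recomputing it from the input yields the wrong padding, which is presumably what this revert addresses.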
diff --git a/runtimes/neurun/src/util/Padding.h b/runtimes/neurun/src/util/Padding.h
index 0107739..05a14eb 100644
@@ -41,7 +41,8 @@ struct Stride
 };
 
 Padding valid_padding(void);
-Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &stride, uint32_t kw,
+Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape,
+                     const nnfw::misc::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
                      uint32_t kh);
 
 } // namespace util
diff --git a/runtimes/pure_arm_compute/src/compilation.cc b/runtimes/pure_arm_compute/src/compilation.cc
index 13c83a6..ac13fb8 100644
@@ -143,7 +143,8 @@ Padding valid_padding(void)
   return padding;
 }
 
-Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &stride, uint32_t kw,
+Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape,
+                     const nnfw::misc::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
                      uint32_t kh)
 {
   Padding padding;
@@ -155,13 +156,10 @@ Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &
   // padding_to_beginning = total_padding / 2
   // padding_to_end = (total_padding + 1)/2.
   //
-  const int32_t out_size_height = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
-  const int32_t out_size_width = (ifm_shape.W + stride.horizontal - 1) / stride.horizontal;
-
-  const int32_t vertical_needed_input = (out_size_height - 1) * stride.vertical + kh;
+  const int32_t vertical_needed_input = (ofm_shape.H - 1) * stride.vertical + kh;
   const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
 
-  const int32_t horizontal_needed_input = (out_size_width - 1) * stride.horizontal + kw;
+  const int32_t horizontal_needed_input = (ofm_shape.W - 1) * stride.horizontal + kw;
   const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
 
   padding.top = vertical_total_padding / 2;
@@ -997,7 +995,7 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::Implicit::Node &node)
 
   param.stride = stride;
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, stride, ker_shape.W, ker_shape.H)
+                      ? same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
                       : valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
@@ -1277,7 +1275,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Nod
 
   param.stride = stride;
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, stride, ker_shape.W, ker_shape.H)
+                      ? same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
                       : valid_padding();
 
   param.multipler = multiplier;
@@ -1640,7 +1638,7 @@ void Planner::visit(const ::internal::tflite::op::MaxPool2D::Implicit::Node &nod
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, param.stride, kw, kh)
+                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                       : valid_padding();
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
@@ -1871,7 +1869,7 @@ void Planner::visit(const ::internal::tflite::op::AvgPool2D::Implicit::Node &nod
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, param.stride, kw, kh)
+                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                       : valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
@@ -4087,7 +4085,7 @@ void Planner::visit(const ::internal::tflite::op::TransposeConv::Node &node)
   param.stride.vertical = vstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ofm_shape, param.stride, ker_shape.W, ker_shape.H)
+                      ? same_padding(ofm_shape, ifm_shape, param.stride, ker_shape.W, ker_shape.H)
                       : valid_padding();
 
   auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
@@ -4827,7 +4825,7 @@ void Planner::visit(const ::internal::tflite::op::L2Pool2D::Implicit::Node &node
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, param.stride, kw, kh)
+                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                       : valid_padding();
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
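
Note: the TransposeConv hunk above passes the shapes in the opposite order (ofm_shape first, then ifm_shape). This appears consistent with a transposed convolution, where the output extent plays the role that the input extent plays in a forward convolution, so the SAME padding is computed against the output shape.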