Correction in same_padding utility function (#3629)
author Shubham Gupta/SNAP/SRI-Bangalore/Engineer/Samsung Electronics <shub98.gupta@samsung.com>
Mon, 26 Nov 2018 01:11:57 +0000 (06:41 +0530)
committer 오형석/Motion Control Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Mon, 26 Nov 2018 01:11:57 +0000 (10:11 +0900)
This patch corrects the logic of the same_padding utility function: the output size expected under SAME padding is now computed from the input shape and stride as ceil(input_size / stride), instead of being taken from a separate ofm_shape argument. The ofm_shape parameter is therefore removed from the signature and from all call sites.
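
For reference, a minimal standalone sketch of the corrected computation
(illustrative only: the struct Pad and the function same_pad_1d below are
hypothetical names, not part of this patch):

  #include <algorithm>
  #include <cstdint>

  struct Pad
  {
    int32_t begin;
    int32_t end;
  };

  // TensorFlow-style SAME padding along one axis: the output size is
  // derived from the input size and stride alone (ceiling division),
  // so the caller does not need to pass the output shape.
  Pad same_pad_1d(int32_t in_size, int32_t stride, int32_t kernel)
  {
    const int32_t out_size = (in_size + stride - 1) / stride; // ceil(in/stride)
    const int32_t needed_input = (out_size - 1) * stride + kernel;
    const int32_t total_padding = std::max(0, needed_input - in_size);
    // padding_to_beginning = total_padding / 2
    // padding_to_end = (total_padding + 1) / 2
    return Pad{total_padding / 2, (total_padding + 1) / 2};
  }

For example, in_size = 7, stride = 2, kernel = 3 gives out_size = 4,
needed_input = 9, total_padding = 2, i.e. one pixel of padding on each
side, matching the SAME convention.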

Signed-off-by: shubham <shub98.gupta@samsung.com>
runtimes/pure_arm_compute/src/compilation.cc

index b248f63..c736a93 100644
@@ -177,8 +177,7 @@ Padding valid_padding(void)
   return padding;
 }
 
-Padding same_padding(const nnfw::util::feature::Shape &ifm_shape,
-                     const nnfw::util::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
+Padding same_padding(const nnfw::util::feature::Shape &ifm_shape, const Stride &stride, uint32_t kw,
                      uint32_t kh)
 {
   Padding padding;
@@ -190,10 +189,13 @@ Padding same_padding(const nnfw::util::feature::Shape &ifm_shape,
   // padding_to_beginning = total_padding / 2
   // padding_to_end = (total_padding + 1)/2.
   //
-  const int32_t vertical_needed_input = (ofm_shape.H - 1) * stride.vertical + kh;
+  const int32_t out_size_height = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
+  const int32_t out_size_width = (ifm_shape.W + stride.horizontal - 1) / stride.horizontal;
+
+  const int32_t vertical_needed_input = (out_size_height - 1) * stride.vertical + kh;
   const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
 
-  const int32_t horizontal_needed_input = (ofm_shape.W - 1) * stride.horizontal + kw;
+  const int32_t horizontal_needed_input = (out_size_width - 1) * stride.horizontal + kw;
   const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
 
   padding.top = vertical_total_padding / 2;
@@ -1028,7 +1030,7 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::Implicit::Node &node)
 
   param.stride = stride;
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
+                      ? same_padding(ifm_shape, stride, ker_shape.W, ker_shape.H)
                       : valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
@@ -1308,7 +1310,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Node &node)
 
   param.stride = stride;
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
+                      ? same_padding(ifm_shape, stride, ker_shape.W, ker_shape.H)
                       : valid_padding();
 
   param.multipler = multiplier;
@@ -1641,7 +1643,7 @@ void Planner::visit(const ::internal::tflite::op::MaxPool2D::Implicit::Node &node)
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+                      ? same_padding(ifm_shape, param.stride, kw, kh)
                       : valid_padding();
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
@@ -1872,7 +1874,7 @@ void Planner::visit(const ::internal::tflite::op::AvgPool2D::Implicit::Node &node)
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+                      ? same_padding(ifm_shape, param.stride, kw, kh)
                       : valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
@@ -4440,7 +4442,7 @@ void Planner::visit(const ::internal::tflite::op::L2Pool2D::Implicit::Node &node)
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+                      ? same_padding(ifm_shape, param.stride, kw, kh)
                       : valid_padding();
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());