[Pure ACL] Support valid padding for MaxPool2D (#670)
author박종현/동작제어Lab(SR)/Senior Engineer/삼성전자 <jh1302.park@samsung.com>
Mon, 16 Apr 2018 00:13:55 +0000 (09:13 +0900)
committer김정현/동작제어Lab(SR)/Senior Engineer/삼성전자 <jh0822.kim@samsung.com>
Mon, 16 Apr 2018 00:13:55 +0000 (09:13 +0900)
* [Pure ACL] Support valid padding for MaxPool2D

This commit supports valid padding for MaxPool2D.

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
* Remove unnecessary blank line

tools/nnapi_bindings/bindings/pure_arm_compute/src/compilation.cc
tools/nnapi_bindings/include/nnapi.h

index 22919f2..29168e7 100644 (file)
@@ -30,6 +30,55 @@ struct Stride
   uint32_t horizontal;
 };
 
+Padding valid_padding(void)
+{
+  //
+  // ANEURALNETWORKS_PADDING_VALID
+  //
+  // VALID padding. No padding.
+  //
+  // When the input size is not evenly divisible by the filter size,
+  // the input at the end that could not fill the whole filter tile
+  // will simply be ignored.
+  //
+  Padding padding;
+
+  padding.top = 0;
+  padding.bottom = 0;
+  padding.left = 0;
+  padding.right = 0;
+
+  return padding;
+}
+
+Padding same_padding(const nnfw::util::feature::Shape &ifm_shape,
+                     const nnfw::util::feature::Shape &ofm_shape,
+                     const Stride &stride,
+                     uint32_t kw, uint32_t kh)
+{
+  Padding padding;
+
+  // ANEURALNETWORKS_PADDING_SAME (from NNAPI spec)
+  //
+  // SAME padding. Padding on both ends are the "same":
+  //
+  //   padding_to_beginning = total_padding / 2
+  //   padding_to_end = (total_padding + 1) / 2.
+  //
+  const int32_t vertical_needed_input = (ofm_shape.H - 1) * stride.vertical + kh;
+  const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
+
+  const int32_t horizontal_needed_input = (ofm_shape.W - 1) * stride.horizontal + kw;
+  const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
+
+  padding.top = vertical_total_padding / 2;
+  padding.bottom = (vertical_total_padding + 1) / 2;
+  padding.left = horizontal_total_padding / 2;
+  padding.right = (horizontal_total_padding + 1)/ 2;
+
+  return padding;
+}
+
 template<typename T> std::unique_ptr<T> make_layer(void)
 {
   return std::unique_ptr<T>{new T};
@@ -300,6 +349,8 @@ void Planner::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &nod
   const ::internal::tflite::operand::Index vstride_index{node.param().vstride_index};
   const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index};
 
+  const ::internal::tflite::operand::Index padding_index{node.param().padding_index};
+
   const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
 
@@ -309,6 +360,12 @@ void Planner::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &nod
   const int32_t vstride = _ctx.at(vstride_index).asScala<int32_t>();
   const int32_t hstride = _ctx.at(hstride_index).asScala<int32_t>();
 
+  const PaddingCode padding_type =
+    static_cast<PaddingCode>(_ctx.at(padding_index).asScala<int32_t>());
+
+       assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+                                (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
   // Set Shape Constraints
   _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape));
   _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape));
@@ -339,26 +396,9 @@ void Planner::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &nod
   param.stride.vertical = vstride;
   param.stride.horizontal = hstride;
 
-  // NOTE padding code is assumes as 'ANEURALNETWORKS_PADDING_SAME'
-  {
-               // ANEURALNETWORKS_PADDING_SAME (from NNAPI spec)
-               //
-               // SAME padding. Padding on both ends are the "same":
-               //
-               //      padding_to_beginning = total_padding / 2
-               //  padding_to_end = (total_padding + 1)/2.
-               //
-    const int32_t vertical_needed_input = (ofm_shape.H - 1) * vstride + kh;
-    const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
-
-    const int32_t horizontal_needed_input = (ofm_shape.W - 1) * hstride + kw;
-    const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
-
-    param.padding.top = vertical_total_padding / 2;
-    param.padding.bottom = (vertical_total_padding + 1) / 2;
-    param.padding.left = horizontal_total_padding / 2;
-    param.padding.right = (horizontal_total_padding + 1)/ 2;
-  }
+  param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
+                ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+                : valid_padding();
 
   VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
   VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
index f501990..a335985 100644 (file)
@@ -1046,6 +1046,11 @@ typedef enum
   ANEURALNETWORKS_FUSED_RELU6 = 3,
 } FuseCode;
 
+typedef enum {
+  ANEURALNETWORKS_PADDING_SAME = 1,
+  ANEURALNETWORKS_PADDING_VALID = 2,
+} PaddingCode;
+
 //
 // Event
 //