Add explicit padding to average pooling (#1928)
author SungJin Choi/Motion Control Lab(SR)/Principal Engineer/Samsung Electronics <lotieye.choi@samsung.com>
Wed, 11 Jul 2018 00:50:08 +0000 (09:50 +0900)
committer Chunseok Lee/Motion Control Lab(SR)/Staff Engineer/Samsung Electronics <chunseok.lee@samsung.com>
Wed, 11 Jul 2018 00:50:08 +0000 (09:50 +0900)
* Add explicit padding to average pooling

This commit adds explicit padding to average pooling.

Signed-off-by: SungJin Choi <lotieye.choi@samsung.com>
* Remove an unused and unnecessary blank line

This commit removes an unused and unnecessary blank line.

Signed-off-by: SungJin Choi <lotieye.choi@samsung.com>
* Rename namespace to Explicit

This commit renames the namespace to Explicit for consistency.

Signed-off-by: SungJin Choi <lotieye.choi@samsung.com>
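
For reference, a minimal sketch (not part of this patch) of how an NN API client would add an AVERAGE_POOL_2D operation with explicit padding; the ten input operands follow the order that Explicit::Param expects below. The operand index values and the model handle are illustrative only.

    // Sketch only: assumes the eleven operands (IFM, four paddings, two strides,
    // two filter dimensions, activation, OFM) were already registered on `model`
    // via ANeuralNetworksModel_addOperand; the indices 0..10 are hypothetical.
    uint32_t inputs[10] = {
        0, // IFM tensor
        1, // padding left
        2, // padding right
        3, // padding top
        4, // padding bottom
        5, // horizontal (over width) stride
        6, // vertical (over height) stride
        7, // filter width
        8, // filter height
        9, // fused activation (FuseCode)
    };
    uint32_t outputs[1] = {10}; // OFM tensor

    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_AVERAGE_POOL_2D,
                                      10, inputs, 1, outputs);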
runtimes/pure_arm_compute/src/compilation.cc
runtimes/pure_arm_compute/src/internal/op/AvgPool2D.cc
runtimes/pure_arm_compute/src/internal/op/AvgPool2D.h
runtimes/pure_arm_compute/src/internal/op/NodeVisitor.h
runtimes/pure_arm_compute/src/model.cc

runtimes/pure_arm_compute/src/compilation.cc
index 7ab516e..73d3dfc 100644 (file)
@@ -331,6 +331,7 @@ public:
   void visit(const ::internal::tflite::op::Dequantize::Node &node) override;
   void visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
   void visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &node) override;
+  void visit(const ::internal::tflite::op::AvgPool2D::Explicit::Node &node) override;
   void visit(const ::internal::tflite::op::Concat::Node &node) override;
   void visit(const ::internal::tflite::op::FullyConnected::Node &node) override;
   void visit(const ::internal::tflite::op::ResizeBilinear::Node &node) override;
@@ -1203,6 +1204,110 @@ void Planner::visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &nod
   _builder.addStage(stage);
 }
 
+void Planner::visit(const ::internal::tflite::op::AvgPool2D::Explicit::Node &node)
+{
+  const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
+  const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
+
+  const ::internal::tflite::operand::Index kh_index{node.param().kh_index};
+  const ::internal::tflite::operand::Index kw_index{node.param().kw_index};
+
+  const ::internal::tflite::operand::Index vstride_index{node.param().vstride_index};
+  const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index};
+
+  const ::internal::tflite::operand::Index padding_left_index{node.param().padding_left_index};
+  const ::internal::tflite::operand::Index padding_right_index{node.param().padding_right_index};
+  const ::internal::tflite::operand::Index padding_top_index{node.param().padding_top_index};
+  const ::internal::tflite::operand::Index padding_bottom_index{node.param().padding_bottom_index};
+
+  const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
+
+  // TODO 4D tensor (dim(0) != 1)
+  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+
+  const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
+  const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
+
+  const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
+  const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
+
+  const int32_t padding_left = _ctx.at(padding_left_index).asScalar<int32_t>();
+  const int32_t padding_right = _ctx.at(padding_right_index).asScalar<int32_t>();
+  const int32_t padding_top = _ctx.at(padding_top_index).asScalar<int32_t>();
+  const int32_t padding_bottom = _ctx.at(padding_bottom_index).asScalar<int32_t>();
+
+  // TODO Should move to the place where the operand is handled, if it is possible.
+  // Set Shape Constraints and TensorInfo
+  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape, _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape, _ctx.at(ifm_index).type()));
+
+  // Construct operation parameters
+  struct Param
+  {
+    int ofm_index;
+    int ifm_index;
+
+    uint32_t kw;
+    uint32_t kh;
+
+    Padding padding;
+    Stride stride;
+
+    FuseCode activation;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index.asInt();
+  param.ifm_index = ifm_index.asInt();
+
+  param.kh = kh;
+  param.kw = kw;
+
+  param.stride.vertical = vstride;
+  param.stride.horizontal = hstride;
+
+  param.padding.left = padding_left;
+  param.padding.right = padding_right;
+  param.padding.top = padding_top;
+  param.padding.bottom = padding_bottom;
+
+  param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+  VERBOSE(AvgPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
+  VERBOSE(AvgPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
+  VERBOSE(AvgPool2D) << "OFM_H: " << ofm_shape.H << std::endl;
+  VERBOSE(AvgPool2D) << "OFM_W: " << ofm_shape.W << std::endl;
+  VERBOSE(AvgPool2D) << "KER_H: " << kh << std::endl;
+  VERBOSE(AvgPool2D) << "KER_W: " << kw << std::endl;
+  VERBOSE(AvgPool2D) << "STRIDE_H: " << vstride << std::endl;
+  VERBOSE(AvgPool2D) << "STRIDE_W: " << hstride << std::endl;
+  VERBOSE(AvgPool2D) << "PAD(T): " << param.padding.top << std::endl;
+  VERBOSE(AvgPool2D) << "PAD(B): " << param.padding.bottom << std::endl;
+  VERBOSE(AvgPool2D) << "PAD(L): " << param.padding.left << std::endl;
+  VERBOSE(AvgPool2D) << "PAD(R): " << param.padding.right << std::endl;
+
+  auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
+    auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
+    auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
+
+    ::arm_compute::PoolingLayerInfo info{
+        ::arm_compute::PoolingType::AVG, ::arm_compute::Size2D{param.kw, param.kh},
+        asPadStringInfo(param.padding, param.stride), true /* exclude_padding */};
+
+    std::unique_ptr<::arm_compute::CLPoolingLayer> fn{new ::arm_compute::CLPoolingLayer};
+
+    fn->configure(ifm_alloc, ofm_alloc, info);
+
+    builder.append("AvgPool2D", std::move(fn));
+
+    ActivationBuilder{builder}.append(param.activation, ofm_alloc);
+  };
+
+  _builder.addStage(stage);
+}
+
 void Planner::visit(const ::internal::tflite::op::Concat::Node &node)
 {
   const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
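
Side note on the stage above: a minimal sketch of how the explicit per-side paddings and strides gathered in param would map onto ACL's pad/stride description, assuming arm_compute's seven-argument PadStrideInfo constructor (stride_x, stride_y, pad_left, pad_right, pad_top, pad_bottom, rounding). The rounding mode shown is an assumption; the conversion helper called in this file may wrap exactly this.

    // Sketch: explicit padding/stride -> arm_compute::PadStrideInfo.
    ::arm_compute::PadStrideInfo pad_stride_info{
        static_cast<unsigned int>(param.stride.horizontal), // stride_x
        static_cast<unsigned int>(param.stride.vertical),   // stride_y
        static_cast<unsigned int>(param.padding.left),
        static_cast<unsigned int>(param.padding.right),
        static_cast<unsigned int>(param.padding.top),
        static_cast<unsigned int>(param.padding.bottom),
        ::arm_compute::DimensionRoundingType::FLOOR}; // rounding: assumption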
runtimes/pure_arm_compute/src/internal/op/AvgPool2D.cc
index a0bfb37..0e7a680 100644 (file)
@@ -11,6 +11,13 @@ namespace op
 {
 namespace AvgPool2D
 {
+namespace Explicit
+{
+
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+} // namespace Explicit
+
 namespace implicit
 {
 
@@ -30,6 +37,42 @@ namespace op
 {
 namespace AvgPool2D
 {
+namespace Explicit
+{
+
+Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
+             const uint32_t *outputs)
+{
+  assert(inputCount == 10 && outputCount == 1);
+
+  ofm_index = outputs[0];
+
+  // Each input should be interpreted as follows:
+  //
+  //  0 -> IFM Tensor Index
+  //  1 -> Padding_left index
+  //  2 -> Padding_right index
+  //  3 -> Padding_top index
+  //  4 -> Padding_bottom index
+  //  5 -> Horizontal (over width) Stride Index
+  //  6 -> Vertical (over height) Stride Index
+  //  7 -> Filter Width Index
+  //  8 -> Filter Height Index
+  //  9 -> FuseCode (activation) Index
+  ifm_index = inputs[0];
+  padding_left_index = inputs[1];
+  padding_right_index = inputs[2];
+  padding_top_index = inputs[3];
+  padding_bottom_index = inputs[4];
+  hstride_index = inputs[5];
+  vstride_index = inputs[6];
+  kw_index = inputs[7];
+  kh_index = inputs[8];
+  activation_index = inputs[9];
+}
+
+} // namespace Explicit
+
 namespace implicit
 {
 
runtimes/pure_arm_compute/src/internal/op/AvgPool2D.h
index 1696878..e342582 100644 (file)
@@ -13,6 +13,55 @@ namespace op
 {
 namespace AvgPool2D
 {
+namespace Explicit
+{
+
+struct Param
+{
+  int32_t ofm_index;
+
+  int32_t ifm_index;
+
+  int32_t kw_index;
+  int32_t kh_index;
+
+  int32_t hstride_index;
+  int32_t vstride_index;
+
+  int32_t padding_left_index;
+  int32_t padding_right_index;
+  int32_t padding_top_index;
+  int32_t padding_bottom_index;
+
+  int32_t activation_index;
+
+  Param() = default;
+  Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
+};
+
+class Node final : public op::Node
+{
+public:
+  Node(const Param &param) : _param(param)
+  {
+    // DO NOTHING
+  }
+
+public:
+  virtual ~Node() = default;
+
+public:
+  const Param &param(void) const { return _param; }
+
+public:
+  void accept(NodeVisitor &&) const override;
+
+private:
+  const Param _param;
+};
+
+} // namespace Explicit
+
 namespace implicit
 {
 
runtimes/pure_arm_compute/src/internal/op/NodeVisitor.h
index 68cf28d..38a4750 100644 (file)
@@ -45,6 +45,7 @@ struct NodeVisitor
   virtual void visit(const Dequantize::Node &) = 0;
   virtual void visit(const MaxPool2D::implicit::Node &) = 0;
   virtual void visit(const AvgPool2D::implicit::Node &) = 0;
+  virtual void visit(const AvgPool2D::Explicit::Node &) = 0;
   virtual void visit(const Concat::Node &) = 0;
   virtual void visit(const Reshape::Node &) = 0;
   virtual void visit(const ResizeBilinear::Node &) = 0;
runtimes/pure_arm_compute/src/model.cc
index cdd2b44..8d4052c 100644 (file)
@@ -225,10 +225,10 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
     }
     case ANEURALNETWORKS_AVERAGE_POOL_2D:
     {
-      // inputCount is either 7 or 9 acccording to NN API specification.
+      // inputCount is either 7 or 10 according to NN API specification.
       //  - Padding is implicit when inputCount is 7
-      //  - Padding is explicit when inputCount is 9
-      assert(inputCount == 7 || inputCount == 9);
+      //  - Padding is explicit when inputCount is 10
+      assert(inputCount == 7 || inputCount == 10);
       assert(outputCount == 1);
 
       if (inputCount == 7)
@@ -243,7 +243,13 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
       }
       else
       {
-        throw std::runtime_error{"Explicit padding in AvgPool2D is not supported, yet"};
+        using internal::tflite::op::AvgPool2D::Explicit::Param;
+        using internal::tflite::op::AvgPool2D::Explicit::Node;
+
+        // Add 'operations'
+        auto &operations = model->deref().operations();
+
+        operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
       }
 
       break;
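
For contrast with the 10-input explicit form wired up above, a minimal sketch of the 7-input implicit-padding call that the inputCount == 7 branch keeps handling; operand indices are illustrative and the input order follows the NN API specification.

    uint32_t implicit_inputs[7] = {
        0, // IFM tensor
        1, // padding scheme (ANEURALNETWORKS_PADDING_SAME / _VALID)
        2, // horizontal (over width) stride
        3, // vertical (over height) stride
        4, // filter width
        5, // filter height
        6, // fused activation (FuseCode)
    };
    uint32_t implicit_outputs[1] = {7}; // OFM tensor

    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_AVERAGE_POOL_2D,
                                      7, implicit_inputs, 1, implicit_outputs);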