Move MaxPool2D padding & activation type resolution (#5120)
author Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Fri, 3 May 2019 07:53:13 +0000 (16:53 +0900)
committer Chunseok Lee/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <chunseok.lee@samsung.com>
Fri, 3 May 2019 07:53:13 +0000 (16:53 +0900)
Move MaxPool2D padding & activation type resolution from backend to frontend

Resolve the NNAPI padding code and fused activation for MaxPool2D in the NNAPI
frontend (OperationFactory) and store them in the node parameters as
model::Padding and model::Activation, so the acl_cl, acl_neon, and cpu backends
no longer read those scalar operands themselves.

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
runtimes/neurun/backend/acl_cl/StageGenerator.cc
runtimes/neurun/backend/acl_neon/StageGenerator.cc
runtimes/neurun/backend/cpu/StageGenerator.cc
runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.cc
runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.h
runtimes/neurun/core/include/model/operation/MaxPool2DNode.h
runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc
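
For reviewers unfamiliar with the new types, the following is a minimal, self-contained sketch of how the frontend-side conversions used below (NNAPIConvert::getPaddingType and NNAPIConvert::getFusedActivation) could map the NNAPI scalar codes onto model::Padding and model::Activation. The enum layouts and helper bodies here are illustrative assumptions; the actual definitions live elsewhere in the runtime (the enums come from model/InternalType.h) and may differ.

    // Sketch only: mirrors identifiers that appear in this diff; enum values
    // and helper bodies are assumptions, not the runtime's actual definitions.
    #include <cstdint>
    #include <stdexcept>

    // NNAPI scalar codes (values as defined by the NNAPI specification).
    enum PaddingCode : int32_t
    {
      ANEURALNETWORKS_PADDING_SAME = 1,
      ANEURALNETWORKS_PADDING_VALID = 2,
    };

    enum FuseCode : int32_t
    {
      ANEURALNETWORKS_FUSED_NONE = 0,
      ANEURALNETWORKS_FUSED_RELU = 1,
      ANEURALNETWORKS_FUSED_RELU1 = 2,
      ANEURALNETWORKS_FUSED_RELU6 = 3,
    };

    namespace model
    {
    // Internal types now carried in the node parameters (see model/InternalType.h).
    enum class Padding { EXPLICIT, SAME, VALID };
    enum class Activation { NONE, RELU, RELU1, RELU6 };
    } // namespace model

    namespace NNAPIConvert
    {
    // Resolve the padding scalar once in the frontend so backends can branch on
    // model::Padding directly instead of re-reading the operand.
    inline model::Padding getPaddingType(PaddingCode code)
    {
      switch (code)
      {
        case ANEURALNETWORKS_PADDING_SAME:
          return model::Padding::SAME;
        case ANEURALNETWORKS_PADDING_VALID:
          return model::Padding::VALID;
        default:
          throw std::runtime_error{"Unsupported padding code"};
      }
    }

    // Resolve the fused-activation scalar to the internal activation type.
    inline model::Activation getFusedActivation(FuseCode code)
    {
      switch (code)
      {
        case ANEURALNETWORKS_FUSED_NONE:
          return model::Activation::NONE;
        case ANEURALNETWORKS_FUSED_RELU:
          return model::Activation::RELU;
        case ANEURALNETWORKS_FUSED_RELU1:
          return model::Activation::RELU1;
        case ANEURALNETWORKS_FUSED_RELU6:
          return model::Activation::RELU6;
        default:
          throw std::runtime_error{"Unsupported fuse code"};
      }
    }
    } // namespace NNAPIConvert

With helpers of this shape, OperationFactory converts the scalars once at graph construction time (see the OperationFactory.cc hunks below), and each backend's StageGenerator::visit(MaxPool2DNode) simply reads node.param().padding and node.param().activation.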

runtimes/neurun/backend/acl_cl/StageGenerator.cc
index d7f31fb..56d1fba 100644 (file)
@@ -448,8 +448,6 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
   const auto vstride_index{node.param().vstride_index};
   const auto hstride_index{node.param().hstride_index};
 
-  const auto activation_index{node.param().activation_index};
-
   const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
 
@@ -471,7 +469,7 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
     neurun::util::Padding padding;
     neurun::util::Stride stride;
 
-    FuseCode activation;
+    model::Activation activation;
   };
 
   Param param;
@@ -486,20 +484,13 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
   param.stride.horizontal = hstride;
 
   // TODO : Extract this to a function
+  const auto padding_type = node.param().padding;
   param.padding = [&]() {
-    if (!node.param().explicit_padding) // implicit padding
+    if (padding_type != model::Padding::EXPLICIT) // implicit padding
     {
-      const auto padding_code_index{node.param().padding_code_index};
-
-      const PaddingCode padding_type =
-          static_cast<PaddingCode>(_ctx.at(padding_code_index).asScalar<int32_t>());
-
-      assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
-             (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
-      VERBOSE(AvgPool2D) << "PAD: " << neurun::util::to_string(padding_type) << std::endl;
+      assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
 
-      return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+      return (padding_type == model::Padding::SAME)
                  ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                  : neurun::util::valid_padding();
     }
@@ -515,7 +506,7 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
     }
   }();
 
-  param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+  param.activation = node.param().activation;
 
   VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
   VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
runtimes/neurun/backend/acl_neon/StageGenerator.cc
index f963d53..351c248 100644 (file)
@@ -412,8 +412,6 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
   const auto vstride_index{node.param().vstride_index};
   const auto hstride_index{node.param().hstride_index};
 
-  const auto activation_index{node.param().activation_index};
-
   const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
 
@@ -435,7 +433,7 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
     neurun::util::Padding padding;
     neurun::util::Stride stride;
 
-    FuseCode activation;
+    model::Activation activation;
   };
 
   Param param;
@@ -450,20 +448,13 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
   param.stride.horizontal = hstride;
 
   // TODO : Extract this to a function
+  const auto padding_type = node.param().padding;
   param.padding = [&]() {
-    if (!node.param().explicit_padding) // implicit padding
+    if (padding_type != model::Padding::EXPLICIT) // implicit padding
     {
-      const auto padding_code_index{node.param().padding_code_index};
-
-      const PaddingCode padding_type =
-          static_cast<PaddingCode>(_ctx.at(padding_code_index).asScalar<int32_t>());
-
-      assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
-             (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
-      VERBOSE(AvgPool2D) << "PAD: " << neurun::util::to_string(padding_type) << std::endl;
+      assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
 
-      return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+      return (padding_type == model::Padding::SAME)
                  ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
                  : neurun::util::valid_padding();
     }
@@ -479,7 +470,7 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
     }
   }();
 
-  param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+  param.activation = node.param().activation;
 
   VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
   VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
runtimes/neurun/backend/cpu/StageGenerator.cc
index 12b51b6..7a89765 100644 (file)
@@ -268,8 +268,6 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
   const auto vstride_index{node.param().vstride_index};
   const auto hstride_index{node.param().hstride_index};
 
-  const auto activation_index{node.param().activation_index};
-
   const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
   const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
 
@@ -291,7 +289,7 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
     util::Padding padding;
     util::Stride stride;
 
-    FuseCode activation;
+    model::Activation activation;
   };
 
   Param param;
@@ -309,18 +307,13 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
   param.stride.horizontal = hstride;
 
   // TODO : Extract this to a function
+  const auto padding_type = node.param().padding;
   param.padding = [&]() {
-    if (!node.param().explicit_padding) // implicit padding
+    if (padding_type != model::Padding::EXPLICIT) // implicit padding
     {
-      const auto padding_code_index{node.param().padding_code_index};
-
-      const PaddingCode padding_type =
-          static_cast<PaddingCode>(_ctx.at(padding_code_index).asScalar<int32_t>());
-
-      assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
-             (ANEURALNETWORKS_PADDING_VALID == padding_type));
+      assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
 
-      return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+      return (padding_type == model::Padding::SAME)
                  ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
                                               _ctx.at(ofm_index).shape().asFeature(), param.stride,
                                               kw, kh)
@@ -338,7 +331,7 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node)
     }
   }();
 
-  param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+  param.activation = node.param().activation;
 
   auto tensors = _tensor_builder;
 
runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.cc
index 22c823b..0aaaf92 100644 (file)
@@ -41,7 +41,7 @@ namespace kernel
 MaxPoolLayer::MaxPoolLayer()
     : _inputData(), _outputData(), _inputShape(), _outputShape(), _paddingLeft(0), _paddingTop(0),
       _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0), _kernelWidth(0),
-      _kernelHeight(0), _activation(ANEURALNETWORKS_FUSED_NONE), _inputType(OperandType::FLOAT32)
+      _kernelHeight(0), _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32)
 {
   // DO NOTHING
 }
@@ -75,7 +75,7 @@ void MaxPoolLayer::configure(uint8_t *inputData, const Shape inputShape, const u
                              const uint32_t paddingRight, const uint32_t paddingTop,
                              const uint32_t paddingBottom, const uint32_t strideWidth,
                              const uint32_t strideHeight, const uint32_t kernelWidth,
-                             const uint32_t kernelHeight, const FuseCode activation,
+                             const uint32_t kernelHeight, const model::Activation activation,
                              uint8_t *outputData, const Shape outputShape)
 {
   _inputData.u8 = inputData;
runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.h
index b8f1f9e..0991b73 100644 (file)
@@ -46,8 +46,8 @@ public:
                  const uint32_t paddingRight, const uint32_t paddingTop,
                  const uint32_t paddingBottom, const uint32_t strideWidth,
                  const uint32_t strideHeight, const uint32_t kernelWidth,
-                 const uint32_t kernelHeight, const FuseCode activation, uint8_t *outputData,
-                 const Shape outputShape);
+                 const uint32_t kernelHeight, const model::Activation activation,
+                 uint8_t *outputData, const Shape outputShape);
 
   void run();
 
@@ -68,7 +68,7 @@ private:
   uint32_t _kernelWidth;
   uint32_t _kernelHeight;
 
-  FuseCode _activation;
+  model::Activation _activation;
 
   OperandType _inputType;
 };
runtimes/neurun/core/include/model/operation/MaxPool2DNode.h
index 5868671..8217e70 100644 (file)
@@ -20,6 +20,7 @@
 #include <memory>
 
 #include "model/Operation.h"
+#include "model/InternalType.h"
 
 namespace neurun
 {
@@ -44,16 +45,14 @@ public:
     OperandIndex hstride_index;
     OperandIndex vstride_index;
 
-    OperandIndex padding_code_index;
+    Padding padding;
 
     OperandIndex padding_left_index;
     OperandIndex padding_right_index;
     OperandIndex padding_top_index;
     OperandIndex padding_bottom_index;
 
-    OperandIndex activation_index;
-
-    bool explicit_padding;
+    Activation activation;
   };
 
 public:
runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc
index 5869455..516ad4c 100644 (file)
@@ -108,7 +108,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_MAX_POOL_2D] = [](const OperationFactory::Param &init_param,
-                                         neurun::model::Operands &) {
+                                         Operands &operands) {
     assert(init_param.input_count == 7 || init_param.input_count == 10);
     assert(init_param.output_count == 1);
 
@@ -129,15 +129,19 @@ OperationFactory::OperationFactory()
       //  5 -> Filter Height Index
       //  6 -> FuseCode (activation) Index
 
-      param.explicit_padding = false;
+      const auto padding_index = OperandIndex{init_param.inputs[1]};
+      const auto activation_index = OperandIndex{init_param.inputs[6]};
+
+      param.padding =
+          NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
 
-      param.padding_code_index = OperandIndex{init_param.inputs[1]};
       param.hstride_index = OperandIndex{init_param.inputs[2]};
       param.vstride_index = OperandIndex{init_param.inputs[3]};
 
       param.kw_index = OperandIndex{init_param.inputs[4]};
       param.kh_index = OperandIndex{init_param.inputs[5]};
-      param.activation_index = OperandIndex{init_param.inputs[6]};
+      param.activation =
+          NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
     }
     else if (init_param.input_count == 10) // support explicit padding
     {
@@ -153,7 +157,9 @@ OperationFactory::OperationFactory()
       //  8 -> Filter Height Index
       //  9 -> FuseCode (activation) Index
 
-      param.explicit_padding = true;
+      const auto activation_index = OperandIndex{init_param.inputs[9]};
+
+      param.padding = Padding::EXPLICIT;
 
       param.padding_left_index = OperandIndex{init_param.inputs[1]};
       param.padding_right_index = OperandIndex{init_param.inputs[2]};
@@ -164,7 +170,8 @@ OperationFactory::OperationFactory()
 
       param.kw_index = OperandIndex{init_param.inputs[7]};
       param.kh_index = OperandIndex{init_param.inputs[8]};
-      param.activation_index = OperandIndex{init_param.inputs[9]};
+      param.activation =
+          NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
     }
     return new operation::MaxPool2DNode{inputs, outputs, param};
   };