Move depthwise-conv padding & activation resolve (#5119)
author오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Fri, 3 May 2019 07:26:03 +0000 (16:26 +0900)
committer이춘석/On-Device Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>
Fri, 3 May 2019 07:26:03 +0000 (16:26 +0900)
Move depthwise convolution padding & activation type resolution from backend to frontend

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
runtimes/neurun/backend/acl_cl/StageGenerator.cc
runtimes/neurun/backend/acl_neon/StageGenerator.cc
runtimes/neurun/backend/cpu/StageGenerator.cc
runtimes/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc
runtimes/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.h
runtimes/neurun/core/include/model/operation/DepthwiseConv2DNode.h
runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc

index 987151e..cc46b97 100644 (file)
@@ -358,9 +358,7 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
   const auto vstride_index{node.param().vstride_index};
   const auto hstride_index{node.param().hstride_index};
 
-  const auto padding_index{node.param().padding_index};
   const auto multiplier_index{node.param().multiplier_index};
-  const auto activation_index{node.param().activation_index};
 
   const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
 
@@ -383,7 +381,7 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
     neurun::util::Stride stride;
     int multiplier;
 
-    FuseCode activation;
+    model::Activation activation;
   };
 
   Param param;
@@ -396,18 +394,13 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
   param.stride = stride;
 
   // TODO : Extract this to a function
+  const auto padding_type = node.param().padding;
   param.padding = [&]() {
-    if (!node.param().explicit_padding) // implicit padding
+    if (padding_type != model::Padding::EXPLICIT) // implicit padding
     {
-      const auto padding_index{node.param().padding_index};
+      assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
 
-      const PaddingCode padding_type =
-          static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
-      assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
-             (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
-      return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+      return (padding_type == model::Padding::SAME)
                  ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
                                               _ctx.at(ofm_index).shape().asFeature(), param.stride,
                                               ker_shape.W, ker_shape.H)
@@ -425,7 +418,7 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
     }
   }();
   param.multiplier = multiplier;
-  param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+  param.activation = node.param().activation;
 
   auto tensors = _tensor_builder;
 
index 69d8d80..d95c281 100644 (file)
@@ -322,9 +322,7 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
   const auto vstride_index{node.param().vstride_index};
   const auto hstride_index{node.param().hstride_index};
 
-  const auto padding_index{node.param().padding_index};
   const auto multiplier_index{node.param().multiplier_index};
-  const auto activation_index{node.param().activation_index};
 
   const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
 
@@ -347,7 +345,7 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
     neurun::util::Stride stride;
     int multiplier;
 
-    FuseCode activation;
+    model::Activation activation;
   };
 
   Param param;
@@ -360,18 +358,13 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
   param.stride = stride;
 
   // TODO : Extract this to a function
+  const auto padding_type = node.param().padding;
   param.padding = [&]() {
-    if (!node.param().explicit_padding) // implicit padding
+    if (padding_type != model::Padding::EXPLICIT) // implicit padding
     {
-      const auto padding_index{node.param().padding_index};
-
-      const PaddingCode padding_type =
-          static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+      assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
 
-      assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
-             (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
-      return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+      return (padding_type == model::Padding::SAME)
                  ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
                                               _ctx.at(ofm_index).shape().asFeature(), param.stride,
                                               ker_shape.W, ker_shape.H)
@@ -389,7 +382,7 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
     }
   }();
   param.multiplier = multiplier;
-  param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+  param.activation = node.param().activation;
 
   auto tensors = _tensor_builder;
 
index 68f3888..f66ff54 100644 (file)
@@ -170,9 +170,7 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
   const auto vstride_index{node.param().vstride_index};
   const auto hstride_index{node.param().hstride_index};
 
-  const auto padding_index{node.param().padding_index};
   const auto multiplier_index{node.param().multiplier_index};
-  const auto activation_index{node.param().activation_index};
 
   util::Stride stride;
 
@@ -199,7 +197,7 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
 
     int multiplier;
 
-    FuseCode activation;
+    model::Activation activation;
   };
 
   Param param;
@@ -217,18 +215,13 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
   param.stride = stride;
 
   // TODO : Extract this to a function
+  const auto padding_type = node.param().padding;
   param.padding = [&]() {
-    if (!node.param().explicit_padding) // implicit padding
+    if (padding_type != model::Padding::EXPLICIT) // implicit padding
     {
-      const auto padding_index{node.param().padding_index};
-
-      const PaddingCode padding_type =
-          static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+      assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
 
-      assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
-             (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
-      return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+      return (padding_type == model::Padding::SAME)
                  ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
                                               _ctx.at(ofm_index).shape().asFeature(), param.stride,
                                               _ctx.at(ker_index).shape().asKernel().W,
@@ -248,7 +241,7 @@ void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &node)
   }();
 
   param.multiplier = multiplier;
-  param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+  param.activation = node.param().activation;
 
   auto tensors = _tensor_builder;
 
index 8ad3ad4..7c956da 100644 (file)
@@ -31,7 +31,7 @@ DepthwiseConvolutionLayer::DepthwiseConvolutionLayer()
     : _inputData(), _kernelData(), _outputData(), _biasData(), _inputShape(), _kernelShape(),
       _outputShape(), _biasShape(), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
       _paddingBottom(0), _strideWidth(0), _strideHeight(0), _multiplier(0),
-      _activation(ANEURALNETWORKS_FUSED_NONE), _inputType(OperandType::FLOAT32)
+      _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32)
 {
   // DO NOTHING
 }
@@ -60,14 +60,12 @@ void DepthwiseConvolutionLayer::convFloat32()
 
 void DepthwiseConvolutionLayer::convQuant8() { throw "NYI"; }
 
-void DepthwiseConvolutionLayer::configure(uint8_t *inputData, const Shape inputShape,
-                                          uint8_t *kernelData, const Shape kernelShape,
-                                          uint8_t *biasData, const Shape biasShape,
-                                          const uint32_t paddingLeft, const uint32_t paddingRight,
-                                          const uint32_t paddingTop, const uint32_t paddingBottom,
-                                          const uint32_t strideWidth, const uint32_t strideHeight,
-                                          const uint32_t multiplier, const FuseCode activation,
-                                          uint8_t *outputData, const Shape outputShape)
+void DepthwiseConvolutionLayer::configure(
+    uint8_t *inputData, const Shape inputShape, uint8_t *kernelData, const Shape kernelShape,
+    uint8_t *biasData, const Shape biasShape, const uint32_t paddingLeft,
+    const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom,
+    const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t multiplier,
+    const model::Activation activation, uint8_t *outputData, const Shape outputShape)
 {
   _inputData.u8 = inputData;
   _inputShape = inputShape;
index e29c9bd..1a869f7 100644 (file)
@@ -46,7 +46,7 @@ public:
                  const Shape kernelShape, uint8_t *biasData, const Shape biasShape,
                  const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop,
                  const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH,
-                 const uint32_t multiplier, const FuseCode activation, uint8_t *outputData,
+                 const uint32_t multiplier, const model::Activation activation, uint8_t *outputData,
                  const Shape outputShape);
 
   void run();
@@ -72,7 +72,7 @@ private:
 
   uint32_t _multiplier;
 
-  FuseCode _activation;
+  model::Activation _activation;
 
   OperandType _inputType;
 };
index 67be738..5d27eb5 100644 (file)
@@ -20,6 +20,7 @@
 #include <memory>
 
 #include "model/Operation.h"
+#include "model/InternalType.h"
 
 namespace neurun
 {
@@ -43,7 +44,7 @@ public:
     OperandIndex hstride_index;
     OperandIndex vstride_index;
 
-    OperandIndex padding_index;
+    Padding padding;
 
     OperandIndex padding_left_index;
     OperandIndex padding_right_index;
@@ -51,9 +52,7 @@ public:
     OperandIndex padding_bottom_index;
 
     OperandIndex multiplier_index;
-    OperandIndex activation_index;
-
-    bool explicit_padding;
+    Activation activation;
   };
 
 public:
index 110afd3..5382f13 100644 (file)
@@ -42,7 +42,7 @@ OperationFactory::OperationFactory()
   using namespace neurun::model;
 
   _map[ANEURALNETWORKS_DEPTHWISE_CONV_2D] = [](const OperationFactory::Param &init_param,
-                                               neurun::model::Operands &) {
+                                               Operands &operands) {
     assert((init_param.input_count == 8 || init_param.input_count == 11) &&
            init_param.output_count == 1);
 
@@ -65,13 +65,16 @@ OperationFactory::OperationFactory()
       // 6 -> Depthwise multiplier
       // 7 -> Activation Index
 
-      param.padding_index = OperandIndex{init_param.inputs[3]};
+      const auto padding_index = OperandIndex{init_param.inputs[3]};
+      const auto activation_index = OperandIndex{init_param.inputs[7]};
+
+      param.padding =
+          NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
       param.hstride_index = OperandIndex{init_param.inputs[4]};
       param.vstride_index = OperandIndex{init_param.inputs[5]};
       param.multiplier_index = OperandIndex{init_param.inputs[6]};
-      param.activation_index = OperandIndex{init_param.inputs[7]};
-
-      param.explicit_padding = false;
+      param.activation =
+          NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
     }
     else
     {
@@ -87,6 +90,9 @@ OperationFactory::OperationFactory()
       // 9 -> Depthwise multiplier
       // 10-> Activation Index
 
+      const auto activation_index = OperandIndex{init_param.inputs[10]};
+
+      param.padding = Padding::EXPLICIT;
       param.padding_left_index = OperandIndex{init_param.inputs[3]};
       param.padding_right_index = OperandIndex{init_param.inputs[4]};
       param.padding_top_index = OperandIndex{init_param.inputs[5]};
@@ -94,9 +100,8 @@ OperationFactory::OperationFactory()
       param.hstride_index = OperandIndex{init_param.inputs[7]};
       param.vstride_index = OperandIndex{init_param.inputs[8]};
       param.multiplier_index = OperandIndex{init_param.inputs[9]};
-      param.activation_index = OperandIndex{init_param.inputs[10]};
-
-      param.explicit_padding = true;
+      param.activation =
+          NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
     }
 
     return new operation::DepthwiseConv2DNode{inputs, outputs, param};