[neurun] Enable L2Pool2D op (#4688)
authorSujin Kim/On-Device Lab(SR)/Engineer/Samsung Electronics <sjsujin.kim@samsung.com>
Thu, 14 Mar 2019 00:29:09 +0000 (09:29 +0900)
committerSaehie Park/On-Device Lab(SR)/Principal Engineer/Samsung Electronics <saehie.park@samsung.com>
Thu, 14 Mar 2019 00:29:09 +0000 (09:29 +0900)
This commit enables the `L2Pool2D` op for the `acl_cl` backend.
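
For context, L2 pooling produces, for each kw x kh window of the input feature map, the square root of the mean of the squared input values. The sketch below is illustrative only and not part of this change: it assumes a single-channel row-major plane, valid padding, and no fused activation, and all names are made up. In the actual backend this work is delegated to arm_compute::CLPoolingLayer with PoolingType::L2, as shown in StageGenerator.cc below.

  #include <cmath>
  #include <cstdint>
  #include <vector>

  // Illustrative reference for the L2_POOL_2D semantics enabled by this commit.
  // ifm is an ifm_h x ifm_w plane in row-major order; valid padding is assumed.
  std::vector<float> l2_pool_2d(const std::vector<float> &ifm, int32_t ifm_h, int32_t ifm_w,
                                int32_t kh, int32_t kw, int32_t vstride, int32_t hstride)
  {
    const int32_t ofm_h = (ifm_h - kh) / vstride + 1;
    const int32_t ofm_w = (ifm_w - kw) / hstride + 1;
    std::vector<float> ofm(ofm_h * ofm_w);

    for (int32_t oy = 0; oy < ofm_h; ++oy)
    {
      for (int32_t ox = 0; ox < ofm_w; ++ox)
      {
        float sum_sq = 0.0f;
        for (int32_t ky = 0; ky < kh; ++ky)
          for (int32_t kx = 0; kx < kw; ++kx)
          {
            const float v = ifm[(oy * vstride + ky) * ifm_w + (ox * hstride + kx)];
            sum_sq += v * v;
          }
        // L2 pooling: square root of the mean of squares over the window
        ofm[oy * ofm_w + ox] = std::sqrt(sum_sq / static_cast<float>(kh * kw));
      }
    }
    return ofm;
  }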

Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
runtimes/neurun/frontend/nnapi/frontend/wrapper/OperationFactory.cc
runtimes/neurun/src/backend/acl_cl/Convert.cc
runtimes/neurun/src/backend/acl_cl/Convert.h
runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
runtimes/neurun/src/backend/acl_cl/StageGenerator.h
runtimes/neurun/src/model/operation/L2Pool2DNode.cc [new file with mode: 0644]
runtimes/neurun/src/model/operation/L2Pool2DNode.h [new file with mode: 0644]
runtimes/neurun/src/model/operation/Node.Include.h
runtimes/neurun/src/model/operation/Op.lst
tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
tests/scripts/neurun_frameworktest_list.armv7l.acl_cl.txt

index 2c4873a..2feca82 100644 (file)
@@ -729,6 +729,64 @@ OperationFactory::OperationFactory()
 
     return new operation::SpaceToDepthNode{inputs, outputs, param};
   };
+
+  _map[ANEURALNETWORKS_L2_POOL_2D] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count == 10 || init_param.input_count == 7);
+    assert(init_param.output_count == 1);
+
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> IFM Tensor Index
+    operand::IndexSet inputs{init_param.inputs[0]};
+
+    operation::L2Pool2DNode::Param param;
+
+    if (init_param.input_count == 7) // Implicit Padding case
+    {
+      //  1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+      //  2 -> Horizontal (over width) Stride Index
+      //  3 -> Vertical (over height) Stride Index
+      //  4 -> Filter Width Index
+      //  5 -> Filter Height Index
+      //  6 -> FuseCode (activation) Index
+      param.padding_index = operand::Index{init_param.inputs[1]};
+      param.hstride_index = operand::Index{init_param.inputs[2]};
+      param.vstride_index = operand::Index{init_param.inputs[3]};
+      param.kw_index = operand::Index{init_param.inputs[4]};
+      param.kh_index = operand::Index{init_param.inputs[5]};
+      param.activation_index = operand::Index{init_param.inputs[6]};
+
+      param.explicit_padding = false;
+    }
+    else // Explicit Padding case
+    {
+      //  1 -> Padding_left index
+      //  2 -> Padding_right index
+      //  3 -> Padding_top index
+      //  4 -> Padding_bottom index
+      //  5 -> Horizontal (over width) Stride Index
+      //  6 -> Vertical (over height) Stride Index
+      //  7 -> Filter Width Index
+      //  8 -> Filter Height Index
+      //  9 -> FuseCode (activation) Index
+
+      param.padding_left_index = operand::Index{init_param.inputs[1]};
+      param.padding_right_index = operand::Index{init_param.inputs[2]};
+      param.padding_top_index = operand::Index{init_param.inputs[3]};
+      param.padding_bottom_index = operand::Index{init_param.inputs[4]};
+      param.hstride_index = operand::Index{init_param.inputs[5]};
+      param.vstride_index = operand::Index{init_param.inputs[6]};
+      param.kw_index = operand::Index{init_param.inputs[7]};
+      param.kh_index = operand::Index{init_param.inputs[8]};
+      param.activation_index = operand::Index{init_param.inputs[9]};
+
+      param.explicit_padding = true;
+    }
+
+    return new operation::L2Pool2DNode{inputs, outputs, param};
+  };
 }
 
 neurun::model::operation::Node *OperationFactory::create(ANeuralNetworksOperationType type,
index f457fa4..9a95f6f 100644 (file)
@@ -84,6 +84,18 @@ namespace acl_cl
                                    asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
 }
 
+::arm_compute::PadStrideInfo asPadStrideInfo(const neurun::util::Padding &padding,
+                                             const neurun::util::Stride &stride)
+{
+  return ::arm_compute::PadStrideInfo{stride.horizontal,
+                                      stride.vertical,
+                                      padding.left,
+                                      padding.right,
+                                      padding.top,
+                                      padding.bottom,
+                                      ::arm_compute::DimensionRoundingType::FLOOR};
+}
+
 } // namespace acl_cl
 } // namespace backend
 } // namespace neurun
index 6d83dbb..9e1349c 100644 (file)
@@ -27,6 +27,8 @@
 #include "misc/feature/Shape.h"
 #include "misc/kernel/Shape.h"
 
+#include "util/Padding.h"
+
 namespace neurun
 {
 namespace backend
@@ -41,6 +43,9 @@ namespace acl_cl
                                        const ::neurun::model::operand::TypeInfo &typeInfo,
                                        bool apply_dim_correction = true);
 
+::arm_compute::PadStrideInfo asPadStrideInfo(const neurun::util::Padding &padding,
+                                             const neurun::util::Stride &stride);
+
 } // namespace acl_cl
 } // namespace backend
 } // namespace neurun
index b07f887..78bcca4 100644 (file)
@@ -1940,6 +1940,111 @@ void StageGenerator::visit(const model::operation::SpaceToDepthNode &node)
   });
 }
 
+void StageGenerator::visit(const model::operation::L2Pool2DNode &node)
+{
+  const auto ofm_index{node.getOutputs().at(0)};
+  const auto ifm_index{node.getInputs().at(model::operation::L2Pool2DNode::Input::INPUT)};
+
+  const auto kh_index{node.param().kh_index};
+  const auto kw_index{node.param().kw_index};
+
+  const auto vstride_index{node.param().vstride_index};
+  const auto hstride_index{node.param().hstride_index};
+
+  const auto activation_index{node.param().activation_index};
+
+  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+
+  const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
+  const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
+
+  neurun::util::Stride stride;
+
+  stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
+  stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
+
+  // Construct operation parameters
+  struct Param
+  {
+    model::operand::Index ofm_index;
+    model::operand::Index ifm_index;
+
+    uint32_t kw;
+    uint32_t kh;
+
+    neurun::util::Padding padding;
+    neurun::util::Stride stride;
+
+    FuseCode activation;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index;
+  param.ifm_index = ifm_index;
+
+  param.kw = kw;
+  param.kh = kh;
+
+  param.stride = stride;
+
+  // TODO : Extract this to a function
+  param.padding = [&]() {
+    if (!node.param().explicit_padding) // implicit padding
+    {
+      const auto padding_index{node.param().padding_index};
+
+      const PaddingCode padding_type =
+          static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+
+      assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+             (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
+      return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+                 ? neurun::util::same_padding(ifm_shape, ofm_shape, stride, kw, kh)
+                 : neurun::util::valid_padding();
+    }
+    else // explicit padding
+    {
+      neurun::util::Padding padding;
+      padding.left = _ctx.at({node.param().padding_left_index}).asScalar<int32_t>();
+      padding.right = _ctx.at({node.param().padding_right_index}).asScalar<int32_t>();
+      padding.top = _ctx.at({node.param().padding_top_index}).asScalar<int32_t>();
+      padding.bottom = _ctx.at({node.param().padding_bottom_index}).asScalar<int32_t>();
+
+      return padding;
+    }
+  }();
+
+  param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+  auto tensors = _tensor_builder;
+
+  returnStage([tensors, param](IExecutionBuilder &builder) {
+    auto ofm_alloc = tensors->at(param.ofm_index).get();
+    auto ifm_alloc = tensors->at(param.ifm_index).get();
+
+    ::arm_compute::PoolingLayerInfo info{::arm_compute::PoolingType::L2,
+                                         ::arm_compute::Size2D{param.kw, param.kh},
+                                         asPadStrideInfo(param.padding, param.stride)};
+
+    std::unique_ptr<::arm_compute::IFunction> fn;
+
+    auto l = make_layer<::arm_compute::CLPoolingLayer>();
+
+    l->configure(ifm_alloc->handle(), ofm_alloc->handle(), info);
+
+    fn = std::move(l);
+
+    auto acl_fn = make_cl_function(std::move(fn));
+
+    builder.append(std::move(acl_fn));
+
+    ActivationBuilder{builder}.append(param.activation, ofm_alloc->handle());
+  });
+}
+
 } // namespace acl_cl
 } // namespace backend
 } // namespace neurun
index c552f1b..5a60893 100644 (file)
@@ -67,6 +67,7 @@ public:
   virtual void visit(const model::operation::ReLU6Node &) override;
   virtual void visit(const model::operation::FloorNode &) override;
   virtual void visit(const model::operation::SpaceToDepthNode &) override;
+  virtual void visit(const model::operation::L2Pool2DNode &) override;
 
 private:
   const neurun::model::operand::Set &_ctx;
diff --git a/runtimes/neurun/src/model/operation/L2Pool2DNode.cc b/runtimes/neurun/src/model/operation/L2Pool2DNode.cc
new file mode 100644 (file)
index 0000000..78d13ea
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "L2Pool2DNode.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void L2Pool2DNode::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+L2Pool2DNode::L2Pool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+                           const Param &param)
+    : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/src/model/operation/L2Pool2DNode.h b/runtimes/neurun/src/model/operation/L2Pool2DNode.h
new file mode 100644 (file)
index 0000000..0dd17c1
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_L2_POOL_2D_NODE_H__
+#define __NEURUN_MODEL_OPERATION_L2_POOL_2D_NODE_H__
+
+#include <memory>
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class L2Pool2DNode : public model::operation::Node
+{
+public:
+  enum Input
+  {
+    INPUT = 0,
+  };
+
+  struct Param
+  {
+    operand::Index padding_index;
+
+    operand::Index padding_left_index;
+    operand::Index padding_right_index;
+    operand::Index padding_top_index;
+    operand::Index padding_bottom_index;
+
+    operand::Index hstride_index;
+    operand::Index vstride_index;
+
+    operand::Index kw_index;
+    operand::Index kh_index;
+
+    operand::Index activation_index;
+
+    bool explicit_padding;
+  };
+
+public:
+  L2Pool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+               const Param &param);
+
+public:
+  virtual void accept(NodeVisitor &&) const override;
+  virtual std::string getName() const override { return "L2Pool2D"; }
+
+public:
+  const Param &param() const { return _param; }
+
+private:
+  Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_L2_POOL_2D_NODE_H__
index b4aba7f..4c50751 100644 (file)
@@ -47,3 +47,4 @@
 #include "ReLU6Node.h"
 #include "FloorNode.h"
 #include "SpaceToDepthNode.h"
+#include "L2Pool2DNode.h"
index da17e6d..085138a 100644 (file)
@@ -51,4 +51,5 @@ OP(ReLU1Node               , true    , RELU1)
 OP(ReLU6Node               , true    , RELU6)
 OP(FloorNode               , true    , FLOOR)
 OP(SpaceToDepthNode        , true    , SPACE_TO_DEPTH)
+OP(L2Pool2DNode            , true    , L2_POOL_2D)
 OP(PermuteNode             , false   , NOT_AVAILABLE)
index 0553fc4..93f1c1f 100644 (file)
@@ -20,7 +20,6 @@ GeneratedTests.embedding_lookup_4d_nnfw
 GeneratedTests.equal_ex*
 GeneratedTests.hashtable_lookup*
 GeneratedTests.l2_normalization*
-GeneratedTests.l2_pool*
 GeneratedTests.local_response_norm*
 GeneratedTests.logical_or_ex*
 GeneratedTests.lsh_projection*
index adaa90e..421efe5 100644 (file)
@@ -9,6 +9,7 @@ div/broadcast
 exp
 floor
 fullyconnected/fc1
+l2_pool_2d
 max_pool_2d
 MODELS/mobilenet
 mul/broadcast