Resolve param value for l2 pool at frontend (#5304)
author 오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Thu, 30 May 2019 04:20:25 +0000 (13:20 +0900)
committer 박세희/On-Device Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>
Thu, 30 May 2019 04:20:25 +0000 (13:20 +0900)
- Remove index param in l2pool2d node and resolve value at frontend
- Use resolved constant param value at acl_cl backend

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
runtimes/neurun/backend/acl_cl/StageGenerator.cc
runtimes/neurun/core/include/model/operation/L2Pool2DNode.h
runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc

index dec3645..5aa84c4 100644 (file)
@@ -1898,23 +1898,9 @@ void StageGenerator::visit(const model::operation::L2Pool2DNode &node)
   const auto ofm_index{node.getOutputs().at(0)};
   const auto ifm_index{node.getInputs().at(model::operation::L2Pool2DNode::Input::INPUT)};
 
-  const auto kh_index{node.param().kh_index};
-  const auto kw_index{node.param().kw_index};
-
-  const auto vstride_index{node.param().vstride_index};
-  const auto hstride_index{node.param().hstride_index};
-
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
   const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
 
-  const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
-  const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
-
-  neurun::model::Stride stride;
-
-  stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
-  stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
-
   // Construct operation parameters
   struct Param
   {
@@ -1934,12 +1920,11 @@ void StageGenerator::visit(const model::operation::L2Pool2DNode &node)
   param.ofm_index = ofm_index;
   param.ifm_index = ifm_index;
 
-  param.kw = kw;
-  param.kh = kh;
-
-  param.stride = stride;
+  param.kw = node.param().kw;
+  param.kh = node.param().kh;
+  param.stride = node.param().stride;
   param.padding = neurun::util::calculatePadding(node.param().padding, ifm_shape, ofm_shape,
-                                                 param.stride, kw, kh);
+                                                 param.stride, param.kw, param.kh);
   param.activation = node.param().activation;
 
   auto tensors = _tensor_builder;
index 5571b8f..4586b87 100644 (file)
@@ -41,13 +41,8 @@ public:
   {
     Padding padding;
     Stride stride;
-    // hstride_index and vtride_index will be deprecated
-    OperandIndex hstride_index;
-    OperandIndex vstride_index;
-
-    OperandIndex kw_index;
-    OperandIndex kh_index;
-
+    uint32_t kw;
+    uint32_t kh;
     Activation activation;
   };
 
index 3fa5ce8..b266a32 100644 (file)
@@ -981,14 +981,17 @@ OperationFactory::OperationFactory()
       //  5 -> Filter Height Index
       //  6 -> FuseCode (activation) Index
       const auto padding_index = OperandIndex{init_param.inputs[1]};
+      const auto hstride_index = OperandIndex{init_param.inputs[2]};
+      const auto vstride_index = OperandIndex{init_param.inputs[3]};
+      const auto kw_index = OperandIndex{init_param.inputs[4]};
+      const auto kh_index = OperandIndex{init_param.inputs[5]};
       const auto activation_index = OperandIndex{init_param.inputs[6]};
 
       param.padding.type =
           NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
-      param.hstride_index = OperandIndex{init_param.inputs[2]};
-      param.vstride_index = OperandIndex{init_param.inputs[3]};
-      param.kw_index = OperandIndex{init_param.inputs[4]};
-      param.kh_index = OperandIndex{init_param.inputs[5]};
+      param.stride = makeStride(operands, hstride_index, vstride_index);
+      param.kw = getUint32Scalar(operands, kw_index);
+      param.kh = getUint32Scalar(operands, kh_index);
       param.activation =
           NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
     }
@@ -1007,15 +1010,18 @@ OperationFactory::OperationFactory()
       const auto padding_right_index = OperandIndex{init_param.inputs[2]};
       const auto padding_top_index = OperandIndex{init_param.inputs[3]};
       const auto padding_bottom_index = OperandIndex{init_param.inputs[4]};
+      const auto hstride_index = OperandIndex{init_param.inputs[5]};
+      const auto vstride_index = OperandIndex{init_param.inputs[6]};
+      const auto kw_index = OperandIndex{init_param.inputs[7]};
+      const auto kh_index = OperandIndex{init_param.inputs[8]};
       const auto activation_index = OperandIndex{init_param.inputs[9]};
 
       param.padding.type = PaddingType::EXPLICIT;
       param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
                                                 padding_top_index, padding_bottom_index);
-      param.hstride_index = OperandIndex{init_param.inputs[5]};
-      param.vstride_index = OperandIndex{init_param.inputs[6]};
-      param.kw_index = OperandIndex{init_param.inputs[7]};
-      param.kh_index = OperandIndex{init_param.inputs[8]};
+      param.stride = makeStride(operands, hstride_index, vstride_index);
+      param.kw = getUint32Scalar(operands, kw_index);
+      param.kh = getUint32Scalar(operands, kh_index);
       param.activation =
           NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
     }