From c166f322524db6a58418a0f2b8bdf760ace493eb Mon Sep 17 00:00:00 2001
From: Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics
Date: Tue, 28 May 2019 08:26:57 +0900
Subject: [PATCH] Resolve stride value for convolution at frontend (#5282)

Remove stride index params in conv2d node
Resolve stride value for conv2d at frontend
Use resolved constant stride value at each backend

Signed-off-by: Hyeongseok Oh
---
 runtimes/neurun/backend/acl_cl/StageGenerator.cc   | 10 +-------
 runtimes/neurun/backend/acl_neon/StageGenerator.cc | 10 +-------
 runtimes/neurun/backend/cpu/StageGenerator.cc      | 16 +++---------
 .../core/include/model/operation/Conv2DNode.h      |  4 ---
 .../frontend/nnapi/wrapper/OperationFactory.cc     | 29 ++++++++++++++++++----
 runtimes/neurun/test/graph/operation/SetIO.cc      |  4 +--
 6 files changed, 32 insertions(+), 41 deletions(-)

diff --git a/runtimes/neurun/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/backend/acl_cl/StageGenerator.cc
index bbb4b1c..e3d90a5 100644
--- a/runtimes/neurun/backend/acl_cl/StageGenerator.cc
+++ b/runtimes/neurun/backend/acl_cl/StageGenerator.cc
@@ -218,18 +218,10 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
   const auto ker_index{node.getInputs().at(Conv2DNode::Input::KERNEL)};
   const auto bias_index{node.getInputs().at(Conv2DNode::Input::BIAS)};
 
-  const auto vstride_index{node.param().vstride_index};
-  const auto hstride_index{node.param().hstride_index};
-
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
   const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
   const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
 
-  model::Stride stride;
-
-  stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
-  stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
-
   // Construct operation parameters
   struct Param
   {
@@ -250,7 +242,7 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
   param.ker_index = ker_index;
   param.bias_index = bias_index;
 
-  param.stride = stride;
+  param.stride = node.param().stride;
   param.padding = neurun::util::calculatePadding(node.param().padding, ifm_shape, ofm_shape,
                                                  param.stride, ker_shape.W, ker_shape.H);
   param.activation = node.param().activation;
diff --git a/runtimes/neurun/backend/acl_neon/StageGenerator.cc b/runtimes/neurun/backend/acl_neon/StageGenerator.cc
index 2274752..fec9f9d 100644
--- a/runtimes/neurun/backend/acl_neon/StageGenerator.cc
+++ b/runtimes/neurun/backend/acl_neon/StageGenerator.cc
@@ -185,18 +185,10 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
   const auto ker_index{node.getInputs().at(Conv2DNode::Input::KERNEL)};
   const auto bias_index{node.getInputs().at(Conv2DNode::Input::BIAS)};
 
-  const auto vstride_index{node.param().vstride_index};
-  const auto hstride_index{node.param().hstride_index};
-
   const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
   const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
 
-  model::Stride stride;
-
-  stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
-  stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
-
   // Construct operation parameters
   struct Param
   {
@@ -217,7 +209,7 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
   param.ker_index = ker_index;
   param.bias_index = bias_index;
 
-  param.stride = stride;
+  param.stride = node.param().stride;
   param.padding = neurun::util::calculatePadding(node.param().padding, ifm_shape, ofm_shape,
                                                  param.stride, ker_shape.W, ker_shape.H);
   param.activation = node.param().activation;
diff --git a/runtimes/neurun/backend/cpu/StageGenerator.cc b/runtimes/neurun/backend/cpu/StageGenerator.cc
index c7f6ac2..dce76d8 100644
--- a/runtimes/neurun/backend/cpu/StageGenerator.cc
+++ b/runtimes/neurun/backend/cpu/StageGenerator.cc
@@ -63,13 +63,9 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
   const auto ker_index{node.getInputs().at(Conv2DNode::Input::KERNEL)};
   const auto bias_index{node.getInputs().at(Conv2DNode::Input::BIAS)};
 
-  const auto vstride_index{node.param().vstride_index};
-  const auto hstride_index{node.param().hstride_index};
-
-  model::Stride stride;
-
-  stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
-  stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
+  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+  const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
 
   // Construct operation parameters
   struct Param
@@ -101,11 +97,7 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
   param.ker_shape = ::neurun::backend::cpu::kernel::getShape(_ctx.at(ker_index));
   param.bias_shape = ::neurun::backend::cpu::kernel::getShape(_ctx.at(bias_index));
 
-  param.stride = stride;
-
-  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
-  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
-  const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
+  param.stride = node.param().stride;
   param.padding = neurun::util::calculatePadding(node.param().padding, ifm_shape, ofm_shape,
                                                  param.stride, ker_shape.W, ker_shape.H);
   param.activation = node.param().activation;
diff --git a/runtimes/neurun/core/include/model/operation/Conv2DNode.h b/runtimes/neurun/core/include/model/operation/Conv2DNode.h
index 9526119..4b70610 100644
--- a/runtimes/neurun/core/include/model/operation/Conv2DNode.h
+++ b/runtimes/neurun/core/include/model/operation/Conv2DNode.h
@@ -42,10 +42,6 @@ public:
   struct Param
   {
     Stride stride;
-    // hstride_index and vtride_index will be deprecated
-    OperandIndex hstride_index;
-    OperandIndex vstride_index;
-
     Padding padding;
     Activation activation;
   };
diff --git a/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc b/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc
index fc3da94..4172efc 100644
--- a/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc
+++ b/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc
@@ -53,6 +53,24 @@ ExplicitPadding setExplicitPaddingParam(Operands &operands, const OperandIndex &
   return param;
 }
 
+Stride setStride(Operands &operands, const OperandIndex &horizontal_index,
+                 const OperandIndex &vertical_index)
+{
+  auto horizontal = operands.at(horizontal_index).asScalar<int32_t>();
+  auto vertical = operands.at(vertical_index).asScalar<int32_t>();
+
+  if (vertical < 0 || horizontal < 0)
+  {
+    throw std::runtime_error{"Cannot handle negative stride value"};
+  }
+
+  Stride stride;
+  stride.horizontal = horizontal;
+  stride.vertical = vertical;
+
+  return stride;
+}
+
 } // namespace
 
 OperationFactory &OperationFactory::instance()
@@ -401,12 +419,13 @@ OperationFactory::OperationFactory()
       // 6 -> Activation Index
 
       const auto padding_index = OperandIndex{init_param.inputs[3]};
+      const auto hstride_index = OperandIndex{init_param.inputs[4]};
+      const auto vstride_index = OperandIndex{init_param.inputs[5]};
       const auto activation_index = OperandIndex{init_param.inputs[6]};
 
       param.padding.type =
           NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
-      param.hstride_index = OperandIndex{init_param.inputs[4]};
-      param.vstride_index = OperandIndex{init_param.inputs[5]};
+      param.stride = setStride(operands, hstride_index, vstride_index);
       param.activation =
           NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
     }
@@ -426,15 +445,15 @@ OperationFactory::OperationFactory()
       const auto padding_right_index = OperandIndex{init_param.inputs[4]};
       const auto padding_top_index = OperandIndex{init_param.inputs[5]};
       const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
+      const auto hstride_index = OperandIndex{init_param.inputs[7]};
+      const auto vstride_index = OperandIndex{init_param.inputs[8]};
       const auto activation_index = OperandIndex{init_param.inputs[9]};
 
       param.padding.type = PaddingType::EXPLICIT;
       param.padding.param =
           setExplicitPaddingParam(operands, padding_left_index, padding_right_index,
                                   padding_top_index, padding_bottom_index);
-      param.hstride_index = OperandIndex{init_param.inputs[7]};
-      param.vstride_index = OperandIndex{init_param.inputs[8]};
-
+      param.stride = setStride(operands, hstride_index, vstride_index);
       param.activation =
           NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
     }
diff --git a/runtimes/neurun/test/graph/operation/SetIO.cc b/runtimes/neurun/test/graph/operation/SetIO.cc
index b98b914..939b126 100644
--- a/runtimes/neurun/test/graph/operation/SetIO.cc
+++ b/runtimes/neurun/test/graph/operation/SetIO.cc
@@ -47,8 +47,8 @@ TEST(graph_operation_setIO, operation_setIO_conv)
 
   GraphNode::Param conv_params;
   conv_params.padding.type = neurun::model::PaddingType::SAME;
-  conv_params.hstride_index = model.operands.append(shape, type);
-  conv_params.vstride_index = model.operands.append(shape, type);
+  conv_params.stride.horizontal = 1;
+  conv_params.stride.vertical = 1;
   conv_params.activation = neurun::model::Activation::NONE;
 
   auto output_operand = model.operands.append(shape, type).value();
-- 
2.7.4
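
Note (not part of the patch): below is a minimal, self-contained sketch of the
stride resolution this change moves into the NNAPI frontend. The Operands and
Stride types here are simplified stand-ins for neurun's model types, and the
operand indices in main() are hypothetical; only the shape of the logic mirrors
the setStride() helper added above.

#include <cstdint>
#include <iostream>
#include <map>
#include <stdexcept>

// Simplified stand-ins for neurun's model::Stride and operand table.
struct Stride
{
  int32_t horizontal;
  int32_t vertical;
};

using OperandIndex = uint32_t;
// Maps an operand index to a constant scalar value, standing in for the
// operand set the frontend reads stride values from.
using Operands = std::map<OperandIndex, int32_t>;

// Mirrors the patch's setStride(): dereference both scalar operands once,
// reject negative values, and return a plain value type that backends can
// later copy from node.param().stride without touching the operand table.
Stride setStride(const Operands &operands, OperandIndex horizontal_index,
                 OperandIndex vertical_index)
{
  const int32_t horizontal = operands.at(horizontal_index);
  const int32_t vertical = operands.at(vertical_index);

  if (vertical < 0 || horizontal < 0)
  {
    throw std::runtime_error{"Cannot handle negative stride value"};
  }

  Stride stride;
  stride.horizontal = horizontal;
  stride.vertical = vertical;
  return stride;
}

int main()
{
  // Hypothetical layout: operand 4 holds the horizontal stride and operand 5
  // the vertical stride, as in CONV_2D's implicit-padding inputs[4]/inputs[5].
  const Operands operands{{4, 2}, {5, 2}};

  const Stride stride = setStride(operands, 4, 5);
  std::cout << "stride = " << stride.horizontal << "x" << stride.vertical << "\n";
  return 0;
}

The point of the structure: constant stride operands are dereferenced and
validated exactly once at graph-construction time, so the acl_cl, acl_neon,
and cpu StageGenerators can read node.param().stride directly instead of each
repeating the asScalar() lookups.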