From: 오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 Date: Tue, 28 May 2019 07:52:44 +0000 (+0900) Subject: Resolve param value for maxpool at frontend (#5292) X-Git-Tag: submit/tizen/20190809.050447~751 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=f7ab172d177c0eb06319cd56347f751aa745e201;p=platform%2Fcore%2Fml%2Fnnfw.git Resolve param value for maxpool at frontend (#5292) - Remove stride and kernel size index param in maxpool node - Resolve param values for maxpool at frontend - Use resolved constant param at each backend Signed-off-by: Hyeongseok Oh --- diff --git a/runtimes/neurun/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/backend/acl_cl/StageGenerator.cc index e3d90a5..5dde64b 100644 --- a/runtimes/neurun/backend/acl_cl/StageGenerator.cc +++ b/runtimes/neurun/backend/acl_cl/StageGenerator.cc @@ -353,21 +353,9 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(model::operation::MaxPool2DNode::Input::INPUT)}; - const auto kh_index{node.param().kh_index}; - const auto kw_index{node.param().kw_index}; - - const auto vstride_index{node.param().vstride_index}; - const auto hstride_index{node.param().hstride_index}; - const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(); const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(); - const int32_t kh = _ctx.at(kh_index).asScalar(); - const int32_t kw = _ctx.at(kw_index).asScalar(); - - const int32_t vstride = _ctx.at(vstride_index).asScalar(); - const int32_t hstride = _ctx.at(hstride_index).asScalar(); - // Construct operation parameters struct Param { @@ -387,24 +375,21 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node) param.ofm_index = ofm_index; param.ifm_index = ifm_index; - param.kh = kh; - param.kw = kw; - - param.stride.vertical = vstride; - param.stride.horizontal = hstride; - + param.kh = node.param().kh; + param.kw = node.param().kw; + 
param.stride = node.param().stride; param.padding = neurun::util::calculatePadding(node.param().padding, ifm_shape, ofm_shape, - param.stride, kw, kh); + param.stride, param.kw, param.kh); param.activation = node.param().activation; VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl; VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl; VERBOSE(MaxPool2D) << "OFM_H: " << ofm_shape.H << std::endl; VERBOSE(MaxPool2D) << "OFM_W: " << ofm_shape.W << std::endl; - VERBOSE(MaxPool2D) << "KER_H: " << kh << std::endl; - VERBOSE(MaxPool2D) << "KER_W: " << kw << std::endl; - VERBOSE(MaxPool2D) << "STRIDE_H: " << vstride << std::endl; - VERBOSE(MaxPool2D) << "STRIDE_W: " << hstride << std::endl; + VERBOSE(MaxPool2D) << "KER_H: " << param.kh << std::endl; + VERBOSE(MaxPool2D) << "KER_W: " << param.kw << std::endl; + VERBOSE(MaxPool2D) << "STRIDE_H: " << param.stride.vertical << std::endl; + VERBOSE(MaxPool2D) << "STRIDE_W: " << param.stride.horizontal << std::endl; VERBOSE(MaxPool2D) << "PAD(T): " << param.padding.top << std::endl; VERBOSE(MaxPool2D) << "PAD(B): " << param.padding.bottom << std::endl; VERBOSE(MaxPool2D) << "PAD(L): " << param.padding.left << std::endl; diff --git a/runtimes/neurun/backend/acl_neon/StageGenerator.cc b/runtimes/neurun/backend/acl_neon/StageGenerator.cc index fec9f9d..5b97b68 100644 --- a/runtimes/neurun/backend/acl_neon/StageGenerator.cc +++ b/runtimes/neurun/backend/acl_neon/StageGenerator.cc @@ -320,21 +320,9 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(model::operation::MaxPool2DNode::Input::INPUT)}; - const auto kh_index{node.param().kh_index}; - const auto kw_index{node.param().kw_index}; - - const auto vstride_index{node.param().vstride_index}; - const auto hstride_index{node.param().hstride_index}; - const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(); const auto ifm_shape = 
_ctx.at(ifm_index).shape().asFeature(); - const int32_t kh = _ctx.at(kh_index).asScalar(); - const int32_t kw = _ctx.at(kw_index).asScalar(); - - const int32_t vstride = _ctx.at(vstride_index).asScalar(); - const int32_t hstride = _ctx.at(hstride_index).asScalar(); - // Construct operation parameters struct Param { @@ -354,24 +342,21 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node) param.ofm_index = ofm_index; param.ifm_index = ifm_index; - param.kh = kh; - param.kw = kw; - - param.stride.vertical = vstride; - param.stride.horizontal = hstride; - + param.kh = node.param().kh; + param.kw = node.param().kw; + param.stride = node.param().stride; param.padding = neurun::util::calculatePadding(node.param().padding, ifm_shape, ofm_shape, - param.stride, kw, kh); + param.stride, param.kw, param.kh); param.activation = node.param().activation; VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl; VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl; VERBOSE(MaxPool2D) << "OFM_H: " << ofm_shape.H << std::endl; VERBOSE(MaxPool2D) << "OFM_W: " << ofm_shape.W << std::endl; - VERBOSE(MaxPool2D) << "KER_H: " << kh << std::endl; - VERBOSE(MaxPool2D) << "KER_W: " << kw << std::endl; - VERBOSE(MaxPool2D) << "STRIDE_H: " << vstride << std::endl; - VERBOSE(MaxPool2D) << "STRIDE_W: " << hstride << std::endl; + VERBOSE(MaxPool2D) << "KER_H: " << param.kh << std::endl; + VERBOSE(MaxPool2D) << "KER_W: " << param.kw << std::endl; + VERBOSE(MaxPool2D) << "STRIDE_H: " << param.stride.vertical << std::endl; + VERBOSE(MaxPool2D) << "STRIDE_W: " << param.stride.horizontal << std::endl; VERBOSE(MaxPool2D) << "PAD(T): " << param.padding.top << std::endl; VERBOSE(MaxPool2D) << "PAD(B): " << param.padding.bottom << std::endl; VERBOSE(MaxPool2D) << "PAD(L): " << param.padding.left << std::endl; diff --git a/runtimes/neurun/backend/cpu/StageGenerator.cc b/runtimes/neurun/backend/cpu/StageGenerator.cc index dce76d8..116fbcf 100644 --- 
a/runtimes/neurun/backend/cpu/StageGenerator.cc +++ b/runtimes/neurun/backend/cpu/StageGenerator.cc @@ -212,17 +212,8 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node) const auto ofm_index{node.getOutputs().at(0)}; const auto ifm_index{node.getInputs().at(model::operation::MaxPool2DNode::Input::INPUT)}; - const auto kh_index{node.param().kh_index}; - const auto kw_index{node.param().kw_index}; - - const auto vstride_index{node.param().vstride_index}; - const auto hstride_index{node.param().hstride_index}; - - const int32_t kh = _ctx.at(kh_index).asScalar(); - const int32_t kw = _ctx.at(kw_index).asScalar(); - - const int32_t vstride = _ctx.at(vstride_index).asScalar(); - const int32_t hstride = _ctx.at(hstride_index).asScalar(); + const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(); + const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(); // Construct operation parameters struct Param @@ -246,19 +237,15 @@ void StageGenerator::visit(const model::operation::MaxPool2DNode &node) param.ofm_index = ofm_index; param.ifm_index = ifm_index; - param.kh = kh; - param.kw = kw; + param.kh = node.param().kh; + param.kw = node.param().kw; param.ofm_shape = ::neurun::backend::cpu::kernel::getShape(_ctx.at(ofm_index)); param.ifm_shape = ::neurun::backend::cpu::kernel::getShape(_ctx.at(ifm_index)); - param.stride.vertical = vstride; - param.stride.horizontal = hstride; - - const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(); - const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(); + param.stride = node.param().stride; param.padding = neurun::util::calculatePadding(node.param().padding, ifm_shape, ofm_shape, - param.stride, kw, kh); + param.stride, param.kw, param.kh); param.activation = node.param().activation; auto tensors = _tensor_builder; diff --git a/runtimes/neurun/core/include/model/operation/MaxPool2DNode.h b/runtimes/neurun/core/include/model/operation/MaxPool2DNode.h index 7420353..8492bcf 100644 --- 
a/runtimes/neurun/core/include/model/operation/MaxPool2DNode.h +++ b/runtimes/neurun/core/include/model/operation/MaxPool2DNode.h @@ -39,13 +39,9 @@ public: struct Param { - OperandIndex kw_index; - OperandIndex kh_index; + uint32_t kw; + uint32_t kh; Stride stride; - // hstride_index and vtride_index will be deprecated - OperandIndex hstride_index; - OperandIndex vstride_index; - Padding padding; Activation activation; }; diff --git a/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc b/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc index 8c64fb4..a115c2a 100644 --- a/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc +++ b/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc @@ -70,6 +70,17 @@ Stride makeStride(Operands &operands, const OperandIndex &horizontal_index, return stride; } +uint32_t getUint32Scalar(Operands &operands, const OperandIndex index) +{ + auto int32_value = operands.at(index).asScalar<int32_t>(); + if (int32_value < 0) + { + throw std::runtime_error{"Cannot handle negative value"}; + } + + return static_cast<uint32_t>(int32_value); +} + } // namespace OperationFactory &OperationFactory::instance() @@ -173,16 +184,17 @@ OperationFactory::OperationFactory() // 6 -> FuseCode (activation) Index const auto padding_index = OperandIndex{init_param.inputs[1]}; + const auto hstride_index = OperandIndex{init_param.inputs[2]}; + const auto vstride_index = OperandIndex{init_param.inputs[3]}; + const auto kw_index = OperandIndex{init_param.inputs[4]}; + const auto kh_index = OperandIndex{init_param.inputs[5]}; const auto activation_index = OperandIndex{init_param.inputs[6]}; param.padding.type = NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); - - param.hstride_index = OperandIndex{init_param.inputs[2]}; - param.vstride_index = OperandIndex{init_param.inputs[3]}; - - param.kw_index = OperandIndex{init_param.inputs[4]}; - param.kh_index = OperandIndex{init_param.inputs[5]}; + param.stride = makeStride(operands, 
hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = operands.at(kh_index).asScalar<int32_t>(); param.activation = NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); } @@ -204,16 +216,18 @@ OperationFactory::OperationFactory() const auto padding_right_index = OperandIndex{init_param.inputs[2]}; const auto padding_top_index = OperandIndex{init_param.inputs[3]}; const auto padding_bottom_index = OperandIndex{init_param.inputs[4]}; + const auto hstride_index = OperandIndex{init_param.inputs[5]}; + const auto vstride_index = OperandIndex{init_param.inputs[6]}; + const auto kw_index = OperandIndex{init_param.inputs[7]}; + const auto kh_index = OperandIndex{init_param.inputs[8]}; const auto activation_index = OperandIndex{init_param.inputs[9]}; param.padding.type = PaddingType::EXPLICIT; param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, padding_top_index, padding_bottom_index); - param.hstride_index = OperandIndex{init_param.inputs[5]}; - param.vstride_index = OperandIndex{init_param.inputs[6]}; - - param.kw_index = OperandIndex{init_param.inputs[7]}; - param.kh_index = OperandIndex{init_param.inputs[8]}; + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); param.activation = NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); }