From 9d55e3e91be4277354a4f8728dfab9c39bec6774 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EC=9D=B4=ED=95=9C=EC=A2=85/On-Device=20Lab=28SR=29/Enginee?=
 =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Tue, 5 Mar 2019 09:33:38 +0900
Subject: [PATCH] [neurun] Create nodes with OperationFactory (#4563)

Move node creation from `ANeuralNetworksModel_addOperation` to
`OperationFactory` for the nodes that had not yet been moved.

Signed-off-by: Hanjoung Lee
---
 .../src/frontend/wrapper/OperationFactory.cc      | 207 +++++++++++++++++++++
 runtimes/neurun/src/frontend/wrapper/model.cc     |  53 +-----
 .../neurun/src/model/operation/AvgPool2DNode.cc   |  61 +-----
 .../neurun/src/model/operation/AvgPool2DNode.h    |   6 +-
 runtimes/neurun/src/model/operation/ConcatNode.cc |  26 +--
 runtimes/neurun/src/model/operation/ConcatNode.h  |   5 +-
 .../src/model/operation/DepthwiseConv2DNode.cc    |  64 +------
 .../src/model/operation/DepthwiseConv2DNode.h     |   6 +-
 .../neurun/src/model/operation/MaxPool2DNode.cc   |  61 +-----
 .../neurun/src/model/operation/MaxPool2DNode.h    |   6 +-
 runtimes/neurun/test/graph/operation/SetIO.cc     |   3 +-
 11 files changed, 240 insertions(+), 258 deletions(-)

diff --git a/runtimes/neurun/src/frontend/wrapper/OperationFactory.cc b/runtimes/neurun/src/frontend/wrapper/OperationFactory.cc
index 042e2b6..09eb4e8 100644
--- a/runtimes/neurun/src/frontend/wrapper/OperationFactory.cc
+++ b/runtimes/neurun/src/frontend/wrapper/OperationFactory.cc
@@ -28,6 +28,213 @@ OperationFactory::OperationFactory()
 {
   using namespace neurun::model;
 
+  _map[ANEURALNETWORKS_DEPTHWISE_CONV_2D] = [](const OperationFactory::Param &init_param) {
+    assert((init_param.input_count == 8 || init_param.input_count == 11) &&
+           init_param.output_count == 1);
+
+    // In common
+    // 0 -> IFM Tensor Index
+    // 1 -> Kernel Tensor Index
+    // 2 -> Bias Tensor Index
+    operand::IndexSet inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    operation::DepthwiseConv2DNode::Param param;
+    if (init_param.input_count == 8)
+    {
+      // Implicit Padding case
+      // Each input should be interpreted as follows:
+      //
+      // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+      // 4 -> Stride (width) Index
+      // 5 -> Stride (height) Index
+      // 6 -> Depthwise multiplier
+      // 7 -> Activation Index
+
+      param.padding_index = operand::Index{init_param.inputs[3]};
+      param.hstride_index = operand::Index{init_param.inputs[4]};
+      param.vstride_index = operand::Index{init_param.inputs[5]};
+      param.multiplier_index = operand::Index{init_param.inputs[6]};
+      param.activation_index = operand::Index{init_param.inputs[7]};
+
+      param.explicit_padding = false;
+    }
+    else
+    {
+      // Explicit Padding case
+      // Each input should be interpreted as follows:
+      //
+      // 3 -> Padding On the Left
+      // 4 -> Padding On the Right
+      // 5 -> Padding On the Top
+      // 6 -> Padding On the Bottom
+      // 7 -> Stride (width) Index
+      // 8 -> Stride (height) Index
+      // 9 -> Depthwise multiplier
+      // 10-> Activation Index
+
+      param.padding_left_index = operand::Index{init_param.inputs[3]};
+      param.padding_right_index = operand::Index{init_param.inputs[4]};
+      param.padding_top_index = operand::Index{init_param.inputs[5]};
+      param.padding_bottom_index = operand::Index{init_param.inputs[6]};
+      param.hstride_index = operand::Index{init_param.inputs[7]};
+      param.vstride_index = operand::Index{init_param.inputs[8]};
+      param.multiplier_index = operand::Index{init_param.inputs[9]};
+      param.activation_index = operand::Index{init_param.inputs[10]};
+
+      param.explicit_padding = true;
+    }
+
+    return new operation::DepthwiseConv2DNode{inputs, outputs, param};
+  };
+
+  _map[ANEURALNETWORKS_MAX_POOL_2D] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count == 7 || init_param.input_count == 10);
+    assert(init_param.output_count == 1);
+
+    // In common
+    // 0 -> IFM Tensor Index
+    operand::IndexSet inputs{init_param.inputs[0]};
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    operation::MaxPool2DNode::Param param;
+    if (init_param.input_count == 7) // support implicit padding
+    {
+      // Each input should be interpreted as follows:
+      //
+      // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+      // 2 -> Horizontal (over width) Stride Index
+      // 3 -> Vertical (over height) Stride Index
+      // 4 -> Filter Width Index
+      // 5 -> Filter Height Index
+      // 6 -> FuseCode (activation) Index
+
+      param.explicit_padding = false;
+
+      param.padding_code_index = operand::Index{init_param.inputs[1]};
+      param.hstride_index = operand::Index{init_param.inputs[2]};
+      param.vstride_index = operand::Index{init_param.inputs[3]};
+
+      param.kw_index = operand::Index{init_param.inputs[4]};
+      param.kh_index = operand::Index{init_param.inputs[5]};
+      param.activation_index = operand::Index{init_param.inputs[6]};
+    }
+    else if (init_param.input_count == 10) // support explicit padding
+    {
+      // Each input should be interpreted as follows:
+      //
+      // 1 -> Padding_left index
+      // 2 -> Padding_right index
+      // 3 -> Padding_top index
+      // 4 -> Padding_bottom index
+      // 5 -> Horizontal (over width) Stride Index
+      // 6 -> Vertical (over height) Stride Index
+      // 7 -> Filter Width Index
+      // 8 -> Filter Height Index
+      // 9 -> FuseCode (activation) Index
+
+      param.explicit_padding = true;
+
+      param.padding_left_index = operand::Index{init_param.inputs[1]};
+      param.padding_right_index = operand::Index{init_param.inputs[2]};
+      param.padding_top_index = operand::Index{init_param.inputs[3]};
+      param.padding_bottom_index = operand::Index{init_param.inputs[4]};
+      param.hstride_index = operand::Index{init_param.inputs[5]};
+      param.vstride_index = operand::Index{init_param.inputs[6]};
+
+      param.kw_index = operand::Index{init_param.inputs[7]};
+      param.kh_index = operand::Index{init_param.inputs[8]};
+      param.activation_index = operand::Index{init_param.inputs[9]};
+    }
+    return new operation::MaxPool2DNode{inputs, outputs, param};
+  };
+
+  _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = [](const OperationFactory::Param &init_param) {
+    // TODO We may reuse code here for MAX_POOL_2D. Seems like these two are identical
+    assert(init_param.input_count == 7 || init_param.input_count == 10);
+    assert(init_param.output_count == 1);
+
+    // In common
+    // 0 -> IFM Tensor Index
+    operand::IndexSet inputs{init_param.inputs[0]};
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    operation::AvgPool2DNode::Param param;
+    if (init_param.input_count == 7) // support implicit padding
+    {
+      // Each input should be interpreted as follows:
+      //
+      // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+      // 2 -> Horizontal (over width) Stride Index
+      // 3 -> Vertical (over height) Stride Index
+      // 4 -> Filter Width Index
+      // 5 -> Filter Height Index
+      // 6 -> FuseCode (activation) Index
+
+      param.explicit_padding = false;
+
+      param.padding_code_index = operand::Index{init_param.inputs[1]};
+      param.hstride_index = operand::Index{init_param.inputs[2]};
+      param.vstride_index = operand::Index{init_param.inputs[3]};
+
+      param.kw_index = operand::Index{init_param.inputs[4]};
+      param.kh_index = operand::Index{init_param.inputs[5]};
+      param.activation_index = operand::Index{init_param.inputs[6]};
+    }
+    else if (init_param.input_count == 10) // support explicit padding
+    {
+      // Each input should be interpreted as follows:
+      //
+      // 1 -> Padding_left index
+      // 2 -> Padding_right index
+      // 3 -> Padding_top index
+      // 4 -> Padding_bottom index
+      // 5 -> Horizontal (over width) Stride Index
+      // 6 -> Vertical (over height) Stride Index
+      // 7 -> Filter Width Index
+      // 8 -> Filter Height Index
+      // 9 -> FuseCode (activation) Index
+
+      param.explicit_padding = true;
+
+      param.padding_left_index = operand::Index{init_param.inputs[1]};
+      param.padding_right_index = operand::Index{init_param.inputs[2]};
+      param.padding_top_index = operand::Index{init_param.inputs[3]};
+      param.padding_bottom_index = operand::Index{init_param.inputs[4]};
+      param.hstride_index = operand::Index{init_param.inputs[5]};
+      param.vstride_index = operand::Index{init_param.inputs[6]};
+
+      param.kw_index = operand::Index{init_param.inputs[7]};
+      param.kh_index = operand::Index{init_param.inputs[8]};
+      param.activation_index = operand::Index{init_param.inputs[9]};
+    }
+
+    return new operation::AvgPool2DNode{inputs, outputs, param};
+  };
+
+  _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count >= 2); // At least one input tensor and axis
+    assert(init_param.output_count == 1);
+
+    // When there are N + 1 inputs, each input should be interpreted as follows:
+    //
+    // [0, N) -> Input tensors
+    // N -> Axis
+    //
+
+    operand::IndexSet inputs;
+    for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
+    {
+      inputs.append(operand::Index{init_param.inputs[n]});
+    }
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    operation::ConcatNode::Param param;
+    param.axis_index = operand::Index{init_param.inputs[init_param.input_count - 1]};
+
+    return new operation::ConcatNode{inputs, outputs, param};
+  };
+
   _map[ANEURALNETWORKS_CAST_EX] = [](const OperationFactory::Param &init_param) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
diff --git a/runtimes/neurun/src/frontend/wrapper/model.cc b/runtimes/neurun/src/frontend/wrapper/model.cc
index 4b95cea..572ddfd 100644
--- a/runtimes/neurun/src/frontend/wrapper/model.cc
+++ b/runtimes/neurun/src/frontend/wrapper/model.cc
@@ -109,6 +109,10 @@ bool ANeuralNetworksModel::addOperation(ANeuralNetworksOperationType type, uint3
 
   switch (type)
   {
+    case ANEURALNETWORKS_DEPTHWISE_CONV_2D:
+    case ANEURALNETWORKS_MAX_POOL_2D:
+    case ANEURALNETWORKS_AVERAGE_POOL_2D:
+    case ANEURALNETWORKS_CONCATENATION:
     case ANEURALNETWORKS_ADD:
     case ANEURALNETWORKS_SUB:
     case ANEURALNETWORKS_CONV_2D:
@@ -122,55 +126,6 @@ bool ANeuralNetworksModel::addOperation(ANeuralNetworksOperationType type, uint3
 
       break;
     }
-    case ANEURALNETWORKS_DEPTHWISE_CONV_2D:
-    {
-      // inputCount is either 8 or 11 acccording to NN API specification.
-      // - Padding is implicit when inputCount is 8
-      // - Padding is explicit when inputCount is 11
-      assert(inputCount == 8 || inputCount == 11);
-      assert(outputCount == 1);
-
-      using GraphNode = neurun::model::operation::DepthwiseConv2DNode;
-      _model->addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
-      break;
-    }
-    case ANEURALNETWORKS_MAX_POOL_2D:
-    {
-      // inputCount is either 7 or 10 acccording to NN API specification.
-      // - Padding is implicit when inputCount is 7
-      // - Padding is explicit when inputCount is 10
-      assert(inputCount == 7 || inputCount == 10);
-      assert(outputCount == 1);
-
-      using GraphNode = neurun::model::operation::MaxPool2DNode;
-
-      _model->addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
-      break;
-    }
-    case ANEURALNETWORKS_AVERAGE_POOL_2D:
-    {
-      // inputCount is either 7 or 10 acccording to NN API specification.
-      // - Padding is implicit when inputCount is 7
-      // - Padding is explicit when inputCount is 10
-      assert(inputCount == 7 || inputCount == 10);
-      assert(outputCount == 1);
-
-      using GraphNode = neurun::model::operation::AvgPool2DNode;
-
-      _model->addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
-      break;
-    }
-    case ANEURALNETWORKS_CONCATENATION:
-    {
-      using GraphNode = neurun::model::operation::ConcatNode;
-
-      _model->addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
-      break;
-    }
     case ANEURALNETWORKS_RESHAPE:
     {
       using GraphNode = neurun::model::operation::ReshapeNode;
diff --git a/runtimes/neurun/src/model/operation/AvgPool2DNode.cc b/runtimes/neurun/src/model/operation/AvgPool2DNode.cc
index 9718980..f15b501 100644
--- a/runtimes/neurun/src/model/operation/AvgPool2DNode.cc
+++ b/runtimes/neurun/src/model/operation/AvgPool2DNode.cc
@@ -29,65 +29,10 @@ namespace operation
 
 void AvgPool2DNode::accept(NodeVisitor &&v) const { v.visit(*this); }
 
-AvgPool2DNode::AvgPool2DNode(const model::operation::Node::InitParam &init_param)
-    : model::operation::Node{OperandConstraint::createExact(1u)}
+AvgPool2DNode::AvgPool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+                             const Param &param)
+    : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
 {
-  assert(init_param.input_count == 7 || init_param.input_count == 10);
-  assert(init_param.output_count == 1);
-
-  setInputs({init_param.inputs[0]});
-  setOutputs({init_param.outputs[0]});
-
-  if (init_param.input_count == 7) // support implicit padding
-  {
-    // Each input should be interpreted as follows:
-    //
-    // 0 -> IFM Tensor Index
-    // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
-    // 2 -> Horizontal (over width) Stride Index
-    // 3 -> Vertial (over height) Stride Index
-    // 4 -> Filter Width Index
-    // 5 -> Filter Height Index
-    // 6 -> FuseCode (activation) Index
-
-    _param.explicit_padding = false;
-
-    _param.padding_code_index = operand::Index{init_param.inputs[1]};
-    _param.hstride_index = operand::Index{init_param.inputs[2]};
-    _param.vstride_index = operand::Index{init_param.inputs[3]};
-
-    _param.kw_index = operand::Index{init_param.inputs[4]};
-    _param.kh_index = operand::Index{init_param.inputs[5]};
-    _param.activation_index = operand::Index{init_param.inputs[6]};
-  }
-  else if (init_param.input_count == 10) // support explicit padding
-  {
-    // Each input should be interpreted as follows:
-    //
-    // 0 -> IFM Tensor Index
-    // 1 -> Padding_left index
-    // 2 -> Padding_right index
-    // 3 -> Padding_top index
-    // 4 -> Padding_bottom index
-    // 5 -> Horizontal (over width) Stride Index
-    // 6 -> Vertial (over height) Stride Index
-    // 7 -> Filter Width Index
-    // 8 -> Filter Height Index
-    // 9 -> FuseCode (activation) Index
-
-    _param.explicit_padding = true;
-
-    _param.padding_left_index = operand::Index{init_param.inputs[1]};
-    _param.padding_right_index = operand::Index{init_param.inputs[2]};
-    _param.padding_top_index = operand::Index{init_param.inputs[3]};
-    _param.padding_bottom_index = operand::Index{init_param.inputs[4]};
-    _param.hstride_index = operand::Index{init_param.inputs[5]};
-    _param.vstride_index = operand::Index{init_param.inputs[6]};
-
-    _param.kw_index = operand::Index{init_param.inputs[7]};
-    _param.kh_index = operand::Index{init_param.inputs[8]};
-    _param.activation_index = operand::Index{init_param.inputs[9]};
-  }
 }
 
 } // namespace operation
diff --git a/runtimes/neurun/src/model/operation/AvgPool2DNode.h b/runtimes/neurun/src/model/operation/AvgPool2DNode.h
index ac77e68..2118ffb 100644
--- a/runtimes/neurun/src/model/operation/AvgPool2DNode.h
+++ b/runtimes/neurun/src/model/operation/AvgPool2DNode.h
@@ -31,8 +31,6 @@ namespace operation
 class AvgPool2DNode : public model::operation::Node
 {
 public:
-  AvgPool2DNode(const model::operation::Node::InitParam &init_param);
-
   enum Input
   {
     INPUT = 0
@@ -59,6 +57,10 @@ public:
   };
 
 public:
+  AvgPool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+                const Param &param);
+
+public:
   virtual void accept(NodeVisitor &&) const override;
 
   virtual std::string getName() const override { return "AvgPool2D"; }
diff --git a/runtimes/neurun/src/model/operation/ConcatNode.cc b/runtimes/neurun/src/model/operation/ConcatNode.cc
index e4211d1..e25fcd0 100644
--- a/runtimes/neurun/src/model/operation/ConcatNode.cc
+++ b/runtimes/neurun/src/model/operation/ConcatNode.cc
@@ -29,30 +29,10 @@ namespace operation
 
 void ConcatNode::accept(NodeVisitor &&v) const { v.visit(*this); }
 
-ConcatNode::ConcatNode(const model::operation::Node::InitParam &init_param)
-    : model::operation::Node{OperandConstraint::createAtLeast(1u)}
-// axis is not parameter of 'setInputs'
+ConcatNode::ConcatNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+                       const Param &param)
+    : model::operation::Node{OperandConstraint::createAtLeast(1u), inputs, outputs}, _param{param}
 {
-  assert(init_param.input_count >= 2); // At least one one input tensor and axis
-  assert(init_param.output_count == 1);
-
-  // When there are N + 1 inputs, each input should be interpreted as follows:
-  //
-  // [0, N) -> Input tensors
-  // N -> Axis
-  //
-
-  {
-    operand::IndexSet inds;
-    for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
-    {
-      inds.append(operand::Index{init_param.inputs[n]});
-    }
-    setInputs(inds);
-  }
-  setOutputs({init_param.outputs[0]});
-
-  _param.axis_index = operand::Index{init_param.inputs[init_param.input_count - 1]};
 }
 
 } // namespace operation
diff --git a/runtimes/neurun/src/model/operation/ConcatNode.h b/runtimes/neurun/src/model/operation/ConcatNode.h
index b69ee2f..2d87abb 100644
--- a/runtimes/neurun/src/model/operation/ConcatNode.h
+++ b/runtimes/neurun/src/model/operation/ConcatNode.h
@@ -31,14 +31,15 @@ namespace operation
 class ConcatNode : public model::operation::Node
 {
 public:
-  ConcatNode(const model::operation::Node::InitParam &init_param);
-
   struct Param
   {
     operand::Index axis_index;
   };
 
 public:
+  ConcatNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs, const Param &param);
+
+public:
   virtual void accept(NodeVisitor &&) const override;
 
   virtual std::string getName() const override { return "Concat"; }
diff --git a/runtimes/neurun/src/model/operation/DepthwiseConv2DNode.cc b/runtimes/neurun/src/model/operation/DepthwiseConv2DNode.cc
index c06163a..c34658d 100644
--- a/runtimes/neurun/src/model/operation/DepthwiseConv2DNode.cc
+++ b/runtimes/neurun/src/model/operation/DepthwiseConv2DNode.cc
@@ -29,68 +29,10 @@ namespace operation
 
 void DepthwiseConv2DNode::accept(NodeVisitor &&v) const { v.visit(*this); }
 
-DepthwiseConv2DNode::DepthwiseConv2DNode(const model::operation::Node::InitParam &init_param)
-    : model::operation::Node{OperandConstraint::createExact(3u)}
+DepthwiseConv2DNode::DepthwiseConv2DNode(const operand::IndexSet &inputs,
+                                         const operand::IndexSet &outputs, const Param &param)
+    : model::operation::Node{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
 {
-  assert((init_param.input_count == 8 || init_param.input_count == 11) &&
-         init_param.output_count == 1);
-
-  if (init_param.input_count == 8)
-  {
-    // Imlicit Padding case
-    // Each input should be interpreted as follows:
-    //
-    // 0 -> IFM Tensor Index
-    // 1 -> Kernel Tensor Index
-    // 2 -> Bias Tensor Index
-    // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
-    // 4 -> Stride (width) Index
-    // 5 -> Stride (height) INdex
-    // 6 -> Depthwise multiplier
-    // 7 -> Activation Index
-
-    setInputs({init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]});
-    setOutputs({init_param.outputs[0]});
-
-    _param.padding_index = operand::Index{init_param.inputs[3]};
-    _param.hstride_index = operand::Index{init_param.inputs[4]};
-    _param.vstride_index = operand::Index{init_param.inputs[5]};
-    _param.multiplier_index = operand::Index{init_param.inputs[6]};
-    _param.activation_index = operand::Index{init_param.inputs[7]};
-
-    _param.explicit_padding = false;
-  }
-  else
-  {
-    // Explicit Padding case
-    // Each input should be interpreted as follows:
-    //
-    // 0 -> IFM Tensor Index
-    // 1 -> Kernel Tensor Index
-    // 2 -> Bias Tensor Index
-    // 3 -> Padding On the Left
-    // 4 -> Padding On the Right
-    // 5 -> Padding On the Top
-    // 6 -> Padding On the Bottom
-    // 7 -> Stride (width) Index
-    // 8 -> Stride (height) Index
-    // 9 -> Depthwise multiplier
-    // 10-> Activation Index
-
-    setInputs({init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]});
-    setOutputs({init_param.outputs[0]});
-
-    _param.padding_left_index = operand::Index{init_param.inputs[3]};
-    _param.padding_right_index = operand::Index{init_param.inputs[4]};
-    _param.padding_top_index = operand::Index{init_param.inputs[5]};
-    _param.padding_bottom_index = operand::Index{init_param.inputs[6]};
-    _param.hstride_index = operand::Index{init_param.inputs[7]};
-    _param.vstride_index = operand::Index{init_param.inputs[8]};
-    _param.multiplier_index = operand::Index{init_param.inputs[9]};
-    _param.activation_index = operand::Index{init_param.inputs[10]};
-
-    _param.explicit_padding = true;
-  }
 }
 
 } // namespace operation
diff --git a/runtimes/neurun/src/model/operation/DepthwiseConv2DNode.h b/runtimes/neurun/src/model/operation/DepthwiseConv2DNode.h
index c3c70d6..ff06352 100644
--- a/runtimes/neurun/src/model/operation/DepthwiseConv2DNode.h
+++ b/runtimes/neurun/src/model/operation/DepthwiseConv2DNode.h
@@ -31,8 +31,6 @@ namespace operation
 class DepthwiseConv2DNode : public model::operation::Node
 {
 public:
-  DepthwiseConv2DNode(const model::operation::Node::InitParam &);
-
   enum Input
   {
     INPUT = 0,
@@ -59,6 +57,10 @@ public:
   };
 
 public:
+  DepthwiseConv2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+                      const Param &param);
+
+public:
   virtual void accept(NodeVisitor &&) const override;
 
   virtual std::string getName() const override { return "DepthwiseConv2D"; }
diff --git a/runtimes/neurun/src/model/operation/MaxPool2DNode.cc b/runtimes/neurun/src/model/operation/MaxPool2DNode.cc
index a4ec086..cfc08ab 100644
--- a/runtimes/neurun/src/model/operation/MaxPool2DNode.cc
+++ b/runtimes/neurun/src/model/operation/MaxPool2DNode.cc
@@ -29,65 +29,10 @@ namespace operation
 
 void MaxPool2DNode::accept(NodeVisitor &&v) const { v.visit(*this); }
 
-MaxPool2DNode::MaxPool2DNode(const model::operation::Node::InitParam &init_param)
-    : model::operation::Node{OperandConstraint::createExact(1u)}
+MaxPool2DNode::MaxPool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+                             const Param &param)
+    : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
 {
-  assert(init_param.input_count == 7 || init_param.input_count == 10);
-  assert(init_param.output_count == 1);
-
-  setInputs({init_param.inputs[0]});
-  setOutputs({init_param.outputs[0]});
-
-  if (init_param.input_count == 7) // support implicit padding
-  {
-    // Each input should be interpreted as follows:
-    //
-    // 0 -> IFM Tensor Index
-    // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
-    // 2 -> Horizontal (over width) Stride Index
-    // 3 -> Vertial (over height) Stride Index
-    // 4 -> Filter Width Index
-    // 5 -> Filter Height Index
-    // 6 -> FuseCode (activation) Index
-
-    _param.explicit_padding = false;
-
-    _param.padding_code_index = operand::Index{init_param.inputs[1]};
-    _param.hstride_index = operand::Index{init_param.inputs[2]};
-    _param.vstride_index = operand::Index{init_param.inputs[3]};
-
-    _param.kw_index = operand::Index{init_param.inputs[4]};
-    _param.kh_index = operand::Index{init_param.inputs[5]};
-    _param.activation_index = operand::Index{init_param.inputs[6]};
-  }
-  else if (init_param.input_count == 10) // support explicit padding
-  {
-    // Each input should be interpreted as follows:
-    //
-    // 0 -> IFM Tensor Index
-    // 1 -> Padding_left index
-    // 2 -> Padding_right index
-    // 3 -> Padding_top index
-    // 4 -> Padding_bottom index
-    // 5 -> Horizontal (over width) Stride Index
-    // 6 -> Vertial (over height) Stride Index
-    // 7 -> Filter Width Index
-    // 8 -> Filter Height Index
-    // 9 -> FuseCode (activation) Index
-
-    _param.explicit_padding = true;
-
-    _param.padding_left_index = operand::Index{init_param.inputs[1]};
-    _param.padding_right_index = operand::Index{init_param.inputs[2]};
-    _param.padding_top_index = operand::Index{init_param.inputs[3]};
-    _param.padding_bottom_index = operand::Index{init_param.inputs[4]};
-    _param.hstride_index = operand::Index{init_param.inputs[5]};
-    _param.vstride_index = operand::Index{init_param.inputs[6]};
-
-    _param.kw_index = operand::Index{init_param.inputs[7]};
-    _param.kh_index = operand::Index{init_param.inputs[8]};
-    _param.activation_index = operand::Index{init_param.inputs[9]};
-  }
 }
 
 } // namespace operation
diff --git a/runtimes/neurun/src/model/operation/MaxPool2DNode.h b/runtimes/neurun/src/model/operation/MaxPool2DNode.h
index b99ca1b..3841abb 100644
--- a/runtimes/neurun/src/model/operation/MaxPool2DNode.h
+++ b/runtimes/neurun/src/model/operation/MaxPool2DNode.h
@@ -31,8 +31,6 @@ namespace operation
 class MaxPool2DNode : public model::operation::Node
 {
 public:
-  MaxPool2DNode(const model::operation::Node::InitParam &init_param);
-
   enum Input
   {
     INPUT = 0
@@ -59,6 +57,10 @@ public:
   };
 
 public:
+  MaxPool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+                const Param &param);
+
+public:
   virtual void accept(NodeVisitor &&) const override;
 
   virtual std::string getName() const override { return "MaxPool2D"; }
diff --git a/runtimes/neurun/test/graph/operation/SetIO.cc b/runtimes/neurun/test/graph/operation/SetIO.cc
index 20e5473..dc389da 100644
--- a/runtimes/neurun/test/graph/operation/SetIO.cc
+++ b/runtimes/neurun/test/graph/operation/SetIO.cc
@@ -76,7 +76,8 @@ TEST(graph_operation_setIO, operation_setIO_concat)
   using GraphNode = neurun::model::operation::ConcatNode;
 
   auto concat =
-      nnfw::cpp14::make_unique<GraphNode>(GraphNodeInitParam{7, params.data(), 1, &outoperand});
+      std::unique_ptr<GraphNode>{dynamic_cast<GraphNode *>(OperationFactory::instance().create(
+          ANEURALNETWORKS_CONCATENATION, {7, params.data(), 1, &outoperand}))};
 
   ASSERT_EQ(concat->getInputs().size(), 6);
   ASSERT_EQ(concat->getInputs().at(Index{0}).value(), params[0]);
-- 
2.7.4
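
The patch above relies on a single pattern: OperationFactory keeps a map from ANEURALNETWORKS_* operation codes to generator lambdas, each lambda interprets OperationFactory::Param (raw input/output operand indices) and returns a freshly built node, and callers such as the updated SetIO.cc test go through OperationFactory::instance().create(type, param). The code below is a minimal, self-contained sketch of that registration-and-dispatch idea, not the actual neurun sources; the names OpFactory, Param, Node, ConcatNode and kConcatenation are illustrative stand-ins only.

#include <cassert>
#include <cstdint>
#include <functional>
#include <memory>
#include <unordered_map>
#include <vector>

// Placeholder operation code; the real frontend keys on ANEURALNETWORKS_CONCATENATION etc.
enum OperationCode
{
  kConcatenation = 0
};

// Raw operation parameters as handed over by the NN API frontend: operand indices only.
struct Param
{
  uint32_t input_count;
  const uint32_t *inputs;
  uint32_t output_count;
  const uint32_t *outputs;
};

// Minimal polymorphic node base so the factory can hand back any operation node.
struct Node
{
  virtual ~Node() = default;
};

// Stand-in for a concatenation node: N input tensor indices plus a trailing axis operand.
struct ConcatNode : Node
{
  std::vector<uint32_t> inputs;
  uint32_t axis_index = 0;
};

class OpFactory
{
public:
  using Generator = std::function<Node *(const Param &)>;

  static OpFactory &instance()
  {
    static OpFactory factory;
    return factory;
  }

  // Dispatch: look up the generator registered for the given operation code.
  Node *create(int type, const Param &param) const { return _map.at(type)(param); }

private:
  OpFactory()
  {
    // Registration: one lambda per operation code, mirroring the _map[ANEURALNETWORKS_*]
    // entries added by this patch. Each lambda interprets Param and builds a node.
    _map[kConcatenation] = [](const Param &p) -> Node * {
      assert(p.input_count >= 2 && p.output_count == 1);
      auto *node = new ConcatNode;
      for (uint32_t n = 0; n < p.input_count - 1; ++n)
        node->inputs.push_back(p.inputs[n]);          // [0, N) -> input tensors
      node->axis_index = p.inputs[p.input_count - 1]; // N -> axis
      return node;
    };
  }

  std::unordered_map<int, Generator> _map;
};

int main()
{
  uint32_t inputs[] = {1, 2, 3, 7}; // three tensor operand indices + one axis operand index
  uint32_t outputs[] = {9};
  Param param{4, inputs, 1, outputs};

  std::unique_ptr<Node> node{OpFactory::instance().create(kConcatenation, param)};
  assert(dynamic_cast<ConcatNode *>(node.get()) != nullptr);
  return 0;
}

Registering each operation as a lambda in one place is what lets ANeuralNetworksModel::addOperation shrink to a single factory call for these node types, which is exactly the effect of the model.cc hunk above.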