{
using namespace neurun::model;
+ _map[ANEURALNETWORKS_DEPTHWISE_CONV_2D] = [](const OperationFactory::Param &init_param) {
+ assert((init_param.input_count == 8 || init_param.input_count == 11) &&
+ init_param.output_count == 1);
+
+ // In common
+ // 0 -> IFM Tensor Index
+ // 1 -> Kernel Tensor Index
+ // 2 -> Bias Tensor Index
+ operand::IndexSet inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
+ operand::IndexSet outputs{init_param.outputs[0]};
+
+ operation::DepthwiseConv2DNode::Param param;
+ if (init_param.input_count == 8)
+ {
+ // Implicit Padding case
+ // Each input should be interpreted as follows:
+ //
+ // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 4 -> Stride (width) Index
+ // 5 -> Stride (height) Index
+ // 6 -> Depthwise multiplier
+ // 7 -> Activation Index
+
+ param.padding_index = operand::Index{init_param.inputs[3]};
+ param.hstride_index = operand::Index{init_param.inputs[4]};
+ param.vstride_index = operand::Index{init_param.inputs[5]};
+ param.multiplier_index = operand::Index{init_param.inputs[6]};
+ param.activation_index = operand::Index{init_param.inputs[7]};
+
+ param.explicit_padding = false;
+ }
+ else
+ {
+ // Explicit Padding case
+ // Each input should be interpreted as follows:
+ //
+ // 3 -> Padding On the Left
+ // 4 -> Padding On the Right
+ // 5 -> Padding On the Top
+ // 6 -> Padding On the Bottom
+ // 7 -> Stride (width) Index
+ // 8 -> Stride (height) Index
+ // 9 -> Depthwise multiplier
+ // 10-> Activation Index
+
+ param.padding_left_index = operand::Index{init_param.inputs[3]};
+ param.padding_right_index = operand::Index{init_param.inputs[4]};
+ param.padding_top_index = operand::Index{init_param.inputs[5]};
+ param.padding_bottom_index = operand::Index{init_param.inputs[6]};
+ param.hstride_index = operand::Index{init_param.inputs[7]};
+ param.vstride_index = operand::Index{init_param.inputs[8]};
+ param.multiplier_index = operand::Index{init_param.inputs[9]};
+ param.activation_index = operand::Index{init_param.inputs[10]};
+
+ param.explicit_padding = true;
+ }
+
+ return new operation::DepthwiseConv2DNode{inputs, outputs, param};
+ };
+
+ _map[ANEURALNETWORKS_MAX_POOL_2D] = [](const OperationFactory::Param &init_param) {
+ assert(init_param.input_count == 7 || init_param.input_count == 10);
+ assert(init_param.output_count == 1);
+
+ // In common
+ // 0 -> IFM Tensor Index
+ operand::IndexSet inputs{init_param.inputs[0]};
+ operand::IndexSet outputs{init_param.outputs[0]};
+
+ operation::MaxPool2DNode::Param param;
+ if (init_param.input_count == 7) // support implicit padding
+ {
+ // Each input should be interpreted as follows:
+ //
+ // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 2 -> Horizontal (over width) Stride Index
+ // 3 -> Vertical (over height) Stride Index
+ // 4 -> Filter Width Index
+ // 5 -> Filter Height Index
+ // 6 -> FuseCode (activation) Index
+
+ param.explicit_padding = false;
+
+ param.padding_code_index = operand::Index{init_param.inputs[1]};
+ param.hstride_index = operand::Index{init_param.inputs[2]};
+ param.vstride_index = operand::Index{init_param.inputs[3]};
+
+ param.kw_index = operand::Index{init_param.inputs[4]};
+ param.kh_index = operand::Index{init_param.inputs[5]};
+ param.activation_index = operand::Index{init_param.inputs[6]};
+ }
+ else if (init_param.input_count == 10) // support explicit padding
+ {
+ // Each input should be interpreted as follows:
+ //
+ // 1 -> Padding_left index
+ // 2 -> Padding_right index
+ // 3 -> Padding_top index
+ // 4 -> Padding_bottom index
+ // 5 -> Horizontal (over width) Stride Index
+ // 6 -> Vertical (over height) Stride Index
+ // 7 -> Filter Width Index
+ // 8 -> Filter Height Index
+ // 9 -> FuseCode (activation) Index
+
+ param.explicit_padding = true;
+
+ param.padding_left_index = operand::Index{init_param.inputs[1]};
+ param.padding_right_index = operand::Index{init_param.inputs[2]};
+ param.padding_top_index = operand::Index{init_param.inputs[3]};
+ param.padding_bottom_index = operand::Index{init_param.inputs[4]};
+ param.hstride_index = operand::Index{init_param.inputs[5]};
+ param.vstride_index = operand::Index{init_param.inputs[6]};
+
+ param.kw_index = operand::Index{init_param.inputs[7]};
+ param.kh_index = operand::Index{init_param.inputs[8]};
+ param.activation_index = operand::Index{init_param.inputs[9]};
+ }
+ return new operation::MaxPool2DNode{inputs, outputs, param};
+ };
+
+ _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = [](const OperationFactory::Param &init_param) {
+ // TODO We may reuse code here for MAX_POOL_2D. Seems like these two are identical
+ assert(init_param.input_count == 7 || init_param.input_count == 10);
+ assert(init_param.output_count == 1);
+
+ // In common
+ // 0 -> IFM Tensor Index
+ operand::IndexSet inputs{init_param.inputs[0]};
+ operand::IndexSet outputs{init_param.outputs[0]};
+
+ operation::AvgPool2DNode::Param param;
+ if (init_param.input_count == 7) // support implicit padding
+ {
+ // Each input should be interpreted as follows:
+ //
+ // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 2 -> Horizontal (over width) Stride Index
+ // 3 -> Vertical (over height) Stride Index
+ // 4 -> Filter Width Index
+ // 5 -> Filter Height Index
+ // 6 -> FuseCode (activation) Index
+
+ param.explicit_padding = false;
+
+ param.padding_code_index = operand::Index{init_param.inputs[1]};
+ param.hstride_index = operand::Index{init_param.inputs[2]};
+ param.vstride_index = operand::Index{init_param.inputs[3]};
+
+ param.kw_index = operand::Index{init_param.inputs[4]};
+ param.kh_index = operand::Index{init_param.inputs[5]};
+ param.activation_index = operand::Index{init_param.inputs[6]};
+ }
+ else if (init_param.input_count == 10) // support explicit padding
+ {
+ // Each input should be interpreted as follows:
+ //
+ // 1 -> Padding_left index
+ // 2 -> Padding_right index
+ // 3 -> Padding_top index
+ // 4 -> Padding_bottom index
+ // 5 -> Horizontal (over width) Stride Index
+ // 6 -> Vertical (over height) Stride Index
+ // 7 -> Filter Width Index
+ // 8 -> Filter Height Index
+ // 9 -> FuseCode (activation) Index
+
+ param.explicit_padding = true;
+
+ param.padding_left_index = operand::Index{init_param.inputs[1]};
+ param.padding_right_index = operand::Index{init_param.inputs[2]};
+ param.padding_top_index = operand::Index{init_param.inputs[3]};
+ param.padding_bottom_index = operand::Index{init_param.inputs[4]};
+ param.hstride_index = operand::Index{init_param.inputs[5]};
+ param.vstride_index = operand::Index{init_param.inputs[6]};
+
+ param.kw_index = operand::Index{init_param.inputs[7]};
+ param.kh_index = operand::Index{init_param.inputs[8]};
+ param.activation_index = operand::Index{init_param.inputs[9]};
+ }
+
+ return new operation::AvgPool2DNode{inputs, outputs, param};
+ };
+
+ _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param) {
+ assert(init_param.input_count >= 2); // At least one input tensor and axis
+ assert(init_param.output_count == 1);
+
+ // When there are N + 1 inputs, each input should be interpreted as follows:
+ //
+ // [0, N) -> Input tensors
+ // N -> Axis
+ //
+
+ operand::IndexSet inputs;
+ for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
+ {
+ inputs.append(operand::Index{init_param.inputs[n]});
+ }
+ operand::IndexSet outputs{init_param.outputs[0]};
+
+ operation::ConcatNode::Param param;
+ param.axis_index = operand::Index{init_param.inputs[init_param.input_count - 1]};
+
+ return new operation::ConcatNode{inputs, outputs, param};
+ };
+
_map[ANEURALNETWORKS_CAST_EX] = [](const OperationFactory::Param &init_param) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
switch (type)
{
+ case ANEURALNETWORKS_DEPTHWISE_CONV_2D:
+ case ANEURALNETWORKS_MAX_POOL_2D:
+ case ANEURALNETWORKS_AVERAGE_POOL_2D:
+ case ANEURALNETWORKS_CONCATENATION:
case ANEURALNETWORKS_ADD:
case ANEURALNETWORKS_SUB:
case ANEURALNETWORKS_CONV_2D:
break;
}
- case ANEURALNETWORKS_DEPTHWISE_CONV_2D:
- {
- // inputCount is either 8 or 11 acccording to NN API specification.
- // - Padding is implicit when inputCount is 8
- // - Padding is explicit when inputCount is 11
- assert(inputCount == 8 || inputCount == 11);
- assert(outputCount == 1);
-
- using GraphNode = neurun::model::operation::DepthwiseConv2DNode;
- _model->addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
- break;
- }
- case ANEURALNETWORKS_MAX_POOL_2D:
- {
- // inputCount is either 7 or 10 acccording to NN API specification.
- // - Padding is implicit when inputCount is 7
- // - Padding is explicit when inputCount is 10
- assert(inputCount == 7 || inputCount == 10);
- assert(outputCount == 1);
-
- using GraphNode = neurun::model::operation::MaxPool2DNode;
-
- _model->addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
- break;
- }
- case ANEURALNETWORKS_AVERAGE_POOL_2D:
- {
- // inputCount is either 7 or 10 acccording to NN API specification.
- // - Padding is implicit when inputCount is 7
- // - Padding is explicit when inputCount is 10
- assert(inputCount == 7 || inputCount == 10);
- assert(outputCount == 1);
-
- using GraphNode = neurun::model::operation::AvgPool2DNode;
-
- _model->addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
- break;
- }
- case ANEURALNETWORKS_CONCATENATION:
- {
- using GraphNode = neurun::model::operation::ConcatNode;
-
- _model->addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
-
- break;
- }
case ANEURALNETWORKS_RESHAPE:
{
using GraphNode = neurun::model::operation::ReshapeNode;
void AvgPool2DNode::accept(NodeVisitor &&v) const { v.visit(*this); }
-AvgPool2DNode::AvgPool2DNode(const model::operation::Node::InitParam &init_param)
- : model::operation::Node{OperandConstraint::createExact(1u)}
+AvgPool2DNode::AvgPool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+ const Param ¶m)
+ : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
{
- assert(init_param.input_count == 7 || init_param.input_count == 10);
- assert(init_param.output_count == 1);
-
- setInputs({init_param.inputs[0]});
- setOutputs({init_param.outputs[0]});
-
- if (init_param.input_count == 7) // support implicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 0 -> IFM Tensor Index
- // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 2 -> Horizontal (over width) Stride Index
- // 3 -> Vertial (over height) Stride Index
- // 4 -> Filter Width Index
- // 5 -> Filter Height Index
- // 6 -> FuseCode (activation) Index
-
- _param.explicit_padding = false;
-
- _param.padding_code_index = operand::Index{init_param.inputs[1]};
- _param.hstride_index = operand::Index{init_param.inputs[2]};
- _param.vstride_index = operand::Index{init_param.inputs[3]};
-
- _param.kw_index = operand::Index{init_param.inputs[4]};
- _param.kh_index = operand::Index{init_param.inputs[5]};
- _param.activation_index = operand::Index{init_param.inputs[6]};
- }
- else if (init_param.input_count == 10) // support explicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 0 -> IFM Tensor Index
- // 1 -> Padding_left index
- // 2 -> Padding_right index
- // 3 -> Padding_top index
- // 4 -> Padding_bottom index
- // 5 -> Horizontal (over width) Stride Index
- // 6 -> Vertial (over height) Stride Index
- // 7 -> Filter Width Index
- // 8 -> Filter Height Index
- // 9 -> FuseCode (activation) Index
-
- _param.explicit_padding = true;
-
- _param.padding_left_index = operand::Index{init_param.inputs[1]};
- _param.padding_right_index = operand::Index{init_param.inputs[2]};
- _param.padding_top_index = operand::Index{init_param.inputs[3]};
- _param.padding_bottom_index = operand::Index{init_param.inputs[4]};
- _param.hstride_index = operand::Index{init_param.inputs[5]};
- _param.vstride_index = operand::Index{init_param.inputs[6]};
-
- _param.kw_index = operand::Index{init_param.inputs[7]};
- _param.kh_index = operand::Index{init_param.inputs[8]};
- _param.activation_index = operand::Index{init_param.inputs[9]};
- }
}
} // namespace operation
class AvgPool2DNode : public model::operation::Node
{
public:
- AvgPool2DNode(const model::operation::Node::InitParam &init_param);
-
enum Input
{
INPUT = 0
};
public:
+ AvgPool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+ const Param ¶m);
+
+public:
virtual void accept(NodeVisitor &&) const override;
virtual std::string getName() const override { return "AvgPool2D"; }
void ConcatNode::accept(NodeVisitor &&v) const { v.visit(*this); }
-ConcatNode::ConcatNode(const model::operation::Node::InitParam &init_param)
- : model::operation::Node{OperandConstraint::createAtLeast(1u)}
-// axis is not parameter of 'setInputs'
+ConcatNode::ConcatNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+ const Param ¶m)
+ : model::operation::Node{OperandConstraint::createAtLeast(1u), inputs, outputs}, _param{param}
{
- assert(init_param.input_count >= 2); // At least one one input tensor and axis
- assert(init_param.output_count == 1);
-
- // When there are N + 1 inputs, each input should be interpreted as follows:
- //
- // [0, N) -> Input tensors
- // N -> Axis
- //
-
- {
- operand::IndexSet inds;
- for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
- {
- inds.append(operand::Index{init_param.inputs[n]});
- }
- setInputs(inds);
- }
- setOutputs({init_param.outputs[0]});
-
- _param.axis_index = operand::Index{init_param.inputs[init_param.input_count - 1]};
}
} // namespace operation
class ConcatNode : public model::operation::Node
{
public:
- ConcatNode(const model::operation::Node::InitParam &init_param);
-
struct Param
{
operand::Index axis_index;
};
public:
+ ConcatNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs, const Param ¶m);
+
+public:
virtual void accept(NodeVisitor &&) const override;
virtual std::string getName() const override { return "Concat"; }
void DepthwiseConv2DNode::accept(NodeVisitor &&v) const { v.visit(*this); }
-DepthwiseConv2DNode::DepthwiseConv2DNode(const model::operation::Node::InitParam &init_param)
- : model::operation::Node{OperandConstraint::createExact(3u)}
+DepthwiseConv2DNode::DepthwiseConv2DNode(const operand::IndexSet &inputs,
+ const operand::IndexSet &outputs, const Param ¶m)
+ : model::operation::Node{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
{
- assert((init_param.input_count == 8 || init_param.input_count == 11) &&
- init_param.output_count == 1);
-
- if (init_param.input_count == 8)
- {
- // Imlicit Padding case
- // Each input should be interpreted as follows:
- //
- // 0 -> IFM Tensor Index
- // 1 -> Kernel Tensor Index
- // 2 -> Bias Tensor Index
- // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 4 -> Stride (width) Index
- // 5 -> Stride (height) INdex
- // 6 -> Depthwise multiplier
- // 7 -> Activation Index
-
- setInputs({init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]});
- setOutputs({init_param.outputs[0]});
-
- _param.padding_index = operand::Index{init_param.inputs[3]};
- _param.hstride_index = operand::Index{init_param.inputs[4]};
- _param.vstride_index = operand::Index{init_param.inputs[5]};
- _param.multiplier_index = operand::Index{init_param.inputs[6]};
- _param.activation_index = operand::Index{init_param.inputs[7]};
-
- _param.explicit_padding = false;
- }
- else
- {
- // Explicit Padding case
- // Each input should be interpreted as follows:
- //
- // 0 -> IFM Tensor Index
- // 1 -> Kernel Tensor Index
- // 2 -> Bias Tensor Index
- // 3 -> Padding On the Left
- // 4 -> Padding On the Right
- // 5 -> Padding On the Top
- // 6 -> Padding On the Bottom
- // 7 -> Stride (width) Index
- // 8 -> Stride (height) Index
- // 9 -> Depthwise multiplier
- // 10-> Activation Index
-
- setInputs({init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]});
- setOutputs({init_param.outputs[0]});
-
- _param.padding_left_index = operand::Index{init_param.inputs[3]};
- _param.padding_right_index = operand::Index{init_param.inputs[4]};
- _param.padding_top_index = operand::Index{init_param.inputs[5]};
- _param.padding_bottom_index = operand::Index{init_param.inputs[6]};
- _param.hstride_index = operand::Index{init_param.inputs[7]};
- _param.vstride_index = operand::Index{init_param.inputs[8]};
- _param.multiplier_index = operand::Index{init_param.inputs[9]};
- _param.activation_index = operand::Index{init_param.inputs[10]};
-
- _param.explicit_padding = true;
- }
}
} // namespace operation
class DepthwiseConv2DNode : public model::operation::Node
{
public:
- DepthwiseConv2DNode(const model::operation::Node::InitParam &);
-
enum Input
{
INPUT = 0,
};
public:
+ DepthwiseConv2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+ const Param ¶m);
+
+public:
virtual void accept(NodeVisitor &&) const override;
virtual std::string getName() const override { return "DepthwiseConv2D"; }
void MaxPool2DNode::accept(NodeVisitor &&v) const { v.visit(*this); }
-MaxPool2DNode::MaxPool2DNode(const model::operation::Node::InitParam &init_param)
- : model::operation::Node{OperandConstraint::createExact(1u)}
+MaxPool2DNode::MaxPool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+ const Param ¶m)
+ : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
{
- assert(init_param.input_count == 7 || init_param.input_count == 10);
- assert(init_param.output_count == 1);
-
- setInputs({init_param.inputs[0]});
- setOutputs({init_param.outputs[0]});
-
- if (init_param.input_count == 7) // support implicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 0 -> IFM Tensor Index
- // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 2 -> Horizontal (over width) Stride Index
- // 3 -> Vertial (over height) Stride Index
- // 4 -> Filter Width Index
- // 5 -> Filter Height Index
- // 6 -> FuseCode (activation) Index
-
- _param.explicit_padding = false;
-
- _param.padding_code_index = operand::Index{init_param.inputs[1]};
- _param.hstride_index = operand::Index{init_param.inputs[2]};
- _param.vstride_index = operand::Index{init_param.inputs[3]};
-
- _param.kw_index = operand::Index{init_param.inputs[4]};
- _param.kh_index = operand::Index{init_param.inputs[5]};
- _param.activation_index = operand::Index{init_param.inputs[6]};
- }
- else if (init_param.input_count == 10) // support explicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 0 -> IFM Tensor Index
- // 1 -> Padding_left index
- // 2 -> Padding_right index
- // 3 -> Padding_top index
- // 4 -> Padding_bottom index
- // 5 -> Horizontal (over width) Stride Index
- // 6 -> Vertial (over height) Stride Index
- // 7 -> Filter Width Index
- // 8 -> Filter Height Index
- // 9 -> FuseCode (activation) Index
-
- _param.explicit_padding = true;
-
- _param.padding_left_index = operand::Index{init_param.inputs[1]};
- _param.padding_right_index = operand::Index{init_param.inputs[2]};
- _param.padding_top_index = operand::Index{init_param.inputs[3]};
- _param.padding_bottom_index = operand::Index{init_param.inputs[4]};
- _param.hstride_index = operand::Index{init_param.inputs[5]};
- _param.vstride_index = operand::Index{init_param.inputs[6]};
-
- _param.kw_index = operand::Index{init_param.inputs[7]};
- _param.kh_index = operand::Index{init_param.inputs[8]};
- _param.activation_index = operand::Index{init_param.inputs[9]};
- }
}
} // namespace operation
class MaxPool2DNode : public model::operation::Node
{
public:
- MaxPool2DNode(const model::operation::Node::InitParam &init_param);
-
enum Input
{
INPUT = 0
};
public:
+ MaxPool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+ const Param ¶m);
+
+public:
virtual void accept(NodeVisitor &&) const override;
virtual std::string getName() const override { return "MaxPool2D"; }
using GraphNode = neurun::model::operation::ConcatNode;
auto concat =
- nnfw::cpp14::make_unique<GraphNode>(GraphNodeInitParam{7, params.data(), 1, &outoperand});
+ std::unique_ptr<GraphNode>{dynamic_cast<GraphNode *>(OperationFactory::instance().create(
+ ANEURALNETWORKS_CONCATENATION, {7, params.data(), 1, &outoperand}))};
ASSERT_EQ(concat->getInputs().size(), 6);
ASSERT_EQ(concat->getInputs().at(Index{0}).value(), params[0]);