#include "graph/Graph.h"
#include "frontend/wrapper/model.h"
#include "frontend/wrapper/memory.h"
+#include "frontend/wrapper/OperationFactory.h"
#include "model/operation/Node.Include.h"
#include "util/NNAPIConvert.h"
#include "util/logging.h"
auto &graph = model->deref();
- auto node_param =
+ auto &factory = OperationFactory::instance();
+ auto node_param = // TODO Will be removed once all creation is done via factory
neurun::model::operation::Node::InitParam{inputCount, inputs, outputCount, outputs};
+ OperationFactory::Param param{inputCount, inputs, outputCount, outputs};
try
{
}
case ANEURALNETWORKS_CONV_2D:
{
- // inputCount is either 7 or 10 acccording to NN API specification.
- // - Padding is implicit when inputCount is 7
- // - Padding is explicit when inputCount is 10
- assert(inputCount == 7 || inputCount == 10);
- assert(outputCount == 1);
-
- using GraphNode = neurun::model::operation::Conv2DNode;
-
- graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
+ auto node = factory.create(type, param);
+ graph.addOperation(std::unique_ptr<neurun::model::operation::Node>{node});
break;
}
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperationFactory.h"
+
+#include <cassert>
+
+#include "model/operation/Conv2DNode.h"
+
+// Returns the process-wide factory instance (function-local static; its
+// initialization is thread-safe since C++11).
+OperationFactory &OperationFactory::instance()
+{
+ static OperationFactory factory;
+ return factory;
+}
+
+// Registers one generator lambda per NN API operation type. Each generator
+// translates raw NN API operand indices (Param) into a neurun operation node.
+OperationFactory::OperationFactory()
+{
+ _map[ANEURALNETWORKS_CONV_2D] = [](const OperationFactory::Param &init_param) {
+ using namespace neurun::model;
+ using neurun::model::operation::Conv2DNode;
+
+ // inputCount is either 7 or 10 according to NN API specification.
+ // - Padding is implicit when inputCount is 7
+ // - Padding is explicit when inputCount is 10
+ assert(init_param.input_count == 7 || init_param.input_count == 10);
+ assert(init_param.output_count == 1);
+
+ // 0 -> IFM Tensor Index
+ // 1 -> Kernel Tensor Index
+ // 2 -> Bias Tensor Index
+
+ operand::IndexSet inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
+ operand::IndexSet outputs{init_param.outputs[0]};
+
+ Conv2DNode::Param param;
+
+ if (init_param.input_count == 7) // support implicit padding
+ {
+ // Each input should be interpreted as follows:
+ //
+ // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 4 -> Stride (width) Index
+ // 5 -> Stride (height) Index
+ // 6 -> Activation Index
+
+ param.explicit_padding = false;
+
+ param.padding_code_index = operand::Index{init_param.inputs[3]};
+ param.hstride_index = operand::Index{init_param.inputs[4]};
+ param.vstride_index = operand::Index{init_param.inputs[5]};
+ param.activation_index = operand::Index{init_param.inputs[6]};
+ }
+ else if (init_param.input_count == 10) // support explicit padding
+ {
+ // Each input should be interpreted as follows:
+ //
+ // 3 -> Padding_left index
+ // 4 -> Padding_right index
+ // 5 -> Padding_top index
+ // 6 -> Padding_bottom index
+ // 7 -> Stride (width) Index
+ // 8 -> Stride (height) Index
+ // 9 -> Activation Index
+
+ param.explicit_padding = true;
+
+ param.padding_left_index = operand::Index{init_param.inputs[3]};
+ param.padding_right_index = operand::Index{init_param.inputs[4]};
+ param.padding_top_index = operand::Index{init_param.inputs[5]};
+ param.padding_bottom_index = operand::Index{init_param.inputs[6]};
+
+ param.hstride_index = operand::Index{init_param.inputs[7]};
+ param.vstride_index = operand::Index{init_param.inputs[8]};
+
+ param.activation_index = operand::Index{init_param.inputs[9]};
+ }
+
+ // Ownership of the node transfers to the caller (wrapped into a
+ // unique_ptr at the call site in model.cc).
+ return new Conv2DNode{inputs, outputs, param};
+ };
+}
+
+// Creates an operation node for `type` from raw NN API parameters.
+// The caller takes ownership of the returned raw pointer.
+// NOTE _map.at() throws std::out_of_range for an unregistered operation type.
+neurun::model::operation::Node *OperationFactory::create(ANeuralNetworksOperationType type,
+ const OperationFactory::Param &param)
+{
+ return _map.at(type)(param);
+}
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OPERATION_FACTORY_H__
+#define __OPERATION_FACTORY_H__
+
+#include <cstdint>
+#include <functional>
+#include <unordered_map>
+
+#include "model/operation/Node.h"
+#include "NeuralNetworks.h"
+
+/**
+ * @brief A class to create a neurun operation object from NN API input parameters
+ */
+class OperationFactory
+{
+public:
+ // Raw NN API operation parameters; the operand index arrays are borrowed,
+ // not owned, and must outlive any create() call that uses them.
+ struct Param
+ {
+ uint32_t input_count;
+ const uint32_t *inputs;
+ uint32_t output_count;
+ const uint32_t *outputs;
+ };
+
+public:
+ using Generator =
+ std::function<neurun::model::operation::Node *(const OperationFactory::Param &)>;
+
+public:
+ static OperationFactory &instance();
+
+private:
+ OperationFactory();
+
+public:
+ // Returns a newly created node; the caller takes ownership.
+ neurun::model::operation::Node *create(ANeuralNetworksOperationType,
+ const OperationFactory::Param &param);
+ // TODO add "register" method for separating registration, possibly supporting custom-ops
+
+private:
+ std::unordered_map<ANeuralNetworksOperationType, Generator> _map;
+};
+
+#endif // __OPERATION_FACTORY_H__
void Conv2DNode::accept(NodeVisitor &&v) const { v.visit(*this); }
-Conv2DNode::Conv2DNode(const model::operation::Node::InitParam &init_param)
- : model::operation::Node{OperandConstraint::createExact(3u)}
+// Constructs a Conv2D node with pre-resolved input/output operand index sets
+// and the padding/stride/activation parameter pack built by OperationFactory.
+Conv2DNode::Conv2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+ const Param &param)
+ : model::operation::Node{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
{
- assert(init_param.input_count == 7 || init_param.input_count == 10);
- assert(init_param.output_count == 1);
-
- // 0 -> IFM Tensor Index
- // 1 -> Kernel Tensor Index
- // 2 -> Bias Tensor Index
-
- setInputs({init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]});
- setOutputs({init_param.outputs[0]});
-
- if (init_param.input_count == 7) // support implicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 4 -> Stride (width) Index
- // 5 -> Stride (height) INdex
- // 6 -> Activation Index
-
- _param.explicit_padding = false;
-
- _param.padding_code_index = operand::Index{init_param.inputs[3]};
- _param.hstride_index = operand::Index{init_param.inputs[4]};
- _param.vstride_index = operand::Index{init_param.inputs[5]};
- _param.activation_index = operand::Index{init_param.inputs[6]};
- }
- else if (init_param.input_count == 10) // support explicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 3 -> Padding_left index
- // 4 -> Padding_right index
- // 5 -> Padding_top index
- // 6 -> Padding_bottom index
- // 7 -> Stride (width) Index
- // 8 -> Stride (height) INdex
- // 9 -> Activation Index
-
- _param.explicit_padding = true;
-
- _param.padding_left_index = operand::Index{init_param.inputs[3]};
- _param.padding_right_index = operand::Index{init_param.inputs[4]};
- _param.padding_top_index = operand::Index{init_param.inputs[5]};
- _param.padding_bottom_index = operand::Index{init_param.inputs[6]};
-
- _param.hstride_index = operand::Index{init_param.inputs[7]};
- _param.vstride_index = operand::Index{init_param.inputs[8]};
-
- _param.activation_index = operand::Index{init_param.inputs[9]};
- }
}
} // namespace operation
class Conv2DNode : public model::operation::Node
{
public:
- Conv2DNode(const model::operation::Node::InitParam &);
-
enum Input
{
INPUT = 0,
};
public:
+ Conv2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs, const Param &param);
+
+public:
virtual void accept(NodeVisitor &&) const override;
virtual std::string getName() const override { return "Conv2D"; }
namespace operation
{
+// Constructs a node with its input/output operand index sets fixed at
+// creation time (the legacy single-argument constructor leaves them empty,
+// to be filled via setInputs/setOutputs).
+Node::Node(OperandConstraint input_constr, const operand::IndexSet &inputs,
+ const operand::IndexSet &outputs)
+ : _input_constr{input_constr}, _inputs{inputs}, _outputs{outputs}
+{
+}
+
Node::Node(OperandConstraint input_constr) : _input_constr{input_constr} {}
Node::~Node() = default;
};
public:
+ Node(OperandConstraint input_constr, const operand::IndexSet &inputs,
+ const operand::IndexSet &outputs);
Node(OperandConstraint input_constr);
virtual ~Node();
void setOutputs(const operand::IndexSet &indexes);
private:
+ OperandConstraint _input_constr;
operand::IndexSet _inputs;
operand::IndexSet _outputs;
- OperandConstraint _input_constr;
};
} // namespace operation
#include "model/operation/ConcatNode.h"
#include "model/operand/Index.h"
#include "model/operand/IndexSet.h"
+#include "frontend/wrapper/OperationFactory.h"
#include <stdexcept>
using Index = neurun::model::operand::IO::Index;
using IndexSet = neurun::model::operand::IndexSet;
-using GraphNodeInitParam = neurun::model::operation::Node::InitParam;
+using GraphNodeInitParam = neurun::model::operation::Node::InitParam; // TODO remove this
TEST(graph_operation_setIO, operation_setIO_conv)
{
using GraphNode = neurun::model::operation::Conv2DNode;
auto conv =
- nnfw::cpp14::make_unique<GraphNode>(GraphNodeInitParam{7, params.data(), 1, &outoperand});
+ std::unique_ptr<GraphNode>{dynamic_cast<GraphNode *>(OperationFactory::instance().create(
+ ANEURALNETWORKS_CONV_2D, {7, params.data(), 1, &outoperand}))};
ASSERT_EQ(conv->getInputs().at(Index{0}).value(), params[0]);
conv->setInputs({8, 9, 10});
ASSERT_NE(conv->getInputs().at(Index{0}).value(), params[0]);