* Remove `INode`, `AbstractNode`, `Node`, `OpDescription` classes.
* Add `Operation` class, derive all `Op`s from it.
* Modify `Graph` to work with `Operation`s instead of `Node`.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
-set(SOURCES "modelIR/graph.cpp"
+set(SOURCES "modelIR/Graph.cpp"
"modelIR/Index.cpp"
"modelIR/ir_dot_builder.cpp"
"modelIR/IrDotDumper.cpp"
"modelIR/ir_dot_node_info.cpp"
- "modelIR/ir_node.cpp"
- "modelIR/operation.cpp"
+ "modelIR/Operation.cpp"
"modelIR/Shape.cpp"
"modelIR/ShapeInference.cpp"
"modelIR/Tensor.cpp"
#include <set>
#include <algorithm>
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
namespace nnc {
namespace mir {
/**
- * @brief replace all usages of node `node` with node `with`
+ * @brief replace all usages of operation `op` with operation `with`
* (i.e. all references in previous/next nodes )
- * @param inode a node to replace
- * @param with a node to use as a replacement
+ * @param op the operation to replace
+ * @param with the operation to use as a replacement
*/
-static void replaceUsages(const INode* node, INode* with) {
- auto with_node = dynamic_cast<AbstractNode*>(with);
- assert(with_node);
+static void replaceUsages(const Operation* op, Operation* with) {
//For each output replace prev references to `node` by `with`
- for (auto out : node->getNextNodes()) {
- auto anode = dynamic_cast<AbstractNode*>(out);
- assert(anode && "Unexpected node type");
-
- for (auto& prev : anode->getMutablePrevNodes()) {
- if (prev.node == node)
- prev.node = with;
+ for (auto out : op->getNextNodes()) {
+ for (auto& prev : out->getMutablePrevNodes()) {
+ if (prev.op == op)
+ prev.op = with;
}
}
- with_node->getMutableNextNodes() = node->getNextNodes();
+ with->getMutableNextNodes() = op->getNextNodes();
//For each input replace next references to `node` by `with`
- for (auto& in : node->getPrevNodes()) {
- auto anode = dynamic_cast<AbstractNode*>(in.node);
- assert(anode && "Unexpected node type");
-
- for (auto& next : anode->getMutableNextNodes()) {
- if (next == node)
+ for (auto& in : op->getPrevNodes()) {
+ for (auto& next : in.op->getMutableNextNodes()) {
+ if (next == op)
next = with;
}
}
- with_node->getMutablePrevNodes() = node->getPrevNodes();
+ with->getMutablePrevNodes() = op->getPrevNodes();
}
-INode::Ref Graph::getInput(const std::string& name) {
+Operation* Graph::getInput(const std::string& name) {
auto it = _inputs.find(name);
if (it == _inputs.end())
return nullptr;
return it->second;
}
-INode::Ref Graph::getOutput(const std::string& name) {
+Operation* Graph::getOutput(const std::string& name) {
auto it = _outputs.find(name);
if (it == _outputs.end())
return nullptr;
}
void Graph::accept(IVisitor* visitor) {
- std::deque<INode::Ref> q;
- std::set<INode::Ref> known_nodes;
+ std::deque<Operation*> q;
+ std::set<Operation*> known_ops;
for (const auto& e : _inputs) {
q.push_back(e.second);
- known_nodes.insert(e.second); //Consider all input _nodes resolved by default
+ known_ops.insert(e.second); //Consider all input _ops resolved by default
}
//BFS
q.pop_front();
n->accept(visitor);
for (auto out : n->getNextNodes()) {
- if (known_nodes.count(out) == 0) {
+ if (known_ops.count(out) == 0) {
bool allInputsResolved = true;
for (auto in : out->getPrevNodes()) {
- if (known_nodes.count(in.node) == 0) {
+ if (known_ops.count(in.op) == 0) {
allInputsResolved = false;
}
}
if (allInputsResolved) {
- known_nodes.insert(out);
+ known_ops.insert(out);
q.push_back(out);
}
}
}
Graph::~Graph() {
- for (auto& node : _nodes) {
+ for (auto& node : _ops) {
delete node;
}
}
-void Graph::markOutput(INode::Ref node) {
- auto it = _outputs.find(node->getName());
+void Graph::markOutput(Operation* op) {
+ auto it = _outputs.find(op->getName());
if (it != _outputs.end()) {
throw std::runtime_error("Output node with same name already exists");
}
- _outputs[node->getName()] = node;
+ _outputs[op->getName()] = op;
}
-std::vector<INode::Ref> Graph::collectInputs() {
- std::vector<INode::Ref> res;
+std::vector<Operation*> Graph::collectInputs() {
+ std::vector<Operation*> res;
for (auto& e : _inputs) {
res.emplace_back(e.second);
}
return res;
}
-std::vector<INode::Ref> Graph::collectOutputs() {
- std::vector<INode::Ref> res;
+std::vector<Operation*> Graph::collectOutputs() {
+ std::vector<Operation*> res;
for (auto& e : _outputs) {
res.emplace_back(e.second);
}
return res;
}
-void Graph::replaceNode(const INode* node, INode* with) {
- auto in = _inputs.find(node->getName());
+void Graph::replaceNode(const Operation* op, Operation* with) {
+ auto in = _inputs.find(op->getName());
if (in != _inputs.end()) {
(*in).second = with;
}
- auto out_it = _outputs.find(node->getName());
+ auto out_it = _outputs.find(op->getName());
if (out_it != _outputs.end()) {
(*out_it).second = with;
}
- replaceUsages(node, with);
+ replaceUsages(op, with);
- _nodes.erase(std::remove_if(_nodes.begin(), _nodes.end(), [node] (INode::Ref n) {
- return n == node;
- }), _nodes.end());
+ _ops.erase(std::remove_if(_ops.begin(), _ops.end(), [op] (Operation* n) {
+ return n == op;
+ }), _ops.end());
}
-Node<ops::VariableOp>* Graph::replaceWithInputNode(const INode* node) {
- auto in = create<ops::VariableOp>(node->getName());
- assert(node->getOperation()->getNumOutputs() <= 1
+ops::VariableOp* Graph::replaceWithInputNode(const Operation* op) {
+ auto in = create<ops::VariableOp>(op->getName());
+ assert(op->getNumOutputs() <= 1
&& "Only operations with single output value can be replaced with input node");
- assert(node->getNextNodes().size() <= 1
+ assert(op->getNextNodes().size() <= 1
&& "Node with multiple outputs cannot be changed into input");
- replaceNode(node, in);
+ replaceNode(op, in);
//replaceNode adds all connections of original node,
//but for input node we don't need input connections
- //
- //cast is safe since we know graph creates only AbstractNode(s)
- static_cast<AbstractNode*>(in)->getMutablePrevNodes().clear();
+ in->getMutablePrevNodes().clear();
- delete node;
+ delete op;
- return static_cast<Node<ops::VariableOp>*>(in);
+ return dynamic_cast<ops::VariableOp*>(in);
}
void Graph::replaceInputNodes(const std::vector<std::string>& new_inputs) {
- std::vector<INode::Ref> nodes_to_replace;
+ std::vector<Operation*> ops_to_replace;
std::set<std::string> new_input_set(new_inputs.begin(), new_inputs.end());
- for (auto& n : _nodes) {
- if (new_input_set.count(n->getName()) != 0) {
- nodes_to_replace.push_back(n);
+ for (auto& op : _ops) {
+ if (new_input_set.count(op->getName()) != 0) {
+ ops_to_replace.push_back(op);
}
}
_inputs.clear();
- for (auto& n : nodes_to_replace) {
- replaceWithInputNode(n);
+ for (auto& op : ops_to_replace) {
+ replaceWithInputNode(op);
}
}
std::set<std::string> new_outputs_set(new_outputs.begin(), new_outputs.end());
- for (auto& n : _nodes) {
- if (new_outputs_set.count(n->getName()) != 0) {
- markOutput(n);
+ for (auto& op : _ops) {
+ if (new_outputs_set.count(op->getName()) != 0) {
+ markOutput(op);
}
}
}
#include <iostream>
#include "core/modelIR/IrDotDumper.h"
-namespace nnc
-{
-namespace mir
-{
+namespace nnc {
+namespace mir {
-static std::vector<Shape> getInputShapes(OpDescription &op)
-{
+static std::vector<Shape> getInputShapes(Operation& op) {
std::vector<Shape> shapes;
for (std::size_t i = 0; i < op.getNumInputs(); ++i)
{
return shapes;
}
-static std::vector<Shape> getOutputShapes(const OpDescription &op)
-{
+static std::vector<Shape> getOutputShapes(const Operation& op) {
std::vector<Shape> shapes;
for (std::size_t i = 0; i < op.getNumOutputs(); ++i)
{
return shapes;
}
-void IrDotDumper::visit(INode *node, ops::BiasAddOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("BiasAdd", node->getName())
+void IrDotDumper::visit(ops::BiasAddOp &op) {
+ auto nodeInfo = DotIrNodeInfo().withType("BiasAdd", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withKernelShape(op.getWeights().getShape());
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::CappedReluOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("CappedRelu", node->getName())
+void IrDotDumper::visit(ops::CappedReluOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("CappedRelu", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withMisc("Cap", op.getCap());
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::ConcatOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("Concat", node->getName())
+void IrDotDumper::visit(ops::ConcatOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("Concat", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withMisc("Axis", op.getAxis());
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::Conv2DOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("Conv2D", node->getName())
+void IrDotDumper::visit(ops::Conv2DOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("Conv2D", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withKernelShape(op.getKernel().getShape())
.withPadType(op.getPaddingType())
.withStride(op.getStrides());
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::DepthwiseConv2DOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("DepthwiseConv2D", node->getName())
+void IrDotDumper::visit(ops::DepthwiseConv2DOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("DepthwiseConv2D", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withKernelShape(op.getKernel().getShape())
.withPadType(op.getPaddingType())
.withStride(op.getStrides());
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::FullyConnectedOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("FullyConnected", node->getName())
+void IrDotDumper::visit(ops::FullyConnectedOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("FullyConnected", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withKernelShape(op.getWeights().getShape());
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::SoftmaxOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("Softmax", node->getName())
+void IrDotDumper::visit(ops::SoftmaxOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("Softmax", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withMisc("Axis", op.getAxis());
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::PoolOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("Pool2D", node->getName())
+void IrDotDumper::visit(ops::PoolOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("Pool2D", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withShape("PoolWindow", op.getWindowShape())
.withPoolType(op.getPoolingType())
.withStride(op.getStrides());
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::ReluOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("ReLU", node->getName())
+void IrDotDumper::visit(ops::ReluOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("ReLU", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::ReshapeOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("Reshape", node->getName())
+void IrDotDumper::visit(ops::ReshapeOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("Reshape", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::VariableOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("Input", node->getName())
+void IrDotDumper::visit(ops::VariableOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("Input", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::BatchNormOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("BatchNorm", node->getName())
+void IrDotDumper::visit(ops::BatchNormOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("BatchNorm", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withMisc("Moving Average Fraction", op.getMovingAvgFraction())
.withMisc("Eps", op.getEps())
.withMisc("Spatial", op.getSpatial());
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::ScaleOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("ScaleOp", node->getName())
+void IrDotDumper::visit(ops::ScaleOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("ScaleOp", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withShape("Scale Tensor", op.getWeights().getShape());
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::DropoutOp &op)
-{
- auto nodeInfo = DotIrNodeInfo().withType("DropoutOp", node->getName())
+void IrDotDumper::visit(ops::DropoutOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("DropoutOp", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
.withMisc("DropRate", op.getRate());
- dotBuilder.updateWithNode(node, nodeInfo);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::DeConv2DOp &op) {
- auto node_info = DotIrNodeInfo().withType("DeConv2D", node->getName())
- .withInShapes(getInputShapes(op))
- .withOutShapes(getOutputShapes(op))
- .withKernelShape(op.getKernel().getShape())
- .withPadType(op.getPaddingType())
- .withStride(op.getStrides());
+void IrDotDumper::visit(ops::DeConv2DOp& op) {
+ auto node_info = DotIrNodeInfo().withType("DeConv2D", op.getName())
+ .withInShapes(getInputShapes(op))
+ .withOutShapes(getOutputShapes(op))
+ .withKernelShape(op.getKernel().getShape())
+ .withPadType(op.getPaddingType())
+ .withStride(op.getStrides());
- dotBuilder.updateWithNode(node, node_info);
+ dotBuilder.updateWithOp(&op, node_info);
}
-void IrDotDumper::visit(INode *node, ops::EluOp &op) {
-  auto node_info = DotIrNodeInfo().withType("EluOp", node->getName())
-                                  .withInShapes(getInputShapes(op))
-                                  .withOutShapes(getOutputShapes(op))
-                                  .withMisc("Alpha", op.getAlpha());
-
-  dotBuilder.updateWithNode(node, node_info);
+void IrDotDumper::visit(ops::EluOp& op) {
+  auto nodeInfo = DotIrNodeInfo().withType("EluOp", op.getName())
+                                  .withInShapes(getInputShapes(op))
+                                  .withOutShapes(getOutputShapes(op))
+                                  .withMisc("Alpha", op.getAlpha());
+
+  // Hand the collected info to the dot builder — without this call the EluOp
+  // node (and its edges) would silently be missing from the dumped graph.
+  dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode *node, ops::TanhOp &op) {
- auto node_info = DotIrNodeInfo().withType("TanhOp", node->getName())
- .withInShapes(getInputShapes(op))
- .withOutShapes(getOutputShapes(op));
+void IrDotDumper::visit(ops::TanhOp& op) {
+ auto nodeInfo = DotIrNodeInfo().withType("TanhOp", op.getName())
+ .withInShapes(getInputShapes(op))
+ .withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithNode(node, node_info);
+ dotBuilder.updateWithOp(&op, nodeInfo);
}
-void mir::IrDotDumper::visit(INode *node, ops::ElementwiseOp &op) {
-  auto node_info = DotIrNodeInfo().withType("ElementwiseOp", node->getName())
-                                  .withInShapes(getInputShapes(op))
-                                  .withOutShapes(getOutputShapes(op))
-                                  .withMisc("Operation", ( int ) op.getOpType());
+void mir::IrDotDumper::visit(ops::ElementwiseOp& op) {
+  // Node type label must be "ElementwiseOp" — matches the removed line above
+  // (the replacement had copy-pasted "TanhOp" from the previous visitor).
+  auto nodeInfo = DotIrNodeInfo().withType("ElementwiseOp", op.getName())
+                                 .withInShapes(getInputShapes(op))
+                                 .withOutShapes(getOutputShapes(op))
+                                 .withMisc("Operation", ( int ) op.getOpType());
-  dotBuilder.updateWithNode(node, node_info);
+  dotBuilder.updateWithOp(&op, nodeInfo);
}
-void IrDotDumper::visit(INode* node, ops::SqueezeOp& op) {
- auto node_info = DotIrNodeInfo().withType("SqueezeOp", node->getName())
- .withInShapes(getInputShapes(op))
- .withOutShapes(getOutputShapes(op));
+void IrDotDumper::visit(ops::SqueezeOp& op) {
+ auto node_info = DotIrNodeInfo().withType("SqueezeOp", op.getName())
+ .withInShapes(getInputShapes(op))
+ .withOutShapes(getOutputShapes(op));
for (auto dim : op.getDimsToSqueeze()) {
node_info.withMisc("SqueezeDim", dim);
}
- dotBuilder.updateWithNode(node, node_info);
+ dotBuilder.updateWithOp(&op, node_info);
}
-void mir::IrDotDumper::visit(INode* node, ops::PadOp& op) {
- auto node_info = DotIrNodeInfo().withType("PadOp", node->getName())
+void mir::IrDotDumper::visit(ops::PadOp& op) {
+ auto node_info = DotIrNodeInfo().withType("PadOp", op.getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op));
- dotBuilder.updateWithNode(node, node_info);
+ dotBuilder.updateWithOp(&op, node_info);
}
} // namespace mir
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "core/modelIR/Operation.h"
+#include "core/modelIR/operations/FullyConnectedOp.h"
+#include "core/modelIR/operations/SoftmaxOp.h"
+#include "core/modelIR/operations/CappedReluOp.h"
+#include "core/modelIR/operations/DepthwiseConv2DOp.h"
+#include "core/modelIR/operations/Conv2DOp.h"
+#include "core/modelIR/operations/Deconv2DOp.h"
+#include "core/modelIR/operations/PoolOp.h"
+#include "core/modelIR/operations/VariableOp.h"
+#include "core/modelIR/operations/ReluOp.h"
+#include "core/modelIR/operations/EluOp.h"
+#include "core/modelIR/operations/ConcatOp.h"
+#include "core/modelIR/operations/BiasAddOp.h"
+#include "core/modelIR/operations/BatchNormOp.h"
+#include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/DropoutOp.h"
+#include "core/modelIR/operations/TanhOp.h"
+#include "core/modelIR/operations/ElementwiseOp.h"
+#include "core/modelIR/operations/SqueezeOp.h"
+#include "core/modelIR/operations/ReshapeOp.h"
+#include "core/modelIR/operations/PadOp.h"
+
+#include <cassert>
+
+namespace nnc {
+namespace mir {
+
+// Constructs an operation of the given type tag. Input slots are preallocated
+// so that connectInputTo() can assign an arbitrary index without resizing;
+// output (next-node) links are appended dynamically by consumers.
+Operation::Operation(Type type, std::size_t max_inputs, std::size_t max_outputs)
+    : _type(type), _max_inputs(max_inputs), _max_outputs(max_outputs) {
+  _inputs.resize(max_inputs);
+}
+
+// Wires `descriptor` (an output of another operation) into input slot
+// `inputIndex` of this operation, and registers `this` as a consumer on the
+// producing operation's output list (cross-instance private access is legal
+// since both objects are of class Operation).
+void Operation::connectInputTo(int inputIndex, const IODescriptor& descriptor) {
+  descriptor.op->_outputs.emplace_back(this);
+  _inputs[inputIndex] = descriptor;
+}
+
+// Returns a descriptor identifying output `index` of this operation.
+const IODescriptor Operation::getOutput(std::size_t index) {
+  return IODescriptor{.op = this, .index = index};
+}
+
+// Shape accessors: shapes are stored in index-keyed maps filled by shape
+// inference; at() is used so an unset index faults loudly rather than
+// default-constructing an empty shape.
+const Shape& Operation::getInputShape(std::size_t index) const {
+  assert(index < getNumInputs());
+  return _inputShapes.at(index);
+}
+
+void Operation::setInputShape(std::size_t index, const Shape& shape) {
+  assert(index < getNumInputs());
+  _inputShapes[index] = shape;
+}
+
+const Shape& Operation::getOutputShape(std::size_t index) const {
+  assert(index < getNumOutputs());
+  return _outputShapes.at(index);
+}
+
+void Operation::setOutputShape(std::size_t index, const Shape& shape) {
+  assert(index < getNumOutputs());
+  _outputShapes[index] = shape;
+}
+
+// Double-dispatch replacement for the old per-Node virtual accept: the switch
+// over the _type tag is generated from operations.lst.h, and the static_cast
+// is safe only as long as each Op subclass passes its own matching Type tag
+// to the Operation constructor.
+void Operation::accept(IVisitor* v) {
+  switch (getType()) {
+#define HANDLE_OP(OpType, OpClass) \
+    case Type::OpType: \
+      v->visit(static_cast<ops::OpClass&>(*this)); \
+      break;
+#include "core/modelIR/operations/operations.lst.h"
+#undef HANDLE_OP
+  }
+}
+
+} // namespace mir
+} // namespace nnc
using nnc::mir::Shape;
template<class Op>
-void fillHWShapesForPaddedOperations(Op &op, const Shape &windowShape, Shape &outShape)
+void fillHWShapesForPaddedOperations(Op& op, const Shape &windowShape, Shape &outShape)
{
auto &strides = op.getStrides();
auto &inShape = op.getInputShape(0);
op.setPadding(inRank - 1, 0);
}
-void ShapeInference::visit(INode::Ref node, ops::ConcatOp &op)
-{
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::ConcatOp& op) {
+ fillInputShapes(op);
int32_t axis = op.getAxis();
Shape outShape;
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(INode::Ref node, ops::Conv2DOp &op)
-{
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::Conv2DOp& op) {
+ fillInputShapes(op);
Shape outShape;
auto &kernel = op.getKernel();
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(INode::Ref node, ops::VariableOp &op)
-{
+void ShapeInference::visit(ops::VariableOp& op) {
(void)op;
- (void)node;
// No need to do anything for inputs. These should be set by user
}
-void ShapeInference::fillInputShapes(INode::Ref node, OpDescription &op)
-{
+void ShapeInference::fillInputShapes(Operation& op) {
size_t i = 0;
- for (auto &in : node->getPrevNodes())
+ for (auto &in : op.getPrevNodes())
{
- const Shape &inShape = in.node->getOperation()->getOutputShape(in.index);
+ const Shape &inShape = in.op->getOutputShape(in.index);
op.setInputShape(i++, inShape);
}
}
-void ShapeInference::visit(INode::Ref node, ops::ReluOp &op)
-{
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::ReluOp& op) {
+ fillInputShapes(op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(INode::Ref node, ops::SoftmaxOp &op)
-{
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::SoftmaxOp& op) {
+ fillInputShapes(op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(INode::Ref node, ops::PoolOp &op)
-{
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::PoolOp& op) {
+ fillInputShapes(op);
Shape outShape;
auto &windowShape = op.getWindowShape();
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(INode::Ref node, ops::FullyConnectedOp &op)
-{
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::FullyConnectedOp& op) {
+ fillInputShapes(op);
const Shape &inShape = op.getInputShape(0);
const Shape &wShape = op.getWeights().getShape();
const int32_t weightsRank = wShape.rank();
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(INode::Ref node, ops::CappedReluOp &op)
-{
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::CappedReluOp& op) {
+ fillInputShapes(op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(INode::Ref node, ops::DepthwiseConv2DOp &op)
-{
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::DepthwiseConv2DOp& op) {
+ fillInputShapes(op);
Shape outShape;
auto &kernelShape = op.getKernel().getShape();
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(INode::Ref node, ops::BiasAddOp &op)
-{
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::BiasAddOp& op) {
+ fillInputShapes(op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(INode::Ref node, ops::ReshapeOp &op)
-{
+void ShapeInference::visit(ops::ReshapeOp& op) {
// Reshape should have it's output shape filled by importer/user
- fillInputShapes(node, op);
+ fillInputShapes(op);
auto& inShape = op.getInputShape(0);
auto outShape = op.getOutputShape(0);
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(INode::Ref node, ops::ScaleOp &op)
-{
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::ScaleOp& op) {
+ fillInputShapes(op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(INode::Ref node, ops::DropoutOp &op) {
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::DropoutOp& op) {
+ fillInputShapes(op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(INode::Ref node, ops::BatchNormOp &op) {
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::BatchNormOp& op) {
+ fillInputShapes(op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(INode::Ref node, ops::DeConv2DOp &op) {
+void ShapeInference::visit(ops::DeConv2DOp& op) {
/**
see https://github.com/tensorflow/tensorflow/issues/2118
for reason why the output shape is what it is.
output = input * stride + filter - stride # VALID
output = input * stride - stride + 1 # SAME
*/
- fillInputShapes(node, op);
+ fillInputShapes(op);
Shape out_shape;
Shape in_shape = op.getInputShape(0);
op.setOutputShape(0, out_shape);
}
-void ShapeInference::visit(INode *node, ops::EluOp &op) {
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::EluOp& op) {
+ fillInputShapes(op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(INode::Ref node, ops::TanhOp &op) {
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::TanhOp& op) {
+ fillInputShapes(op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(INode::Ref node, ops::ElementwiseOp &op) {
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::ElementwiseOp& op) {
+ fillInputShapes(op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(INode* node, ops::SqueezeOp& op) {
- fillInputShapes(node, op);
+void ShapeInference::visit(ops::SqueezeOp& op) {
+ fillInputShapes(op);
assert(op.getNumInputs() == 1);
const auto& input_shape = op.getInputShape(0);
op.setOutputShape(0, output_shape);
}
-void ShapeInference::visit(INode* node, ops::PadOp& op) {
+void ShapeInference::visit(ops::PadOp& op) {
/**
padded size of each dimension D of the output is:
paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]
*/
- fillInputShapes(node, op);
+ fillInputShapes(op);
const Shape& in_shape = op.getInputShape(0);
Shape out_shape;
namespace nnc {
namespace mir {
-#define OP_TYPE(OpType) void Visitor::visit(INode*, ops::OpType&) {}
+#define HANDLE_OP(OpType, OpClass) void Visitor::visit(ops::OpClass&) {}
#include "core/modelIR/operations/operations.lst.h"
-#undef OP_TYPE
+#undef HANDLE_OP
} // namespace mir
} // namespace nnc
namespace mir
{
-void IrDotBuilder::updateWithNode(INode *node, const DotIrNodeInfo &irNodeInfo)
+void IrDotBuilder::updateWithOp(Operation* op, const DotIrNodeInfo& irNodeInfo)
{
- addNode(node, irNodeInfo);
- for (auto &prev : node->getPrevNodes())
+ addNode(op, irNodeInfo);
+ for (auto &prev : op->getPrevNodes())
{
- addEdge(prev.node, node);
+ addEdge(prev.op, op);
}
}
os << "digraph D {" << std::endl << dot.str() << std::endl << "}" << std::endl;
}
-void IrDotBuilder::addNode(INode *node, const DotIrNodeInfo &irNode)
+void IrDotBuilder::addNode(Operation* op, const DotIrNodeInfo& irNode)
{
- dot << node->getId() << " [shape=record label=\"" << irNode.getLabel() << "\"];" << std::endl;
+ dot << op->getId() << " [shape=record label=\"" << irNode.getLabel() << "\"];" << std::endl;
}
-void IrDotBuilder::addEdge(INode *node1, INode *node2)
+void IrDotBuilder::addEdge(Operation* op1, Operation* op2)
{
- dot << node1->getId() << " -> " << node2->getId() << ";" << std::endl;
+ dot << op1->getId() << " -> " << op2->getId() << ";" << std::endl;
}
} // namespace mir
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <core/modelIR/ir_node.h>
-
-#include "core/modelIR/ir_node.h"
-
-namespace nnc
-{
-namespace mir
-{
-
-const std::vector<INode::Ref> &AbstractNode::getNextNodes() const { return _outputs; }
-
-const std::vector<INode::IODescriptor> &AbstractNode::getPrevNodes() const
-{
- return _inputs;
-}
-
-void AbstractNode::connectInputTo(const int inputIndex, const IODescriptor &descriptor)
-{
- AbstractNode *buf_ptr = dynamic_cast<AbstractNode *>(descriptor.node);
- assert(buf_ptr);
- buf_ptr->addNextNode(this);
- _inputs[inputIndex] = descriptor;
-}
-
-void AbstractNode::addNextNode(INode::Ref const node) { _outputs.emplace_back(node); }
-
-const INode::IODescriptor AbstractNode::getOutput(size_t index)
-{
- return IODescriptor{.node = this, .index = index};
-}
-
-AbstractNode::AbstractNode(size_t num_inputs) {
- _inputs.resize(num_inputs);
-}
-
-std::vector<INode::IODescriptor>& AbstractNode::getMutablePrevNodes() {
- return _inputs;
-}
-
-std::vector<INode::Ref>& AbstractNode::getMutableNextNodes() {
- return _outputs;
-}
-
-} // namespace mir
-} // namespace nnc
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cassert>
-
-#include "core/modelIR/operations/operation.h"
-
-namespace nnc
-{
-namespace mir
-{
-
-const Shape &OpDescription::getInputShape(const size_t index) const {
- assert(index < getNumInputs());
- return _inputShapes.at(index);
-}
-
-void OpDescription::setInputShape(const size_t index, const Shape &shape) {
- assert(index < getNumInputs());
- _inputShapes[index] = shape;
-}
-
-const Shape &OpDescription::getOutputShape(const size_t index) const {
- assert(index < getNumOutputs());
- return _outputShapes.at(index);
-}
-
-void OpDescription::setOutputShape(const size_t index, const Shape &shape) {
- assert(index < getNumOutputs());
- _outputShapes[index] = shape;
-}
-
-OpDescription::OpDescription(const size_t max_inputs, const size_t max_outputs)
- : _max_inputs(max_inputs), _max_outputs(max_outputs) {
-}
-
-size_t OpDescription::getNumInputs() const { return _max_inputs; }
-
-size_t OpDescription::getNumOutputs() const { return _max_outputs; }
-
-} // namespace mir
-} // namespace nnc
#include <type_traits>
#include <unordered_map>
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
#include "core/modelIR/operations/VariableOp.h"
-#include "core/modelIR/ir_node.h"
namespace nnc {
namespace mir {
virtual ~Graph();
template <typename T, typename ...Args>
- //make this method callable only with OpDescription subclasses
- typename std::enable_if<std::is_convertible<T*, OpDescription*>::value, INode::Ref>::type
+ //make this method callable only with Operation subclasses
+ typename std::enable_if<std::is_convertible<T*, Operation*>::value, Operation*>::type
create(const std::string& name, Args&&...args) {
- auto node = Node<T>::createNode(name, _lastNodeId++, std::forward<Args>(args)...);
- registerNode(node);
- return node;
+ auto op = new T(std::forward<Args>(args)...);
+ op->setId(_lastNodeId++);
+ op->setName(name);
+ registerOp(op);
+ return op;
}
void accept(IVisitor* visitor);
- void markOutput(INode::Ref node);
- INode::Ref getInput(const std::string& name);
- INode::Ref getOutput(const std::string& name);
+ void markOutput(Operation* op);
+ Operation* getInput(const std::string& name);
+ Operation* getOutput(const std::string& name);
/**
* @brief Returns all inputs from graph
* @returns vector containing all graph input nodes
*/
- std::vector<INode::Ref> collectInputs();
+ std::vector<Operation*> collectInputs();
/**
* @brief Returns all outputs from graph
* @returns vector containing all graph outputs nodes
*/
- std::vector<INode::Ref> collectOutputs();
+ std::vector<Operation*> collectOutputs();
/**
* @brief Subsitude node in graph with another keeping all edges
- * @param node Node to subsitude
+ * @param op Node to substitute
* @param with Node to place instead
*/
- void replaceNode(const INode* node, INode* with);
+ void replaceNode(const Operation* op, Operation* with);
/**
* @brief Replaces referenced node with input(VariableOp) node
- * @param node Node to replace
+ * @param op Node to replace
* @return Input node which is placed in graph instead of passed node
* @warning deletes passed node
*/
- Node<ops::VariableOp>* replaceWithInputNode(const INode* node);
+ ops::VariableOp* replaceWithInputNode(const Operation* op);
/**
* @brief Change graph inputs to nodes with names in newInputs
void replaceOutputNodes(const std::vector<std::string>& new_outputs);
private:
- void registerNode(INode::Ref node) {
- _nodes.push_back(node);
+ void registerOp(Operation* op) {
+ _ops.push_back(op);
}
- //TODO: maybe make user to mark input _nodes in a more obvious way
- void registerNode(Node<ops::VariableOp>* node) {
- auto it = _inputs.find(node->getName());
+ //TODO: maybe make the user mark input _ops in a more obvious way
+ void registerOp(ops::VariableOp* op) {
+ auto it = _inputs.find(op->getName());
if( it != _inputs.end()) {
throw std::runtime_error("Input name collision");
}
- _inputs.insert(it, {node->getName(), node});
- _nodes.push_back(node);
+ _inputs.insert(it, {op->getName(), op});
+ _ops.push_back(op);
}
- std::vector<INode::Ref> _nodes;
+ std::vector<Operation*> _ops;
size_t _lastNodeId = 0;
- std::unordered_map<std::string, INode::Ref> _inputs;
- std::unordered_map<std::string, INode::Ref> _outputs;
+ std::unordered_map<std::string, Operation*> _inputs;
+ std::unordered_map<std::string, Operation*> _outputs;
};
} // namespace mir
#include "core/modelIR/operations/VariableOp.h"
#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/EluOp.h"
-#include "core/modelIR/operations/operation.h"
#include "core/modelIR/operations/ConcatOp.h"
#include "core/modelIR/operations/BiasAddOp.h"
#include "core/modelIR/operations/ReshapeOp.h"
* @brief Model IR visitor that can be used to output Model IR as a .dot graph.
* @usage Run on a Model IR graph as a visitor, and then call writeDot passing it a stream
*/
-class IrDotDumper : public IVisitor
-{
+class IrDotDumper : public IVisitor {
public:
- void visit(INode *node, ops::ConcatOp &op) override;
- void visit(INode *node, ops::ReluOp &op) override;
- void visit(INode *node, ops::Conv2DOp &op) override;
- void visit(INode *node, ops::DepthwiseConv2DOp &op) override;
- void visit(INode *node, ops::SoftmaxOp &op) override;
- void visit(INode *node, ops::PoolOp &op) override;
- void visit(INode *node, ops::FullyConnectedOp &op) override;
- void visit(INode *node, ops::CappedReluOp &op) override;
- void visit(INode *node, ops::BiasAddOp &op) override;
- void visit(INode *node, ops::VariableOp &op) override;
- void visit(INode *node, ops::ReshapeOp &op) override;
- void visit(INode *node, ops::ScaleOp &op) override;
- void visit(INode *node, ops::BatchNormOp &op) override;
- void visit(INode *node, ops::DropoutOp &op) override;
- void visit(INode *node, ops::DeConv2DOp &op) override;
- void visit(INode *node, ops::EluOp &op) override;
- void visit(INode *node, ops::TanhOp &op) override;
- void visit(INode *node, ops::ElementwiseOp &op) override;
- void visit(INode* node, ops::SqueezeOp& op) override;
- void visit(INode* node, ops::PadOp& op) override;
+ void visit(ops::ConcatOp& op) override;
+ void visit(ops::ReluOp& op) override;
+ void visit(ops::Conv2DOp& op) override;
+ void visit(ops::DepthwiseConv2DOp& op) override;
+ void visit(ops::SoftmaxOp& op) override;
+ void visit(ops::PoolOp& op) override;
+ void visit(ops::FullyConnectedOp& op) override;
+ void visit(ops::CappedReluOp& op) override;
+ void visit(ops::BiasAddOp& op) override;
+ void visit(ops::VariableOp& op) override;
+ void visit(ops::ReshapeOp& op) override;
+ void visit(ops::ScaleOp& op) override;
+ void visit(ops::BatchNormOp& op) override;
+ void visit(ops::DropoutOp& op) override;
+ void visit(ops::DeConv2DOp& op) override;
+ void visit(ops::EluOp& op) override;
+ void visit(ops::TanhOp& op) override;
+ void visit(ops::ElementwiseOp& op) override;
+ void visit(ops::SqueezeOp& op) override;
+ void visit(ops::PadOp& op) override;
void writeDot(std::ostream &os) { dotBuilder.writeDot(os); };
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_IR_MODEL_OPERATION_H_
+#define _NNC_CORE_IR_MODEL_OPERATION_H_
+
+#include <string>
+#include <map>
+#include "TensorVariant.h"
+#include "core/modelIR/Visitor.h"
+
+#include "Shape.h"
+
+namespace nnc {
+namespace mir {
+
+class Operation;
+
+struct IODescriptor {
+  Operation* op;     // Data source
+  std::size_t index; // Output id
+};
+
+class Operation {
+public:
+ enum class Type {
+#define HANDLE_OP(OpType, OpClass) OpType,
+#include "core/modelIR/operations/operations.lst.h"
+#undef HANDLE_OP
+ };
+
+ virtual ~Operation() = default;
+
+ Type getType() const { return _type; }
+
+ std::size_t getId() const { return _id; }
+ void setId(std::size_t id) { _id = id; }
+
+ const std::string& getName() const { return _name; }
+ void setName(const std::string& name) { _name = name; }
+
+ std::size_t getNumInputs() const { return _max_inputs; }
+ std::size_t getNumOutputs() const { return _max_outputs; }
+
+ void connectInputTo(int inputIndex, const IODescriptor& descriptor);
+
+ const IODescriptor getOutput(std::size_t index);
+
+ const std::vector<IODescriptor>& getPrevNodes() const { return _inputs; }
+ const std::vector<Operation*>& getNextNodes() const { return _outputs; }
+
+ std::vector<IODescriptor>& getMutablePrevNodes() { return _inputs; }
+ std::vector<Operation*>& getMutableNextNodes() { return _outputs; }
+
+ const nnc::mir::Shape& getInputShape(std::size_t index) const;
+ const nnc::mir::Shape& getOutputShape(std::size_t index) const;
+ void setInputShape(std::size_t index, const nnc::mir::Shape& shape);
+ void setOutputShape(std::size_t index, const nnc::mir::Shape& shape);
+
+ void accept(IVisitor* v);
+
+protected:
+ Operation(Type type, std::size_t max_inputs, std::size_t max_outputs);
+
+private:
+ Type _type;
+ std::size_t _id;
+ std::string _name;
+ std::size_t _max_inputs;
+ std::size_t _max_outputs;
+ std::vector<IODescriptor> _inputs;
+ std::vector<Operation*> _outputs;
+ std::map<size_t, nnc::mir::Shape> _inputShapes;
+ std::map<size_t, nnc::mir::Shape> _outputShapes;
+};
+
+} // namespace mir
+} // namespace nnc
+
+#endif //_NNC_CORE_IR_MODEL_OPERATION_H_
#include <limits>
#include "core/modelIR/Visitor.h"
-#include "core/modelIR/ir_node.h"
+#include "core/modelIR/Operation.h"
namespace nnc
{
{
class ShapeInference : public IVisitor {
- public:
- void visit(INode::Ref node, ops::ConcatOp &op) override;
- void visit(INode::Ref node, ops::Conv2DOp &op) override;
- void visit(INode::Ref node, ops::DepthwiseConv2DOp &op) override;
- void visit(INode::Ref node, ops::ReluOp &op) override;
- void visit(INode::Ref node, ops::SoftmaxOp &op) override;
- void visit(INode::Ref node, ops::PoolOp &op) override;
- void visit(INode::Ref node, ops::FullyConnectedOp &op) override;
- void visit(INode::Ref node, ops::CappedReluOp &op) override;
- void visit(INode::Ref node, ops::BiasAddOp &op) override;
- void visit(INode::Ref node, ops::ReshapeOp &op) override;
- void visit(INode::Ref node, ops::VariableOp &op) override;
- void visit(INode::Ref node, ops::ScaleOp &op) override;
- void visit(INode::Ref node, ops::BatchNormOp &op) override;
- void visit(INode::Ref node, ops::DropoutOp &op) override;
- void visit(INode::Ref node, ops::TanhOp &op) override;
- void visit(INode::Ref node, ops::ElementwiseOp &op) override;
- void visit(INode::Ref node, ops::DeConv2DOp &op) override;
- void visit(INode::Ref node, ops::EluOp &op) override;
- void visit(INode* node, ops::SqueezeOp& op) override;
- void visit(INode* node, ops::PadOp& op) override;
+public:
+ void visit(ops::ConcatOp& op) override;
+ void visit(ops::Conv2DOp& op) override;
+ void visit(ops::DepthwiseConv2DOp& op) override;
+ void visit(ops::ReluOp& op) override;
+ void visit(ops::SoftmaxOp& op) override;
+ void visit(ops::PoolOp& op) override;
+ void visit(ops::FullyConnectedOp& op) override;
+ void visit(ops::CappedReluOp& op) override;
+ void visit(ops::BiasAddOp& op) override;
+ void visit(ops::ReshapeOp& op) override;
+ void visit(ops::VariableOp& op) override;
+ void visit(ops::ScaleOp& op) override;
+ void visit(ops::BatchNormOp& op) override;
+ void visit(ops::DropoutOp& op) override;
+ void visit(ops::TanhOp& op) override;
+ void visit(ops::ElementwiseOp& op) override;
+ void visit(ops::DeConv2DOp& op) override;
+ void visit(ops::EluOp& op) override;
+ void visit(ops::SqueezeOp& op) override;
+ void visit(ops::PadOp& op) override;
protected:
- void fillInputShapes(INode::Ref node, OpDescription &op);
+ void fillInputShapes(Operation& op);
};
} // namespace mir
namespace nnc {
namespace mir {
-class INode;
-
//Forward declare operations as we don't need anything but references
-namespace ops
-{
- #define OP_TYPE(OpType) class OpType;
- #include "operations/operations.lst.h"
- #undef OP_TYPE
-}
+namespace ops {
+#define HANDLE_OP(OpType, OpClass) class OpClass;
+#include "operations/operations.lst.h"
+#undef HANDLE_OP
+} // namespace ops
/**
* @brief Visitor Interface declaration
*/
class IVisitor {
- public:
- #define OP_TYPE(OpType) virtual void visit(INode*, ops::OpType&) = 0;
- #include "operations/operations.lst.h"
- #undef OP_TYPE
+public:
+#define HANDLE_OP(OpType, OpClass) virtual void visit(ops::OpClass&) = 0;
+#include "operations/operations.lst.h"
+#undef HANDLE_OP
virtual ~IVisitor() = default;
};
* only need to define an implementation of `visit` for a subset of operations in the graph,
* while not doing anything for all others.
*/
-class Visitor: public IVisitor{
+class Visitor : public IVisitor {
public:
- #define OP_TYPE(OpType) virtual void visit(INode*, ops::OpType&) override;
- #include "operations/operations.lst.h"
- #undef OP_TYPE
+#define HANDLE_OP(OpType, OpClass) virtual void visit(ops::OpClass&) override;
+#include "operations/operations.lst.h"
+#undef HANDLE_OP
~Visitor() override = default;
};
#include <sstream>
-#include "core/modelIR/ir_node.h"
#include "core/modelIR/ir_dot_node_info.h"
namespace nnc
public:
explicit IrDotBuilder() = default;
- void updateWithNode(INode *node, const DotIrNodeInfo &irNodeInfo);
+ void updateWithOp(Operation* op, const DotIrNodeInfo& irNodeInfo);
void writeDot(std::ostream &os);
private:
- void addNode(INode *node, const DotIrNodeInfo &irNode);
- void addEdge(INode *node1, INode *node2);
+ void addNode(Operation* op, const DotIrNodeInfo& irNode);
+ void addEdge(Operation* op1, Operation* op2);
std::stringstream dot;
};
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _NNC_CORE_IR_MODEL_NODE_H_
-#define _NNC_CORE_IR_MODEL_NODE_H_
-
-#include <string>
-#include <vector>
-#include <iostream>
-
-#include "core/modelIR/operations/operation.h"
-#include "core/modelIR/Visitor.h"
-
-namespace nnc
-{
-namespace mir
-{
-
-class INode
-{
-public:
- using Ref = INode *;
-
- struct IODescriptor
- {
- INode* node; // Data source
- size_t index; // Output id
- };
-
- using IODescriptorVector = std::vector<IODescriptor>;
-
- virtual const std::vector<IODescriptor> &getPrevNodes() const = 0;
- virtual const std::vector<INode::Ref> &getNextNodes() const = 0;
-
- virtual size_t getId() const = 0;
-
- virtual OpDescription *getOperation() const = 0;
-
- virtual const std::string &getName() const = 0;
- virtual void setName(const std::string &name) = 0;
-
- virtual void accept(IVisitor *v) = 0;
-
- virtual const IODescriptor getOutput(const size_t index) = 0;
- virtual void connectInputTo(const int inputIndex, const IODescriptor &descriptor) = 0;
-
- virtual ~INode() = default;
-
-protected:
- virtual void addNextNode(const INode::Ref) = 0;
-};
-
-class AbstractNode : public INode
-{
-public:
- explicit AbstractNode(size_t num_inputs);
- const std::vector<IODescriptor> &getPrevNodes() const override;
- const std::vector<INode::Ref> &getNextNodes() const override;
- void connectInputTo(const int inputIndex, const IODescriptor &descriptor) override;
- const IODescriptor getOutput(const size_t index) override;
-
- std::vector<IODescriptor>& getMutablePrevNodes();
- std::vector<INode::Ref>& getMutableNextNodes();
-
- protected:
- virtual void addNextNode(INode::Ref const node) override;
-
-private:
- std::vector<IODescriptor> _inputs;
- std::vector<INode::Ref> _outputs;
-};
-
-
-struct NodeProperties
-{
- explicit NodeProperties(std::string name, const size_t id, OpDescription *op = nullptr)
- : name(std::move(name)), op(op), id(id)
- {
- }
-
- std::string name;
- OpDescription *op;
- const size_t id;
-
- NodeProperties(NodeProperties &&nodeProps) noexcept : name(std::move(nodeProps.name)), op(nodeProps.op), id(nodeProps.id)
- {
- nodeProps.op = nullptr;
- }
-};
-
-template <typename OpType>
-class Node : public AbstractNode
-{
-public:
- OpType *getOperation() const override { return static_cast<OpType*>(_props.op); }
-
- template <typename... Args>
- static Node<OpType> *createNode(const std::string &nodeName, size_t id, Args &&... args)
- {
- auto node =
- new Node(NodeProperties(nodeName, id, new OpType(std::forward<Args>(args)...)));
- return node;
- };
-
- size_t getId() const override { return _props.id; };
-
- const std::string &getName() const override { return _props.name; };
-
- void setName(const std::string &name) override { _props.name = name; }
-
- void accept(IVisitor *v) override
- {
- v->visit(this, *static_cast<OpType*>(_props.op));
- }
-
- ~Node() override {
- delete _props.op;
- }
-
-private:
- explicit Node(NodeProperties &&properties) : AbstractNode(properties.op->getNumInputs()), _props(std::move(properties)) {};
-
- NodeProperties _props;
-};
-
-} // namespace mir
-} // namespace nnc
-
-#endif //_NNC_CORE_IR_MODEL_NODE_H_
#ifndef _NNC_CORE_IR_MODEL_BATCH_NORM_H_
#define _NNC_CORE_IR_MODEL_BATCH_NORM_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
namespace nnc
{
namespace ops
{
-class BatchNormOp : public OpDescription
+class BatchNormOp : public Operation
{
public:
explicit BatchNormOp(float movingAvgFraction, float eps, bool spatial) :
- OpDescription(1, 1),
+ Operation(Type::batchNorm, 1, 1),
_movingAvgFraction(movingAvgFraction),
_eps(eps),
_spatial(spatial)
#ifndef _NNC_CORE_IR_MODEL_BIAS_ADD_H_
#define _NNC_CORE_IR_MODEL_BIAS_ADD_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
#include "core/modelIR/TensorVariant.h"
namespace nnc
namespace ops
{
-class BiasAddOp : public OpDescription
+class BiasAddOp : public Operation
{
public:
- explicit BiasAddOp(const TensorVariant &weights) : OpDescription(1, 1), _weights(weights) {}
+ explicit BiasAddOp(const TensorVariant& weights) : Operation(Type::biasAdd, 1, 1),
+ _weights(weights) {}
const TensorVariant &getWeights() const { return _weights; }
#ifndef _NNC_CORE_IR_MODEL_CAPPED_RELU_H_
#define _NNC_CORE_IR_MODEL_CAPPED_RELU_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
namespace nnc
{
namespace ops
{
-class CappedReluOp : public OpDescription {
+class CappedReluOp : public Operation {
public:
- explicit CappedReluOp(float cap) : OpDescription(1, 1), _cap(cap) {
+ explicit CappedReluOp(float cap) : Operation(Type::cappedReLU, 1, 1), _cap(cap) {
}
float getCap() const {
#include <limits>
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
#include "core/modelIR/Shape.h"
namespace nnc
/**
* @brief Description of tensor concatenation operation.
*/
-class ConcatOp : public OpDescription
+class ConcatOp : public Operation
{
public:
- ConcatOp(int num_inputs, int32_t axis) : OpDescription(num_inputs, 1), _axis(axis) {}
+ ConcatOp(int num_inputs, int32_t axis) : Operation(Type::concat, num_inputs, 1), _axis(axis) {}
int32_t getAxis() const
{
#include <vector>
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
#include "core/modelIR/operations/common.h"
#include "core/modelIR/TensorVariant.h"
namespace ops
{
-class Conv2DOp : public OpDescription
+class Conv2DOp : public Operation
{
public:
Conv2DOp(const TensorVariant &kernel, const Shape &strides, PaddingType padding)
- : OpDescription(1, 1), _kernel(kernel), _strides(strides),
+ : Operation(Type::conv2D, 1, 1), _kernel(kernel), _strides(strides),
_padding(padding)
{
_pads.resize(3);
#ifndef _NNC_CORE_IR_MODEL_DECONV_2D_H_
#define _NNC_CORE_IR_MODEL_DECONV_2D_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
#include "core/modelIR/operations/common.h"
#include "core/modelIR/TensorVariant.h"
namespace mir {
namespace ops {
-class DeConv2DOp : public OpDescription {
+class DeConv2DOp : public Operation {
public:
DeConv2DOp(const TensorVariant &kernel, const Shape &strides, PaddingType padding)
- : OpDescription(1, 1), _kernel(kernel), _strides(strides),
+ : Operation(Type::deConv2D, 1, 1), _kernel(kernel), _strides(strides),
_padding(padding) {
_pads.resize(3);
}
#include <vector>
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
#include "core/modelIR/TensorVariant.h"
#include "core/modelIR/operations/common.h"
namespace ops
{
-class DepthwiseConv2DOp : public OpDescription
+class DepthwiseConv2DOp : public Operation
{
public:
explicit DepthwiseConv2DOp(const TensorVariant &kernel, const Shape &strides, PaddingType padding)
- : OpDescription(1, 1), _kernel(kernel), _strides(strides), _padding(padding)
+ : Operation(Type::depthwiseConv, 1, 1), _kernel(kernel), _strides(strides), _padding(padding)
{
_pads.resize(_kernel.getShape().rank());
}
#ifndef _NNC_CORE_IR_MODEL_DROPOUT_H_
#define _NNC_CORE_IR_MODEL_DROPOUT_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
namespace nnc
{
namespace ops
{
-class DropoutOp : public OpDescription {
+class DropoutOp : public Operation {
public:
- explicit DropoutOp(float rate) : OpDescription(1, 1), _rate(rate) {}
+ explicit DropoutOp(float rate) : Operation(Type::dropout, 1, 1), _rate(rate) {}
/**
* @return The ratio of random dropout
#ifndef _NNC_CORE_IR_MODEL_ELEMENTWISE_OP_H_
#define _NNC_CORE_IR_MODEL_ELEMENTWISE_OP_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
namespace nnc {
namespace mir {
namespace ops {
-class ElementwiseOp : public OpDescription {
+class ElementwiseOp : public Operation {
public:
enum class OpType {
* @param num_inputs Number of inputs
*/
explicit ElementwiseOp(OpType op_type, size_t num_inputs) :
- OpDescription(num_inputs, 1), _opType(op_type) {};
+ Operation(Type::elementwise, num_inputs, 1), _opType(op_type) {};
private:
OpType _opType;
#ifndef _NNC_CORE_IR_MODEL_ELU_H_
#define _NNC_CORE_IR_MODEL_ELU_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
namespace nnc {
namespace mir {
namespace ops {
-class EluOp : public OpDescription {
+class EluOp : public Operation {
public:
- explicit EluOp(float alpha) : OpDescription(1, 1), _alpha(alpha) {}
+ explicit EluOp(float alpha) : Operation(Type::ELU, 1, 1), _alpha(alpha) {}
float getAlpha() const {
return _alpha;
#ifndef _NNC_CORE_IR_MODEL_FULLY_CONNECTED_OP_H_
#define _NNC_CORE_IR_MODEL_FULLY_CONNECTED_OP_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
#include "core/modelIR/TensorVariant.h"
namespace nnc
namespace ops
{
-class FullyConnectedOp : public OpDescription
+class FullyConnectedOp : public Operation
{
public:
- explicit FullyConnectedOp(const TensorVariant &weights) : OpDescription(1, 1), _weights(weights) {}
+ explicit FullyConnectedOp(const TensorVariant& weights) : Operation(Type::fullyConnected, 1, 1),
+ _weights(weights) {}
const TensorVariant &getWeights() const { return _weights; }
#include <vector>
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
#include "core/modelIR/TensorVariant.h"
namespace mir {
namespace ops {
-class PadOp : public OpDescription {
+class PadOp : public Operation {
public:
enum class PaddingMode {
CONST,
};
explicit PadOp(PaddingMode paddingMode, int numDims, const TensorVariant& constant_value)
- : OpDescription(1, 1), _paddingMode(paddingMode),
+ : Operation(Type::pad, 1, 1), _paddingMode(paddingMode),
_numDims(numDims), _constant_value(constant_value) {
_paddings.resize(_numDims);
#include <vector>
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
#include "core/modelIR/operations/common.h"
#include "core/modelIR/Shape.h"
namespace ops
{
-class PoolOp : public OpDescription
+class PoolOp : public Operation
{
public:
enum class PoolingType
explicit PoolOp(const Shape &windowShape, const Shape &strides, PoolingType poolType,
PaddingType padding, BorderType borderType)
- : OpDescription(1, 1), _padding(padding), _poolingType(poolType),
+ : Operation(Type::pool, 1, 1), _padding(padding), _poolingType(poolType),
_borderType(borderType), _windowShape(windowShape), _strides(strides)
{
_pads.resize(_windowShape.rank());
#ifndef _NNC_CORE_IR_MODEL_RELU_H_
#define _NNC_CORE_IR_MODEL_RELU_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
namespace nnc
{
namespace ops
{
-class ReluOp : public OpDescription
+class ReluOp : public Operation
{
public:
- explicit ReluOp() : OpDescription(1, 1) {}
+ explicit ReluOp() : Operation(Type::ReLU, 1, 1) {}
};
} // namespace ops
#pragma once
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
namespace nnc
{
namespace ops
{
-class ReshapeOp : public OpDescription
+class ReshapeOp : public Operation
{
public:
- explicit ReshapeOp() : OpDescription(1, 1) {}
+ explicit ReshapeOp() : Operation(Type::reshape, 1, 1) {}
};
} // namespace ops
#ifndef _NNC_CORE_IR_MODEL_SCALE_H_
#define _NNC_CORE_IR_MODEL_SCALE_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
namespace nnc
{
namespace ops
{
-class ScaleOp : public OpDescription
+class ScaleOp : public Operation
{
public:
- explicit ScaleOp(const TensorVariant &weights) : OpDescription(1, 1), _weights(weights) {}
+ explicit ScaleOp(const TensorVariant& weights) : Operation(Type::scale, 1, 1),
+ _weights(weights) {}
/**
* @return The input 1-dimensional scale tensor.
#ifndef _NNC_CORE_IR_MODEL_SOFTMAX_H_
#define _NNC_CORE_IR_MODEL_SOFTMAX_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
#include "core/modelIR/Shape.h"
namespace nnc
/**
* @brief description of softmax operation.
*/
-class SoftmaxOp : public OpDescription
+class SoftmaxOp : public Operation
{
public:
- explicit SoftmaxOp(int32_t axis) : OpDescription(1, 1), _axis(axis) {}
+ explicit SoftmaxOp(int32_t axis) : Operation(Type::softmax, 1, 1), _axis(axis) {}
int32_t getAxis() const
{
#ifndef _NNC_CORE_IR_MODEL_SQUEEZE_OP_H_
#define _NNC_CORE_IR_MODEL_SQUEEZE_OP_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
namespace nnc {
namespace mir {
namespace ops {
-class SqueezeOp : public OpDescription {
+class SqueezeOp : public Operation {
public:
explicit SqueezeOp(const std::vector<int32_t>& dims_to_squeeze) :
- OpDescription(1, 1),
- _dims_to_squeeze(dims_to_squeeze) {}
+ Operation(Type::squeeze, 1, 1), _dims_to_squeeze(dims_to_squeeze) {}
int32_t getNumSqueezeDims() {
return static_cast<int32_t>(_dims_to_squeeze.size());
#define _NNC_CORE_IR_MODEL_TANH_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
namespace nnc {
namespace mir {
namespace ops {
-class TanhOp : public OpDescription {
+class TanhOp : public Operation {
public:
- explicit TanhOp() : OpDescription(1, 1) {}
+ explicit TanhOp() : Operation(Type::tanh, 1, 1) {}
};
} // namespace ops
#ifndef _NNC_CORE_IR_MODEL_VARIABLE_H_
#define _NNC_CORE_IR_MODEL_VARIABLE_H_
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
namespace nnc
{
namespace ops
{
-class VariableOp : public OpDescription
+class VariableOp : public Operation
{
public:
- explicit VariableOp() : OpDescription(0, 1) {}
+ explicit VariableOp() : Operation(Type::variable, 0, 1) {}
};
} // namespace ops
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _NNC_CORE_IR_MODEL_OPERATION_H_
-#define _NNC_CORE_IR_MODEL_OPERATION_H_
-
-#include <string>
-#include <map>
-#include "core/modelIR/TensorVariant.h"
-
-#include "core/modelIR/Shape.h"
-
-namespace nnc
-{
-namespace mir
-{
-
-class OpDescription {
- public:
- explicit OpDescription(const size_t max_inputs, const size_t max_outputs);
- virtual ~OpDescription() = default;
-
- size_t getNumInputs() const;
- size_t getNumOutputs() const;
-
- const nnc::mir::Shape &getInputShape(const size_t index) const;
- virtual void setInputShape(const size_t index, const nnc::mir::Shape &shape);
-
- virtual const nnc::mir::Shape &getOutputShape(const size_t index) const;
- void setOutputShape(const size_t index, const nnc::mir::Shape &shape);
-
- private:
- size_t _max_inputs;
- size_t _max_outputs;
-
- std::map<size_t, nnc::mir::Shape> _inputShapes;
- std::map<size_t, nnc::mir::Shape> _outputShapes;
-};
-
-} // namespace mir
-} // namespace nnc
-
-#endif //_NNC_CORE_IR_MODEL_OPERATION_H_
* limitations under the License.
*/
-#ifndef OP_TYPE
-#error "You should define OP_TYPE before including this file"
-#endif //OP_TYPE
+#ifndef HANDLE_OP
+#error "You should define HANDLE_OP before including this file"
+#endif //HANDLE_OP
-OP_TYPE(ConcatOp)
-OP_TYPE(Conv2DOp)
-OP_TYPE(DepthwiseConv2DOp)
-OP_TYPE(SoftmaxOp)
-OP_TYPE(PoolOp)
-OP_TYPE(FullyConnectedOp)
-OP_TYPE(CappedReluOp)
-OP_TYPE(BiasAddOp)
-OP_TYPE(VariableOp)
-OP_TYPE(ReluOp)
-OP_TYPE(ReshapeOp)
-OP_TYPE(ScaleOp)
-OP_TYPE(BatchNormOp)
-OP_TYPE(DropoutOp)
-OP_TYPE(TanhOp)
-OP_TYPE(ElementwiseOp)
-OP_TYPE(DeConv2DOp)
-OP_TYPE(EluOp)
-OP_TYPE(SqueezeOp)
-OP_TYPE(PadOp)
+HANDLE_OP(concat, ConcatOp)
+HANDLE_OP(conv2D, Conv2DOp)
+HANDLE_OP(depthwiseConv, DepthwiseConv2DOp)
+HANDLE_OP(softmax, SoftmaxOp)
+HANDLE_OP(pool, PoolOp)
+HANDLE_OP(fullyConnected, FullyConnectedOp)
+HANDLE_OP(cappedReLU, CappedReluOp)
+HANDLE_OP(biasAdd, BiasAddOp)
+HANDLE_OP(variable, VariableOp)
+HANDLE_OP(ReLU, ReluOp)
+HANDLE_OP(reshape, ReshapeOp)
+HANDLE_OP(scale, ScaleOp)
+HANDLE_OP(batchNorm, BatchNormOp)
+HANDLE_OP(dropout, DropoutOp)
+HANDLE_OP(tanh, TanhOp)
+HANDLE_OP(elementwise, ElementwiseOp)
+HANDLE_OP(deConv2D, DeConv2DOp)
+HANDLE_OP(ELU, EluOp)
+HANDLE_OP(squeeze, SqueezeOp)
+HANDLE_OP(pad, PadOp)
#ifndef NNCC_PASSDATA_H
#define NNCC_PASSDATA_H
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/TensorVariant.h"
#include "core/modelIR/Visitor.h"
#include "core/modelIR/TensorVariant.h"
-#include "core/modelIR/operations/operation.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Operation.h"
+#include "core/modelIR/Graph.h"
#include "ArtifactModel.h"
#include "ArtifactGeneratorCppCode.h"
#include "ArtifactGeneratorCppDecl.h"
/**
* @brief Implementations of the IVisitor visitors.
- * @param node
* @param op
*/
- void visit(mir::INode* node, mir::ops::ConcatOp& op) override;
- void visit(mir::INode* node, mir::ops::Conv2DOp& op) override;
- void visit(mir::INode* node, mir::ops::DepthwiseConv2DOp& op) override;
- void visit(mir::INode* node, mir::ops::SoftmaxOp& op) override;
- void visit(mir::INode* node, mir::ops::PoolOp& op) override;
- void visit(mir::INode* node, mir::ops::FullyConnectedOp& op) override;
- void visit(mir::INode* node, mir::ops::CappedReluOp& op) override;
- void visit(mir::INode* node, mir::ops::BiasAddOp& op) override;
- void visit(mir::INode* node, mir::ops::VariableOp& op) override;
- void visit(mir::INode* node, mir::ops::ReluOp& op) override;
- void visit(mir::INode* node, mir::ops::ReshapeOp& op) override;
- void visit(mir::INode* node, mir::ops::ScaleOp& op) override;
- void visit(mir::INode* node, mir::ops::BatchNormOp& op) override;
- void visit(mir::INode* node, mir::ops::DropoutOp& op) override;
- void visit(mir::INode* node, mir::ops::TanhOp& op) override;
- void visit(mir::INode* node, mir::ops::ElementwiseOp& op) override;
- void visit(mir::INode* node, mir::ops::DeConv2DOp& op) override;
- void visit(mir::INode* node, mir::ops::EluOp& op) override;
- void visit(mir::INode* node, mir::ops::SqueezeOp& op) override;
- void visit(mir::INode* node, mir::ops::PadOp& op) override;
+ void visit(mir::ops::ConcatOp& op) override;
+ void visit(mir::ops::Conv2DOp& op) override;
+ void visit(mir::ops::DepthwiseConv2DOp& op) override;
+ void visit(mir::ops::SoftmaxOp& op) override;
+ void visit(mir::ops::PoolOp& op) override;
+ void visit(mir::ops::FullyConnectedOp& op) override;
+ void visit(mir::ops::CappedReluOp& op) override;
+ void visit(mir::ops::BiasAddOp& op) override;
+ void visit(mir::ops::VariableOp& op) override;
+ void visit(mir::ops::ReluOp& op) override;
+ void visit(mir::ops::ReshapeOp& op) override;
+ void visit(mir::ops::ScaleOp& op) override;
+ void visit(mir::ops::BatchNormOp& op) override;
+ void visit(mir::ops::DropoutOp& op) override;
+ void visit(mir::ops::TanhOp& op) override;
+ void visit(mir::ops::ElementwiseOp& op) override;
+ void visit(mir::ops::DeConv2DOp& op) override;
+ void visit(mir::ops::EluOp& op) override;
+ void visit(mir::ops::SqueezeOp& op) override;
+ void visit(mir::ops::PadOp& op) override;
private:
using AF = ArtifactFactory;
* @brief The common part of the convolution and the depthwise convolution.
*/
template <typename Op>
- void genConvolution(mir::INode* node, Op& op, const std::string& acl_func_name,
- const std::string& suffix);
+ void genConvolution(Op& op, const std::string& acl_func_name, const std::string& suffix);
/**
* @brief Generates different types of activation functions: ReLU, Tanh etc.
* LINEAR, TANH.
* @param b - betha parameter used by some activation functions: LINEAR, LU_BOUNDED_RELU, TANH.
*/
- void genActivation(mir::INode* node, mir::OpDescription& op, const std::string& activation_name,
- float a = 0, float b = 0);
+ void
+ genActivation(mir::Operation& op, const std::string& activation_name, float a = 0, float b = 0);
/**
* @brief Used to generate a binary addition operation in handling of the elementwise.
/**
* @brief Generates a unique name for the tensor.
*/
- std::string tensorName(mir::INode* node) const;
+ std::string tensorName(mir::Operation* op) const;
/**
* @brief Generates tensor shape in DOM.
/**
* @brief Generates a DOM tensor.
- * @param node - node for which this tensor generated.
+ * @param op - an IR operation for which this tensor is generated.
* @param ir_shape - a shape in IR.
* @return - a DOM identifier for the created tensor.
*/
- std::shared_ptr<ArtifactId> genTensor(mir::INode* node, const mir::Shape& ir_shape);
+ std::shared_ptr<ArtifactId> genTensor(mir::Operation& op, const mir::Shape& ir_shape);
/**
* @brief Generates accessors for the input/output tensors.
*/
void serializeTensor(const mir::TensorVariant& tensor);
- std::set<mir::INode*> _inputs;
- std::set<mir::INode*> _outputs;
+ std::set<mir::Operation*> _inputs;
+ std::set<mir::Operation*> _outputs;
std::set<std::string> _tensorNames;
/**
std::unique_ptr<CaffeOpCreator> _opCreator;
// Maps Caffe blob name to MIR IODescriptor.
- std::map<std::string, mir::INode::IODescriptor> _blobNameToIODescriptor;
+ std::map<std::string, mir::IODescriptor> _blobNameToIODescriptor;
static const std::map<std::string, CaffeOpType> _operatorTypes;
std::set<std::string> _problemsOpSet;
/**
* @brief Return MIR IODescriptors for the inputs of the given layer.
*/
- mir::INode::IODescriptorVector getMIRInputsForLayer(const ::caffe::LayerParameter& layer);
+ std::vector<mir::IODescriptor> getMIRInputsForLayer(const ::caffe::LayerParameter& layer);
/**
* @brief Prepare Caffe layer parameters for Model IR operation creator.
#ifndef FRONTEND_COMMON_INCLUDE_NN_IMPORTER_
#define FRONTEND_COMMON_INCLUDE_NN_IMPORTER_
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
namespace nnc {
#include <unordered_map>
#include "core/modelIR/Visitor.h"
-#include "core/modelIR/ir_node.h"
+#include "core/modelIR/Operation.h"
#include "core/modelIR/Tensor.h"
namespace mir
{
-class NNInterpreter : public IVisitor
-{
+class NNInterpreter : public IVisitor {
public:
explicit NNInterpreter() = default;
- void visit(INode::Ref node, ops::ConcatOp &op) override;
- void visit(INode::Ref node, ops::Conv2DOp &op) override;
- void visit(INode::Ref node, ops::DepthwiseConv2DOp &op) override;
- void visit(INode::Ref node, ops::ReluOp &op) override;
- void visit(INode::Ref node, ops::SoftmaxOp &op) override;
- void visit(INode::Ref node, ops::PoolOp &op) override;
- void visit(INode::Ref node, ops::FullyConnectedOp &op) override;
- void visit(INode::Ref node, ops::CappedReluOp &op) override;
- void visit(INode::Ref node, ops::BiasAddOp &op) override;
- void visit(INode::Ref node, ops::VariableOp &op) override;
- void visit(INode::Ref node, ops::ReshapeOp &op) override;
- void visit(INode::Ref node, ops::ScaleOp &op) override;
- void visit(INode::Ref node, ops::BatchNormOp &op) override;
- void visit(INode::Ref node, ops::DropoutOp &op) override;
- void visit(INode::Ref node, ops::TanhOp &op) override;
- void visit(INode::Ref node, ops::ElementwiseOp &op) override;
- void visit(INode::Ref node, ops::DeConv2DOp &op) override;
- void visit(INode::Ref node, ops::EluOp &op) override;
- void visit(INode* node, ops::SqueezeOp& op) override;
- void visit(INode* node, ops::PadOp& op) override;
+ void visit(ops::ConcatOp& op) override;
+ void visit(ops::Conv2DOp& op) override;
+ void visit(ops::DepthwiseConv2DOp& op) override;
+ void visit(ops::ReluOp& op) override;
+ void visit(ops::SoftmaxOp& op) override;
+ void visit(ops::PoolOp& op) override;
+ void visit(ops::FullyConnectedOp& op) override;
+ void visit(ops::CappedReluOp& op) override;
+ void visit(ops::BiasAddOp& op) override;
+ void visit(ops::VariableOp& op) override;
+ void visit(ops::ReshapeOp& op) override;
+ void visit(ops::ScaleOp& op) override;
+ void visit(ops::BatchNormOp& op) override;
+ void visit(ops::DropoutOp& op) override;
+ void visit(ops::TanhOp& op) override;
+ void visit(ops::ElementwiseOp& op) override;
+ void visit(ops::DeConv2DOp& op) override;
+ void visit(ops::EluOp& op) override;
+ void visit(ops::SqueezeOp& op) override;
+ void visit(ops::PadOp& op) override;
void setInput(const std::string &name, const TensorVariant& data);
- std::vector<TensorVariant> &getResult(INode::Ref node);
+ std::vector<TensorVariant> &getResult(Operation* op);
/**
* @brief Intermediate interpreter results getter
* @param nodeName - name of node
std::vector<TensorVariant> &var(size_t id);
/**
* @brief Used to collect nodes data for getting intermediate interpreter results
- * @param n - reference to node
+ * @param op - pointer to the operation whose data is collected
*/
- void mapByName(INode::Ref n);
+ void mapByName(Operation* op);
private:
std::map<size_t, std::vector<TensorVariant>> vars;
std::unordered_map<std::string, TensorVariant> data;
- std::map<std::string, INode::Ref> nodeByName;
+ std::map<std::string, Operation*> _opByName;
};
} // namespace mir
#ifndef _NNC_SOFT_BACKEND_BASE_GENERATOR_H_
#define _NNC_SOFT_BACKEND_BASE_GENERATOR_H_
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "pass/Pass.h"
#include "pass/PassData.h"
#include "passes/common_frontend/nn_importer.h"
#include "passes/common_frontend/model_allocation.h"
-#include "core/modelIR/graph.h"
-#include "core/modelIR/ir_node.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/TensorUtil.h"
#include "core/modelIR/TensorVariant.h"
// This map maps indices of TFLite tensors to MIR operations/nodes
// that correspond to operations having these tensors as output.
- std::map<int, mir::INode::Ref> _opsForTensorsTheyOutput;
+ std::map<int, mir::Operation*> _opsForTensorsTheyOutput;
std::set<std::string> _problemsOpSet;
/**
* @brief Return MIR ops, preceding given tflite operator
*/
- std::vector<mir::INode::Ref> getPrecedingMIROps(const ::tflite::Operator* op);
+ std::vector<mir::Operation*> getPrecedingMIROps(const ::tflite::Operator* op);
std::shared_ptr<mir::TensorVariant> createTensor(const ::tflite::Tensor* t,
const ::tflite::Buffer* b);
#include "passes/acl_soft_backend/AclCppOpGenerator.h"
#include "passes/acl_soft_backend/AclCppException.h"
-#include "core/modelIR/ir_node.h"
#include "core/modelIR/ShapeRange.h"
#include "core/modelIR/TensorUtil.h"
#include "option/Options.h"
return _module;
}
-void AclCppOpGenerator::visit(INode* node, ops::ConcatOp& op) {
+void AclCppOpGenerator::visit(ops::ConcatOp& op) {
static const char* axis_names[] = {"arm_compute::DataLayoutDimension::CHANNEL",
"arm_compute::DataLayoutDimension::HEIGHT",
"arm_compute::DataLayoutDimension::WIDTH",
"arm_compute::DataLayoutDimension::BATCHES"};
assert(op.getAxis() < sizeof(axis_names) / sizeof(const char*));
- auto out = genTensor(node, op.getOutputShape(0));
+ auto out = genTensor(op, op.getOutputShape(0));
auto prefix = out->name() + "_concatenate_layer";
auto inputs_var = _constrBlock->var("std::vector<arm_compute::ICLTensor*>", prefix + "_inputs");
auto inputs = inputs_var->use();
- for (auto i : node->getPrevNodes())
- _constrBlock->call("push_back", {AF::ref(AF::id(tensorName(i.node)))}, inputs);
+ for (auto i : op.getPrevNodes())
+ _constrBlock->call("push_back", {AF::ref(AF::id(tensorName(i.op)))}, inputs);
auto concat_layer_var = _artifactClass->var(false, "arm_compute::CLConcatenateLayer", prefix);
auto concat_layer = concat_layer_var->use();
_infBlock->call("run", {}, concat_layer);
}
-void AclCppOpGenerator::visit(INode* node, ops::Conv2DOp& op) {
- genConvolution(node, op, "arm_compute::CLConvolutionLayer", "_convolution_layer");
+void AclCppOpGenerator::visit(ops::Conv2DOp& op) {
+ genConvolution(op, "arm_compute::CLConvolutionLayer", "_convolution_layer");
}
-void AclCppOpGenerator::visit(INode* node, ops::DepthwiseConv2DOp& op) {
- genConvolution(node, op, "arm_compute::CLDepthwiseConvolutionLayer",
+void AclCppOpGenerator::visit(ops::DepthwiseConv2DOp& op) {
+ genConvolution(op, "arm_compute::CLDepthwiseConvolutionLayer",
"_depthwise_convolution_layer");
}
-void AclCppOpGenerator::visit(INode* node, ops::SoftmaxOp& op) {
- auto& prev_nodes = node->getPrevNodes();
- assert(prev_nodes.size() == 1);
- auto in_node = prev_nodes[0].node;
- auto in = AF::id(tensorName(in_node));
- auto out = genTensor(node, op.getOutputShape(0));
+void AclCppOpGenerator::visit(ops::SoftmaxOp& op) {
+ auto& in_ops = op.getPrevNodes();
+ assert(in_ops.size() == 1);
+ auto in_op = in_ops[0].op;
+ auto in = AF::id(tensorName(in_op));
+ auto out = genTensor(op, op.getOutputShape(0));
auto sm_layer_var = _artifactClass->var(false, "arm_compute::CLSoftmaxLayer",
out->name() + "_softmax_layer");
auto sm_layer = sm_layer_var->use();
_infBlock->call("run", {}, sm_layer);
}
-void AclCppOpGenerator::visit(INode* node, ops::PoolOp& op) {
+void AclCppOpGenerator::visit(ops::PoolOp& op) {
const char* pooling_type;
switch (op.getPoolingType()) {
assert(false && "Not a supported pooling type");
}
- auto& prev_nodes = node->getPrevNodes();
+ auto& prev_nodes = op.getPrevNodes();
assert(prev_nodes.size() == 1);
- auto in_node = prev_nodes[0].node;
- auto in = AF::id(tensorName(in_node));
- auto out = genTensor(node, op.getOutputShape(0));
+ auto in_op = prev_nodes[0].op;
+ auto in = AF::id(tensorName(in_op));
+ auto out = genTensor(op, op.getOutputShape(0));
auto prefix = out->name() + "_pooling_layer";
auto pad_stride_info_var = _constrBlock->var("arm_compute::PadStrideInfo",
_infBlock->call("run", {}, pooling_layer);
}
-void AclCppOpGenerator::visit(INode* node, ops::FullyConnectedOp& op) {
+void AclCppOpGenerator::visit(ops::FullyConnectedOp& op) {
const TensorVariant& ir_weights = op.getWeights();
const Shape& ir_weights_shape = ir_weights.getShape();
- auto& prev_nodes = node->getPrevNodes();
+ auto& prev_nodes = op.getPrevNodes();
assert(prev_nodes.size() == 1);
- auto in_node = prev_nodes[0].node;
+ auto in_op = prev_nodes[0].op;
// Get the input node tensor id in the DOM.
- auto in = AF::id(tensorName(in_node));
+ auto in = AF::id(tensorName(in_op));
// Create the output tensor in the DOM.
- auto out = genTensor(node, op.getOutputShape(0));
+ auto out = genTensor(op, op.getOutputShape(0));
string operation_name = out->name() + "_fully_connected_layer";
// Create the weights tensor in the DOM and use its id.
_infBlock->call("run", {}, fully_layer);
}
-void AclCppOpGenerator::visit(INode* node, ops::CappedReluOp& op) {
- genActivation(node, op, "LU_BOUNDED_RELU", op.getCap());
+void AclCppOpGenerator::visit(ops::CappedReluOp& op) {
+ genActivation(op, "LU_BOUNDED_RELU", op.getCap());
}
-void AclCppOpGenerator::visit(INode* node, ops::BiasAddOp& op) {
+void AclCppOpGenerator::visit(ops::BiasAddOp& op) {
const auto& ir_biases = op.getWeights();
assert(ir_biases.getShape().rank() == 1);
- auto& prev_nodes = node->getPrevNodes();
+ auto& prev_nodes = op.getPrevNodes();
assert(prev_nodes.size() == 1);
- auto in_node = prev_nodes[0].node;
+ auto in_op = prev_nodes[0].op;
// Get the input node tensor id in the DOM.
- auto in = AF::id(tensorName(in_node));
+ auto in = AF::id(tensorName(in_op));
// Create the output tensor in the DOM and obtain its identifier.
- auto out = genTensor(node, op.getOutputShape(0));
+ auto out = genTensor(op, op.getOutputShape(0));
// Prefix used for the name of variables related to the operation implementation.
string operation_name = out->name() + "_bias_add_layer";
_infBlock->call("run", {}, arithmetic_add_layer);
}
-void AclCppOpGenerator::visit(INode* node, ops::VariableOp& op) {
- // Axes order is HWC in the Model IR and WHC in the ACL library, so we are switching the first
- // two dimensions.
- genTensor(node, transposeShape<1, 0, 2>(op.getOutputShape(0)));
+void AclCppOpGenerator::visit(ops::VariableOp& op) {
+ genTensor(op, transposeShape<1, 0, 2>(op.getOutputShape(0)));
}
-void AclCppOpGenerator::visit(INode* node, ops::ReluOp& op) {
- genActivation(node, op, "RELU");
+void AclCppOpGenerator::visit(ops::ReluOp& op) {
+ genActivation(op, "RELU");
}
-void AclCppOpGenerator::visit(INode* node, ops::ReshapeOp& op) {
- auto& prev_nodes = node->getPrevNodes();
+void AclCppOpGenerator::visit(ops::ReshapeOp& op) {
+ auto& prev_nodes = op.getPrevNodes();
assert(prev_nodes.size() == 1);
// Get the id of the input tensor in the generated artifact.
- auto in_node = prev_nodes[0].node;
- auto in = AF::id(tensorName(in_node));
+ auto in_op = prev_nodes[0].op;
+ auto in = AF::id(tensorName(in_op));
// Create the output tensor in the DOM and return its id.
- auto out = genTensor(node, op.getOutputShape(0));
+ auto out = genTensor(op, op.getOutputShape(0));
// Create an instance of the CLReshapeLayer class as a member of the artifact class.
auto reshape_layer_var = _artifactClass->var(false, "arm_compute::CLReshapeLayer",
_infBlock->call("run", {}, reshape_layer);
}
-void AclCppOpGenerator::visit(INode* node, ops::ScaleOp& op) {
+void AclCppOpGenerator::visit(ops::ScaleOp& op) {
// May be not a perfect implementation, using the CLPixelWiseMultiplication ACL function taking
// two input tensors with the same shapes.
- auto prev_nodes = node->getPrevNodes();
+ auto prev_nodes = op.getPrevNodes();
assert(prev_nodes.size() == 1);
- auto in_node = prev_nodes[0].node;
+ auto in_op = prev_nodes[0].op;
// Get input tensor identifier in the generated artifact.
- auto in = AF::id(tensorName(in_node));
+ auto in = AF::id(tensorName(in_op));
// Generate output tensor description in the DOM.
- auto out = genTensor(node, op.getOutputShape(0));
+ auto out = genTensor(op, op.getOutputShape(0));
auto prefix = out->name() + "_scale_layer";
// Create a CLPixelWiseMultiplication instance.
auto scale_layer_var = _artifactClass->var(false, "arm_compute::CLPixelWiseMultiplication",
prefix);
auto scale_layer = scale_layer_var->use();
- auto scale_tensor = genTensor(prefix + "_scales", in_node->getOperation()->getOutputShape(0));
+ auto scale_tensor = genTensor(prefix + "_scales", in_op->getOutputShape(0));
// Construct the vector containing scales.
auto scales_var = _constrBlock->var("std::vector<float>", prefix + "_scales");
_infBlock->call("run", {}, scale_layer);
}
-void AclCppOpGenerator::visit(INode* node, ops::BatchNormOp& op) {
+void AclCppOpGenerator::visit(ops::BatchNormOp& op) {
// Not supported in our framework, but present in ACL API.
throw AclCppException("Not supported in inference yet.");
}
-void AclCppOpGenerator::visit(INode* node, ops::DropoutOp& op) {
+void AclCppOpGenerator::visit(ops::DropoutOp& op) {
// Just copy input tensor to the output one.
- auto prev_nodes = node->getPrevNodes();
- assert(prev_nodes.size() == 1);
- auto in_node = prev_nodes[0].node;
+ auto prev_ops = op.getPrevNodes();
+ assert(prev_ops.size() == 1);
+ auto in_op = prev_ops[0].op;
// Get input tensor identifier in the generated artifact.
- auto in = AF::id(tensorName(in_node));
+ auto in = AF::id(tensorName(in_op));
// Generate output tensor description in the DOM.
- auto out = genTensor(node, op.getOutputShape(0));
+ auto out = genTensor(op, op.getOutputShape(0));
// Create a CLCopy instance.
auto copy_layer_var = _artifactClass->var(false, "arm_compute::CLCopy",
_infBlock->call("run", {}, copy_layer);
}
-void AclCppOpGenerator::visit(INode* node, ops::TanhOp& op) {
- genActivation(node, op, "TANH");
+void AclCppOpGenerator::visit(ops::TanhOp& op) {
+ genActivation(op, "TANH");
}
-void AclCppOpGenerator::visit(INode* node, ops::ElementwiseOp& op) {
+void AclCppOpGenerator::visit(ops::ElementwiseOp& op) {
// Create the output tensor in the DOM and obtain its identifier.
- auto out = genTensor(node, op.getOutputShape(0));
+ auto out = genTensor(op, op.getOutputShape(0));
- auto& prev_nodes = node->getPrevNodes();
+ auto& prev_nodes = op.getPrevNodes();
assert(prev_nodes.size() >= 2);
- auto in_node1 = prev_nodes[0].node;
+ auto in_op1 = prev_nodes[0].op;
// Get the identifier of the first input tensor in the DOM.
- auto in1 = AF::id(tensorName(in_node1));
+ auto in1 = AF::id(tensorName(in_op1));
for (int i = 1; i < prev_nodes.size(); ++i) {
- auto in_node2 = prev_nodes[i].node;
+ auto in_op2 = prev_nodes[i].op;
// Get the identifier of the second input tensor in the DOM.
- auto in2 = AF::id(tensorName(in_node2));
+ auto in2 = AF::id(tensorName(in_op2));
// Chaining the partial results of binary operations.
// On the last iteration the result is saved in the node output.
}
}
-void AclCppOpGenerator::visit(INode* node, ops::DeConv2DOp& op) {
- genConvolution(node, op, "arm_compute::CLDeconvolutionLayer", "_deconvolution_layer");
+void AclCppOpGenerator::visit(ops::DeConv2DOp& op) {
+ genConvolution(op, "arm_compute::CLDeconvolutionLayer", "_deconvolution_layer");
}
-void AclCppOpGenerator::visit(INode* node, ops::EluOp& op) {
+void AclCppOpGenerator::visit(ops::EluOp& op) {
throw AclCppException("Not supported by the ACL library yet.");
}
-void AclCppOpGenerator::visit(INode *node, ops::PadOp &op) {
+void AclCppOpGenerator::visit(ops::PadOp& op) {
throw AclCppException("Not supported by the ACL library yet.");
}
template <typename Op>
-void AclCppOpGenerator::genConvolution(INode* node, Op& op, const string& acl_func_name,
- const string& suffix) {
+void AclCppOpGenerator::genConvolution(Op& op, const string& acl_func_name, const string& suffix) {
auto ir_weights = transposeTensor<1, 0, 2, 3>(make_shared<TensorVariant>(op.getKernel()));
const Shape& ir_weights_shape = ir_weights->getShape();
assert(ir_weights_shape.rank() == 4);
uint32_t pad_y = op.getPadding(1);
assert(op.getPadding(2) == 0);
- auto& prev_nodes = node->getPrevNodes();
+ auto& prev_nodes = op.getPrevNodes();
assert(prev_nodes.size() == 1);
- auto in_node = prev_nodes[0].node;
+ auto in_op = prev_nodes[0].op;
// Get the identifier of the input tensor in the DOM.
- auto in = AF::id(tensorName(in_node));
+ auto in = AF::id(tensorName(in_op));
// Create the output tensor in the DOM.
- auto out = genTensor(node, transposeShape<1, 0, 2>(op.getOutputShape(0)));
+ auto out = genTensor(op, transposeShape<1, 0, 2>(op.getOutputShape(0)));
string operation_name = out->name() + suffix;
// Generate a tensor for weights (kernel) in the DOM.
_infBlock->call("run", {}, conv_layer);
}
-void AclCppOpGenerator::genActivation(INode* node, OpDescription& op,
- const std::string& activation_name, float a, float b) {
- auto &prev_nodes = node->getPrevNodes();
+void AclCppOpGenerator::genActivation(mir::Operation& op, const std::string& activation_name, float a, float b) {
+ auto &prev_nodes = op.getPrevNodes();
assert(prev_nodes.size() == 1);
// Get the id of the input tensor.
- auto in_node = prev_nodes[0].node;
- auto in = AF::id(tensorName(in_node));
+ auto in_op = prev_nodes[0].op;
+ auto in = AF::id(tensorName(in_op));
// Create the output tensor in the DOM and return its id.
- auto out = genTensor(node, op.getOutputShape(0));
+ auto out = genTensor(op, op.getOutputShape(0));
auto prefix = out->name() + "_activation_layer";
// Create an instance of the ActivationLayerInfo class as a local variable in the artifact
return out;
}
-string AclCppOpGenerator::tensorName(INode* node) const {
+string AclCppOpGenerator::tensorName(Operation* op) const {
string tensor_name;
- if (!node->getName().empty()) {
- tensor_name = "_" + node->getName();
+ if (!op->getName().empty()) {
+ tensor_name = "_" + op->getName();
replace_if(tensor_name.begin(),
tensor_name.end(),
[](char c) { return std::isalnum(c) == 0; }, '_');
} else {
- tensor_name = "tensor_" + to_string(node->getId());
+ tensor_name = "tensor_" + to_string(op->getId());
}
return tensor_name;
return id;
}
-shared_ptr<ArtifactId> AclCppOpGenerator::genTensor(INode* node, const Shape& ir_shape) {
- if (node->getPrevNodes().empty())
- _inputs.insert(node);
+std::shared_ptr<ArtifactId> AclCppOpGenerator::genTensor(mir::Operation& op, const Shape& ir_shape) {
+ if (op.getPrevNodes().empty())
+ _inputs.insert(&op);
- if (node->getNextNodes().empty())
- _outputs.insert(node);
+ if (op.getNextNodes().empty())
+ _outputs.insert(&op);
- return genTensor(tensorName(node), ir_shape, !node->getName().empty());
+ return genTensor(tensorName(&op), ir_shape, !op.getName().empty());
}
void AclCppOpGenerator::genNamed() {
}
}
-void AclCppOpGenerator::visit(INode* node, ops::SqueezeOp& op) {
+void AclCppOpGenerator::visit(ops::SqueezeOp& op) {
assert(false && "Unimplemented operation: Squeeze");
}
auto inputs = getMIRInputsForLayer(lp);
auto params = createOpParams(lp);
- INode::IODescriptorVector outputs;
+ std::vector<IODescriptor> outputs;
CaffeOpType op_type = _operatorTypes.at(lp.type());
switch (op_type) {
return tensor;
}
-INode::IODescriptorVector CaffeImporter::getMIRInputsForLayer(const LayerParameter& layer) {
- INode::IODescriptorVector inputs;
+std::vector<mir::IODescriptor> CaffeImporter::getMIRInputsForLayer(const LayerParameter& layer) {
+ std::vector<mir::IODescriptor> inputs;
for (const auto& input_name : layer.bottom())
inputs.push_back(_blobNameToIODescriptor.at(input_name));
// For now, we assume that:
// - there is exactly one output;
// - the output is from the last layer.
- _graph->markOutput(_blobNameToIODescriptor[last_layer.top(0)].node);
+ _graph->markOutput(_blobNameToIODescriptor[last_layer.top(0)].op);
}
void CaffeImporter::setIrNodeNames() {
// FIXME Support multiple outputs.
for (auto& item : _blobNameToIODescriptor)
- item.second.node->setName(item.first);
+ item.second.op->setName(item.first);
}
PassData CaffeImporter::run(PassData) {
#include <set>
#include <cmath>
-
+#include <iostream>
namespace nnc {
return unfold_kernel;
}
-INode::IODescriptorVector CaffeOpCreator::convertInput(const LayerParameter& layer) {
+std::vector<IODescriptor> CaffeOpCreator::convertInput(const LayerParameter& layer) {
const auto& params = layer.input_param();
const auto num_inputs = layer.top_size();
const auto num_shapes = params.shape_size();
- INode::IODescriptorVector descriptors;
+ std::vector<IODescriptor> descriptors;
assert((num_shapes == 1 || num_shapes == num_inputs) && "Unsupported number of shapes.");
// TODO: Implement a more consistent way of handling shapes within the model.
if (shape.rank() == 3)
shape = Shape{shape.dim(1), shape.dim(2), shape.dim(0)};
- variable->getOperation()->setOutputShape(0, shape);
+ variable->setOutputShape(0, shape);
descriptors.push_back(variable->getOutput(0));
}
problems_op_set.insert("Conv2D: Unsupported number of pads");
}
-INode::IODescriptorVector
-CaffeOpCreator::convertConvolution(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertConvolution(const std::vector<IODescriptor>& inputs,
const std::vector<std::shared_ptr<IrTensor>>& params,
const caffe::ConvolutionParameter& opts) {
ops::PaddingType pad_type = ops::PaddingType::Custom;
Shape stride_shape = getConvStride(opts);
std::shared_ptr<IrTensor> unfolded_tensor = params[0];
- INode* conv2d;
+ Operation* conv2d;
auto in_group_size = params[0]->getShape().dim(2);
auto out_channels = params[0]->getShape().dim(3);
int32_t num_groups = opts.group();
pad_h = pad_w = opts.pad(0);
if (is_depthwise) {
- auto op = static_cast<ops::DepthwiseConv2DOp*>(conv2d->getOperation());
+ auto op = static_cast<ops::DepthwiseConv2DOp*>(conv2d);
op->setPadding(0, pad_h);
op->setPadding(1, pad_w);
op->setPadding(2, 0);
} else {
- auto op = static_cast<ops::Conv2DOp*>(conv2d->getOperation());
+ auto op = static_cast<ops::Conv2DOp*>(conv2d);
op->setPadding(0, pad_h);
op->setPadding(1, pad_w);
op->setPadding(2, 0);
* implement it correctly.
* @todo Support axis and transpose parameters as needed.
*/
-INode::IODescriptorVector
-CaffeOpCreator::convertInnerProduct(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertInnerProduct(const std::vector<IODescriptor>& inputs,
const std::vector<std::shared_ptr<IrTensor>>& params,
const caffe::InnerProductParameter& opts) {
// Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize]
auto reshape = createOp<ops::ReshapeOp>(inputs);
int32_t fc_input_size = static_cast<int32_t>(
params[0]->getShape().numElements()) / opts.num_output();
- reshape->getOperation()->setOutputShape(0, {1, fc_input_size});
+ reshape->setOutputShape(0, {1, fc_input_size});
auto fully_connected = createOp<ops::FullyConnectedOp>({reshape->getOutput(0)},
std::move(*params[0]));
}
}
-INode::IODescriptorVector
-CaffeOpCreator::convertConcat(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertConcat(const std::vector<IODescriptor>& inputs,
const caffe::ConcatParameter& opts) {
auto result = createOp<ops::ConcatOp>(inputs, inputs.size(), getAxisValue(opts));
return {result->getOutput(0)};
problemsOpSet.insert("Pooling: conflicting padding properties in pooling");
}
-INode::IODescriptorVector
-CaffeOpCreator::convertPooling(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertPooling(const std::vector<IODescriptor>& inputs,
const caffe::PoolingParameter& opts) {
Shape window_shape = getPoolWindowShape(opts);
ops::PoolOp::PoolingType pool_type = getPoolingType(opts);
pad_type, border_type);
// Set pads
- auto op = static_cast<ops::PoolOp*>(pooling->getOperation());
+ auto op = static_cast<ops::PoolOp*>(pooling);
int pad_h = opts.has_pad_h() ? opts.pad_h() : 0;
int pad_w = opts.has_pad_w() ? opts.pad_w() : 0;
if (opts.has_pad())
return {pooling->getOutput(0)};
}
-INode::IODescriptorVector
-CaffeOpCreator::convertSoftmax(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertSoftmax(const std::vector<IODescriptor>& inputs,
const caffe::SoftmaxParameter& opts) {
auto softmax = createOp<ops::SoftmaxOp>(inputs, getAxisValue(opts));
return {softmax->getOutput(0)};
* @todo Decide how to react to the absence of "shape" parameter.
* @todo Support zero values in "shape" parameter.
*/
-INode::IODescriptorVector
-CaffeOpCreator::convertReshape(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertReshape(const std::vector<IODescriptor>& inputs,
const caffe::ReshapeParameter& opts) {
auto reshape = createOp<ops::ReshapeOp>(inputs);
Shape new_shape = ShapeHelper::createShape(opts.shape().dim(), opts.shape().dim_size());
- reshape->getOperation()->setOutputShape(0, new_shape);
+ reshape->setOutputShape(0, new_shape);
return {reshape->getOutput(0)};
}
problems_op_set.insert("ReLU layer negative_slope param is not supported yet.");
}
-INode::IODescriptorVector
-CaffeOpCreator::convertReLU(const INode::IODescriptorVector& inputs) {
+std::vector<IODescriptor>
+CaffeOpCreator::convertReLU(const std::vector<IODescriptor>& inputs) {
auto relu = createOp<ops::ReluOp>(inputs);
return {relu->getOutput(0)};
}
-INode::IODescriptorVector
-CaffeOpCreator::convertScale(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertScale(const std::vector<IODescriptor>& inputs,
const std::vector<std::shared_ptr<IrTensor>>& params,
const caffe::ScaleParameter& opts) {
auto scale = createOp<ops::ScaleOp>(inputs, std::move(*params[0]));
problems_op_set.insert("Unexpected shape of scale parameter in batch norm");
}
-INode::IODescriptorVector
-CaffeOpCreator::convertBatchNorm(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertBatchNorm(const std::vector<IODescriptor>& inputs,
const std::vector<std::shared_ptr<IrTensor>>& params,
const caffe::BatchNormParameter& opts) {
float eps = opts.eps();
return {scale->getOutput(0)};
}
-INode::IODescriptorVector
-CaffeOpCreator::convertDropout(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertDropout(const std::vector<IODescriptor>& inputs,
const caffe::DropoutParameter& opts) {
auto dropout = createOp<ops::DropoutOp>(inputs, opts.dropout_ratio());
return {dropout->getOutput(0)};
}
-INode::IODescriptorVector
-CaffeOpCreator::convertDeconvolution(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertDeconvolution(const std::vector<IODescriptor>& inputs,
const std::vector<std::shared_ptr<IrTensor>>& params,
const caffe::ConvolutionParameter& opts) {
ops::PaddingType pad_type = ops::PaddingType::Custom;
stride_shape, pad_type);
// Set pads
- auto op = static_cast<ops::DeConv2DOp*>(deconv2d->getOperation());
+ auto op = static_cast<ops::DeConv2DOp*>(deconv2d);
int pad_h = opts.has_pad_h() ? opts.pad_h() : 0;
int pad_w = opts.has_pad_w() ? opts.pad_w() : 0;
}
}
-INode::IODescriptorVector
-CaffeOpCreator::convertELU(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertELU(const std::vector<IODescriptor>& inputs,
const std::vector<std::shared_ptr<IrTensor>>& params,
const caffe::ELUParameter& opts) {
auto elu = createOp<ops::EluOp>(inputs, opts.alpha());
return {elu->getOutput(0)};
}
-INode::IODescriptorVector
-CaffeOpCreator::convertTanH(const INode::IODescriptorVector& inputs) {
+std::vector<IODescriptor>
+CaffeOpCreator::convertTanH(const std::vector<IODescriptor>& inputs) {
auto tanh = createOp<ops::TanhOp>(inputs);
return {tanh->getOutput(0)};
}
-INode::IODescriptorVector
-CaffeOpCreator::convertEltwise(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertEltwise(const std::vector<IODescriptor>& inputs,
const caffe::EltwiseParameter& opts) {
ops::ElementwiseOp::OpType optype;
switch (opts.operation()){
return {elementwise->getOutput(0)};
}
-INode::IODescriptorVector
-CaffeOpCreator::convertSplit(const INode::IODescriptorVector& inputs,
+std::vector<IODescriptor>
+CaffeOpCreator::convertSplit(const std::vector<IODescriptor>& inputs,
const caffe::LayerParameter& lp) {
- INode::IODescriptorVector outputs(lp.top_size(), inputs.at(0));
+ std::vector<IODescriptor> outputs(lp.top_size(), inputs.at(0));
return outputs;
}
-void CaffeOpCreator::connectInputs(INode* node, const INode::IODescriptorVector& inputs) {
+void CaffeOpCreator::connectInputs(mir::Operation* op, const std::vector<IODescriptor>& inputs) {
for (int i = 0; i < static_cast<int>(inputs.size()); ++i)
- node->connectInputTo(i, inputs[i]);
+ op->connectInputTo(i, inputs[i]);
}
} // namespace nnc
#include <vector>
#include <memory>
-#include "core/modelIR/graph.h"
-#include "core/modelIR/ir_node.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/TensorVariant.h"
#include "core/modelIR/operations/common.h"
#include "core/modelIR/Shape.h"
namespace nnc {
using nnc::mir::Graph;
-using nnc::mir::INode;
using IrTensor = nnc::mir::TensorVariant;
using nnc::mir::Shape;
public:
explicit CaffeOpCreator(Graph* g) : _graph(g) {};
- INode::IODescriptorVector convertInput(const caffe::LayerParameter& layer);
+ std::vector<mir::IODescriptor> convertInput(const caffe::LayerParameter& layer);
- INode::IODescriptorVector
- convertConvolution(const INode::IODescriptorVector& inputs,
+ std::vector<mir::IODescriptor>
+ convertConvolution(const std::vector<mir::IODescriptor>& inputs,
const std::vector<std::shared_ptr<IrTensor>>& params,
const caffe::ConvolutionParameter& opts);
- INode::IODescriptorVector
- convertInnerProduct(const INode::IODescriptorVector& inputs,
+ std::vector<mir::IODescriptor>
+ convertInnerProduct(const std::vector<mir::IODescriptor>& inputs,
const std::vector<std::shared_ptr<IrTensor>>& params,
const caffe::InnerProductParameter& opts);
- INode::IODescriptorVector convertConcat(const INode::IODescriptorVector& inputs,
- const caffe::ConcatParameter& opts);
+ std::vector<mir::IODescriptor> convertConcat(const std::vector<mir::IODescriptor>& inputs,
+ const caffe::ConcatParameter& opts);
- INode::IODescriptorVector convertPooling(const INode::IODescriptorVector& inputs,
- const caffe::PoolingParameter& opts);
+ std::vector<mir::IODescriptor> convertPooling(const std::vector<mir::IODescriptor>& inputs,
+ const caffe::PoolingParameter& opts);
- INode::IODescriptorVector convertSoftmax(const INode::IODescriptorVector& inputs,
- const caffe::SoftmaxParameter& opts);
+ std::vector<mir::IODescriptor> convertSoftmax(const std::vector<mir::IODescriptor>& inputs,
+ const caffe::SoftmaxParameter& opts);
- INode::IODescriptorVector convertReshape(const INode::IODescriptorVector& inputs,
- const caffe::ReshapeParameter& opts);
+ std::vector<mir::IODescriptor> convertReshape(const std::vector<mir::IODescriptor>& inputs,
+ const caffe::ReshapeParameter& opts);
- INode::IODescriptorVector convertReLU(const INode::IODescriptorVector& inputs);
+ std::vector<mir::IODescriptor> convertReLU(const std::vector<mir::IODescriptor>& inputs);
- INode::IODescriptorVector convertScale(const INode::IODescriptorVector& inputs,
- const std::vector<std::shared_ptr<IrTensor>>& params,
- const caffe::ScaleParameter& opts);
+ std::vector<mir::IODescriptor> convertScale(const std::vector<mir::IODescriptor>& inputs,
+ const std::vector<std::shared_ptr<IrTensor>>& params,
+ const caffe::ScaleParameter& opts);
- INode::IODescriptorVector convertBatchNorm(const INode::IODescriptorVector& inputs,
- const std::vector<std::shared_ptr<IrTensor>>& params,
- const caffe::BatchNormParameter& layer);
+ std::vector<mir::IODescriptor>
+ convertBatchNorm(const std::vector<mir::IODescriptor>& inputs,
+ const std::vector<std::shared_ptr<IrTensor>>& params,
+ const caffe::BatchNormParameter& layer);
- INode::IODescriptorVector convertDropout(const INode::IODescriptorVector& inputs,
- const caffe::DropoutParameter& opts);
+ std::vector<mir::IODescriptor> convertDropout(const std::vector<mir::IODescriptor>& inputs,
+ const caffe::DropoutParameter& opts);
- INode::IODescriptorVector
- convertDeconvolution(const INode::IODescriptorVector& inputs,
+ std::vector<mir::IODescriptor>
+ convertDeconvolution(const std::vector<mir::IODescriptor>& inputs,
const std::vector<std::shared_ptr<IrTensor>>& params,
const caffe::ConvolutionParameter& opts);
- INode::IODescriptorVector convertELU(const INode::IODescriptorVector& inputs,
- const std::vector<std::shared_ptr<IrTensor>>& params,
- const caffe::ELUParameter& opts);
+ std::vector<mir::IODescriptor> convertELU(const std::vector<mir::IODescriptor>& inputs,
+ const std::vector<std::shared_ptr<IrTensor>>& params,
+ const caffe::ELUParameter& opts);
- INode::IODescriptorVector convertTanH(const INode::IODescriptorVector& inputs);
+ std::vector<mir::IODescriptor> convertTanH(const std::vector<mir::IODescriptor>& inputs);
- INode::IODescriptorVector convertEltwise(const INode::IODescriptorVector& inputs,
- const caffe::EltwiseParameter& opts);
+ std::vector<mir::IODescriptor> convertEltwise(const std::vector<mir::IODescriptor>& inputs,
+ const caffe::EltwiseParameter& opts);
- INode::IODescriptorVector convertSplit(const INode::IODescriptorVector& inputs,
- const caffe::LayerParameter& lp);
+ std::vector<mir::IODescriptor> convertSplit(const std::vector<mir::IODescriptor>& inputs,
+ const caffe::LayerParameter& lp);
void checkConvolution(const caffe::ConvolutionParameter& layer, std::set<std::string>&);
private:
Graph* _graph = nullptr;
- void connectInputs(INode*, const INode::IODescriptorVector& inputs);
+ void connectInputs(mir::Operation*, const std::vector<mir::IODescriptor>& inputs);
- template <typename OpType, typename ...Types>
- INode* createOp(const INode::IODescriptorVector& inputs, Types&& ... args);
+ template<typename OpType, typename... Types>
+ mir::Operation* createOp(const std::vector<mir::IODescriptor>& inputs, Types&&... args);
};
-template <typename OpType, typename ...Types>
-INode* CaffeOpCreator::createOp(const INode::IODescriptorVector& inputs, Types&& ... args) {
+template<typename OpType, typename... Types>
+mir::Operation*
+CaffeOpCreator::createOp(const std::vector<mir::IODescriptor>& inputs, Types&&... args) {
// TODO: set operation names
- auto node = _graph->create<OpType>("", std::forward<Types>(args)...);
- connectInputs(node, inputs);
- return node;
+ auto op = _graph->create<OpType>("", std::forward<Types>(args)...);
+ connectInputs(op, inputs);
+ return op;
}
} // namespace nnc
#include "ops/Dropout.h"
#include "ops/BatchNorm.h"
-namespace nnc
-{
+namespace nnc {
using namespace nnc::mir;
void NNInterpreter::setInput(const std::string &name, const TensorVariant& t) { data.emplace(name, t); }
-void NNInterpreter::visit(INode::Ref node, ops::VariableOp &op)
-{
- mapByName(node);
+void NNInterpreter::visit(ops::VariableOp& op) {
+ mapByName(&op);
(void)op;
- auto it = data.find(node->getName());
+ auto it = data.find(op.getName());
if( it == data.end() )
{
- throw std::runtime_error("Can't find data for node \"" + node->getName() + ". Input data was not set correctly?");
+ throw std::runtime_error("Can't find data for node \"" + op.getName() + ". Input data was not set correctly?");
}
- var(node->getId()) = {it->second};
+ var(op.getId()) = {it->second};
}
-std::vector<TensorVariant> &NNInterpreter::getResult(INode::Ref node)
-{
- auto res = vars.find(node->getId());
+std::vector<TensorVariant> &NNInterpreter::getResult(Operation* op) {
+ auto res = vars.find(op->getId());
if (res != vars.end())
{
return res->second;
}
std::vector<TensorVariant> &NNInterpreter::getOperationResult(const std::string &nodeName) {
- auto it = nodeByName.find(nodeName);
- if (it == nodeByName.end())
+ auto it = _opByName.find(nodeName);
+ if (it == _opByName.end())
throw std::runtime_error("Node not found <" + nodeName + ">");
return getResult(it->second);
}
-void NNInterpreter::visit(INode::Ref node, ops::ConcatOp &op)
-{
- mapByName(node);
- auto &operands = node->getPrevNodes();
+void NNInterpreter::visit(ops::ConcatOp& op) {
+ mapByName(&op);
+ auto &operands = op.getPrevNodes();
std::vector<TensorVariant> ins;
for (auto &in : operands)
{
- ins.push_back(var(in.node->getId())[in.index]);
+ ins.push_back(var(in.op->getId())[in.index]);
}
- var(node->getId()) = Concat<float>(ins, op.getOutputShape(0), op.getAxis())();
+ var(op.getId()) = Concat<float>(ins, op.getOutputShape(0), op.getAxis())();
}
-void NNInterpreter::visit(INode::Ref node, ops::Conv2DOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- var(node->getId()) = Conv2D(var(operand.node->getId())[operand.index], op)();
+void NNInterpreter::visit(ops::Conv2DOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ var(op.getId()) = Conv2D(var(operand.op->getId())[operand.index], op)();
}
-void NNInterpreter::visit(INode::Ref node, ops::ReshapeOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- auto input = var(operand.node->getId())[operand.index];
- var(node->getId()) = Reshape<float>(input, op.getOutputShape(0))();
+void NNInterpreter::visit(ops::ReshapeOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ auto input = var(operand.op->getId())[operand.index];
+ var(op.getId()) = Reshape<float>(input, op.getOutputShape(0))();
}
-void NNInterpreter::visit(INode::Ref node, ops::ReluOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- Tensor<float> input(var(operand.node->getId())[operand.index]);
- var(node->getId()) = Fill<float>(
+void NNInterpreter::visit(ops::ReluOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ Tensor<float> input(var(operand.op->getId())[operand.index]);
+ var(op.getId()) = Fill<float>(
op.getOutputShape(0), [&input](const Index &id) { return std::max(input.at(id), 0.0f); })();
}
-void NNInterpreter::visit(INode::Ref node, ops::SoftmaxOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- auto input = var(operand.node->getId())[operand.index];
- var(node->getId()) = Softmax(op.getInputShape(0), input, op.getAxis())();
+void NNInterpreter::visit(ops::SoftmaxOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ auto input = var(operand.op->getId())[operand.index];
+ var(op.getId()) = Softmax(op.getInputShape(0), input, op.getAxis())();
}
-void NNInterpreter::visit(INode::Ref node, ops::PoolOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- auto input = var(operand.node->getId())[operand.index];
- var(node->getId()) = Pool(input, op)();
+void NNInterpreter::visit(ops::PoolOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ auto input = var(operand.op->getId())[operand.index];
+ var(op.getId()) = Pool(input, op)();
}
-void NNInterpreter::visit(INode::Ref node, ops::FullyConnectedOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- TensorVariant input = var(operand.node->getId())[operand.index];
- var(node->getId()) = FullyConnected<float>(input, op)();
+void NNInterpreter::visit(ops::FullyConnectedOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ TensorVariant input = var(operand.op->getId())[operand.index];
+ var(op.getId()) = FullyConnected<float>(input, op)();
}
-void NNInterpreter::visit(INode *node, ops::CappedReluOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- Tensor<float> input(var(operand.node->getId())[operand.index]);
- var(node->getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
+void NNInterpreter::visit(ops::CappedReluOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ Tensor<float> input(var(operand.op->getId())[operand.index]);
+ var(op.getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
return std::min(std::max(input.at(id), 0.0f), op.getCap());
})();
}
-void NNInterpreter::visit(INode *node, ops::DepthwiseConv2DOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- TensorVariant input(var(operand.node->getId())[operand.index]);
- var(node->getId()) = DepthwiseConv2D(input, op)();
+void NNInterpreter::visit(ops::DepthwiseConv2DOp& op){
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ TensorVariant input(var(operand.op->getId())[operand.index]);
+ var(op.getId()) = DepthwiseConv2D(input, op)();
}
-void NNInterpreter::visit(INode *node, ops::BiasAddOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- auto input = var(operand.node->getId())[operand.index];
- var(node->getId()) = BiasAdd(input, op.getWeights(), op.getOutputShape(0))();
+void NNInterpreter::visit(ops::BiasAddOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ auto input = var(operand.op->getId())[operand.index];
+ var(op.getId()) = BiasAdd(input, op.getWeights(), op.getOutputShape(0))();
}
-void NNInterpreter::visit(INode *node, ops::BatchNormOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- TensorVariant input(var(operand.node->getId())[operand.index]);
+void NNInterpreter::visit(ops::BatchNormOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ TensorVariant input(var(operand.op->getId())[operand.index]);
// TODO implement this
- var(node->getId()) = BatchNorm<float>(input, op)();
+ var(op.getId()) = BatchNorm<float>(input, op)();
}
-void NNInterpreter::visit(INode *node, ops::ScaleOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- TensorVariant input(var(operand.node->getId())[operand.index]);
+void NNInterpreter::visit(ops::ScaleOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ TensorVariant input(var(operand.op->getId())[operand.index]);
// TODO implement this
- var(node->getId()) = Scale(input, op)();
+ var(op.getId()) = Scale(input, op)();
}
-void NNInterpreter::visit(INode *node, ops::DropoutOp &op)
-{
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- TensorVariant input(var(operand.node->getId())[operand.index]);
+void NNInterpreter::visit(ops::DropoutOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ TensorVariant input(var(operand.op->getId())[operand.index]);
// TODO implement this
- var(node->getId()) = Dropout<float>(input, op)();
+ var(op.getId()) = Dropout<float>(input, op)();
}
-void NNInterpreter::mapByName(INode::Ref n) {
- auto &nodeName = n->getName();
- if (nodeByName.find(nodeName) != nodeByName.end())
+void NNInterpreter::mapByName(Operation* op) {
+ auto &nodeName = op->getName();
+ if (_opByName.find(nodeName) != _opByName.end())
{
// TODO use common debug macro
// std::cout << "Warning: duplicate node name <" + nodeName + "> ignore node." << std::endl;
return;
}
- nodeByName[nodeName] = n;
+ _opByName[nodeName] = op;
}
-void NNInterpreter::visit(INode::Ref node, ops::TanhOp &op) {
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- Tensor<float> input(var(operand.node->getId())[operand.index]);
- var(node->getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
+void NNInterpreter::visit(ops::TanhOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ Tensor<float> input(var(operand.op->getId())[operand.index]);
+ var(op.getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
return std::tanh(input.at(id));
})();
}
-void NNInterpreter::visit(INode::Ref node, ops::ElementwiseOp &op) {
- mapByName(node);
- auto operands = node->getPrevNodes();
+void NNInterpreter::visit(ops::ElementwiseOp& op) {
+ mapByName(&op);
+ auto operands = op.getPrevNodes();
std::vector<Tensor<float>> ins;
for (auto &in : operands) {
- ins.push_back(Tensor<float>(var(in.node->getId())[in.index]));
+ ins.push_back(Tensor<float>(var(in.op->getId())[in.index]));
}
float (*func)(float,float); // Another dirty hack
switch (op.getOpType()) {
default:
assert(false && "Not supported Optype");
}
- var(node->getId()) = Fill<float>(op.getOutputShape(0), [&func, &ins, &op](const Index &id) {
+ var(op.getId()) = Fill<float>(op.getOutputShape(0), [&func, &ins, &op](const Index &id) {
float acc = ins[0].at(id);
for (size_t i = 1; i < ins.size() ; i++)
acc = func(acc, ins[i].at(id));
})();
}
-void NNInterpreter::visit(INode::Ref node, ops::DeConv2DOp &op) {
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- var(node->getId()) = DeConv2D(var(operand.node->getId())[operand.index], op)();
+void NNInterpreter::visit(ops::DeConv2DOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ var(op.getId()) = DeConv2D(var(operand.op->getId())[operand.index], op)();
}
-void NNInterpreter::visit(INode::Ref node, ops::EluOp &op) {
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- Tensor<float> input(var(operand.node->getId())[operand.index]);
- var(node->getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
+void NNInterpreter::visit(ops::EluOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ Tensor<float> input(var(operand.op->getId())[operand.index]);
+ var(op.getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
if (input.at(id) >= 0)
return input.at(id);
else
})();
}
-void NNInterpreter::visit(INode* node, ops::SqueezeOp& op) {
- mapByName(node);
- auto operand = node->getPrevNodes()[0];
- auto& input = var(operand.node->getId())[operand.index];
+void NNInterpreter::visit(ops::SqueezeOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ auto& input = var(operand.op->getId())[operand.index];
//Squeeze is just a special case of reshape
- var(node->getId()) = Reshape<float>(input, op.getOutputShape(0))();
+ var(op.getId()) = Reshape<float>(input, op.getOutputShape(0))();
}
-void NNInterpreter::visit(INode* node, ops::PadOp& op) {
+void NNInterpreter::visit(ops::PadOp& op) {
throw PassException("Not implemented yet");
}
#include "passes/interpreter/InterpreterPass.h"
#include "core/modelIR/ShapeInference.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/ShapeRange.h"
#include "core/modelIR/Tensor.h"
g->accept(&shapeInference);
- // Check nodes
- auto inputNode = g->getInput(cli::interInNode);
- if (inputNode == nullptr) {
+ // Check ops
+ auto inputOp = g->getInput(cli::interInNode);
+ if (inputOp == nullptr) {
throw PassException("input node <" + cli::interInNode +"> not found" );
}
- auto input = loadInput(inputNode->getOperation()->getOutputShape(0));
+ auto input = loadInput(inputOp->getOutputShape(0));
interpreter.setInput(cli::interInNode, input);
g->accept(&interpreter);
#include <pass/PassException.h>
#include <passes/common_frontend/shape_helper.h>
#include <functional>
-#include "core/modelIR/ir_node.h"
#include "core/modelIR/operations/VariableOp.h"
#include "core/modelIR/TensorVariant.h"
#include "onnx/onnx_pb.h"
#include "passes/common_frontend/model_allocation.h"
#include "ONNXImporterImpl.h"
#include "ONNXPerfectHash.h"
+#include <iostream>
namespace nnc {
auto name = input.name();
// Every VariableOp relates to one graph input
- auto node = _graph->create<mir::ops::VariableOp>(name);
- _opsForBlobsTheyOutput[name] = node;
+ auto op = _graph->create<mir::ops::VariableOp>(name);
+ _opsForBlobsTheyOutput[name] = op;
if (onnx_tensors.find(name) != onnx_tensors.end()) {
const onnx::TensorProto* onnx_tensor = onnx_tensors[name];
mir::Shape input_shape = ShapeHelper::createShape(onnx_tensor->dims(),
static_cast<size_t>(onnx_tensor->dims_size()));
// WARNING! Temporary solution!
- node->getOperation()->setOutputShape(0, input_shape);
+ op->setOutputShape(0, input_shape);
} else {
assert(!name.compare("data"));
_inputTensors[name] = createTensor(nullptr);
- // TODO: should we update node with special shape?
+ // TODO: should we update op with special shape?
mir::Shape input_shape = ShapeHelper::createShape(std::vector<int>(), 0);
// WARNING! Temporary solution!
- node->getOperation()->setOutputShape(0, input_shape);
+ op->setOutputShape(0, input_shape);
}
std::cout << "Node name '" << name << "' added\n"; // < std::endl;
}
for (auto onnxNode : _model->graph().node()) {
assert(onnxNode.has_op_type());
auto op_type = onnxNode.op_type().c_str();
- std::vector<mir::INode::Ref> input_nodes;
+ std::vector<mir::Operation*> input_nodes;
// Fill inputs of the given node
for (auto name : onnxNode.input()) {
if (_opsForBlobsTheyOutput.find(name) != _opsForBlobsTheyOutput.end())
std::cout << "Node name '" << name << "' was not found\n";
}
std::vector<std::shared_ptr<mir::TensorVariant>> params;
- std::vector<mir::INode::Ref> outputs;
- mir::INode *prev;
+ std::vector<mir::Operation*> outputs;
+ mir::Operation* prev;
auto *opType = ONNXPerfectHash::getONNXOpType(op_type, onnxNode.op_type().size());
// 2 variables used as result of getXXXAttribute()
bool found;
throw PassException("Concat must have 'axis' attribute");
break;
case ONNXOpCode::opReshape:
- outputs = _opCreator.createReshape(input_nodes[0], input_nodes[1]->getOperation()->getOutputShape(0));
+ outputs = _opCreator.createReshape(input_nodes[0], input_nodes[1]->getOutputShape(0));
break;
case ONNXOpCode::opRelu:
outputs = _opCreator.createRelu(input_nodes);
#include <string>
#include <onnx/onnx.pb.h>
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "ONNXOpType.h"
#include "ONNXOpCreator.h"
#include "passes/common_frontend/nn_importer.h"
// This map maps caffe tensor names to MIR operations/nodes
// that correspond to operations having these tensors as output.
- std::map<std::string, mir::INode::Ref> _opsForBlobsTheyOutput;
+ std::map<std::string, mir::Operation*> _opsForBlobsTheyOutput;
// This map keeps named tensors used as graph input initializers.
std::map<std::string, std::shared_ptr<mir::TensorVariant>> _inputTensors;
- std::vector<mir::INode::Ref> _graphOutputs;
+ std::vector<mir::Operation*> _graphOutputs;
std::string _modelFilename;
std::unique_ptr<onnx::ModelProto> _model;
#include <set>
#include <cmath>
#include "core/modelIR/Index.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/ShapeRange.h"
#include "core/modelIR/Tensor.h"
#include "core/modelIR/operations/BatchNormOp.h"
using namespace mir;
-std::vector<INode::Ref> ONNXOpCreator::createConv2D(InputOps inputs, InputParams params,
+std::vector<Operation*> ONNXOpCreator::createConv2D(InputOps inputs, InputParams params,
::onnx::NodeProto node) {
- return std::vector<INode::Ref>();
+ return std::vector<Operation*>();
}
-std::vector<INode::Ref> ONNXOpCreator::createConcat(InputOps inputs, int axis) {
+std::vector<Operation*> ONNXOpCreator::createConcat(InputOps inputs, int axis) {
return createOp<ops::ConcatOp>(inputs, inputs.size(), axis);
}
-std::vector<INode::Ref> ONNXOpCreator::createPool(InputOps inputs, ONNXOpCode opCode) {
- return std::vector<INode::Ref>();
+std::vector<Operation*> ONNXOpCreator::createPool(InputOps inputs, ONNXOpCode opCode) {
+ return std::vector<Operation*>();
}
-std::vector<INode::Ref> ONNXOpCreator::createSoftmax(InputOps inputs, int axis) {
+std::vector<Operation*> ONNXOpCreator::createSoftmax(InputOps inputs, int axis) {
return createOp<ops::SoftmaxOp>(inputs, axis);
}
-std::vector<INode::Ref> ONNXOpCreator::createReshape(INode::Ref inputData, Shape outputShape) {
- std::vector<INode::Ref> inputNodes;
+std::vector<Operation*> ONNXOpCreator::createReshape(Operation* inputData, Shape outputShape) {
+ std::vector<Operation*> inputNodes;
inputNodes.push_back(inputData);
auto outputs = createOp<ops::ReshapeOp>(inputNodes);
- outputs[0]->getOperation()->setOutputShape(0, outputShape);
+ outputs[0]->setOutputShape(0, outputShape);
return outputs;
}
-std::vector<INode::Ref> ONNXOpCreator::createRelu(InputOps inputs) {
+std::vector<Operation*> ONNXOpCreator::createRelu(InputOps inputs) {
assert(inputs.size() == 1);
return createOp<ops::ReluOp>(inputs);
}
-std::vector<INode::Ref> ONNXOpCreator::createScale(InputOps inputs, InputParams params, ::onnx::NodeProto node) {
- return std::vector<INode::Ref>();
+std::vector<Operation*> ONNXOpCreator::createScale(InputOps inputs, InputParams params, ::onnx::NodeProto node) {
+ return std::vector<Operation*>();
}
-std::vector<INode::Ref> ONNXOpCreator::createBatchNorm(InputOps inputs, InputParams params, ::onnx::NodeProto node) {
- return std::vector<INode::Ref>();
+std::vector<Operation*> ONNXOpCreator::createBatchNorm(InputOps inputs, InputParams params, ::onnx::NodeProto node) {
+ return std::vector<Operation*>();
}
-std::vector<INode::Ref> ONNXOpCreator::createDropout(InputOps inputs, float ratio) {
+std::vector<Operation*> ONNXOpCreator::createDropout(InputOps inputs, float ratio) {
return createOp<ops::SoftmaxOp>(inputs, ratio);
}
-void ONNXOpCreator::connectInputs(INode::Ref op, InputOps inputs) {
+void ONNXOpCreator::connectInputs(Operation* op, InputOps inputs) {
// TODO: this part doesn't support the situation where an operator takes as input
// some tensor that is not the 0th output of some other operator
for (int i = 0; i < static_cast<int>(inputs.size()); ++i)
#include <map>
#include <vector>
#include <memory>
-#include "core/modelIR/graph.h"
-#include "core/modelIR/ir_node.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/TensorVariant.h"
#include "core/modelIR/operations/common.h"
#include "core/modelIR/Shape.h"
class ONNXOpCreator {
public:
- using InputOps = std::vector<nnc::mir::INode::Ref>&;
+ using InputOps = std::vector<nnc::mir::Operation*>&;
using InputParams = std::vector<std::shared_ptr<nnc::mir::TensorVariant>>&;
ONNXOpCreator() {};
- std::vector<nnc::mir::INode::Ref> createConv2D(InputOps inputs, InputParams params, ::onnx::NodeProto node);
- std::vector<nnc::mir::INode::Ref> createConcat(InputOps inputs, int axis);
- std::vector<nnc::mir::INode::Ref> createPool(InputOps inputs, ONNXOpCode opCode);
- std::vector<nnc::mir::INode::Ref> createSoftmax(InputOps inputs, int axis);
- std::vector<nnc::mir::INode::Ref> createReshape(nnc::mir::INode::Ref inputData, nnc::mir::Shape outputShape);
- std::vector<nnc::mir::INode::Ref> createRelu(InputOps inputs);
- std::vector<nnc::mir::INode::Ref> createScale(InputOps inputs, InputParams params, ::onnx::NodeProto node);
- std::vector<nnc::mir::INode::Ref> createBatchNorm(InputOps inputs, InputParams params, ::onnx::NodeProto node);
- std::vector<nnc::mir::INode::Ref> createDropout(InputOps inputs, float ratio);
+ std::vector<nnc::mir::Operation*> createConv2D(InputOps inputs, InputParams params, ::onnx::NodeProto node);
+ std::vector<nnc::mir::Operation*> createConcat(InputOps inputs, int axis);
+ std::vector<nnc::mir::Operation*> createPool(InputOps inputs, ONNXOpCode opCode);
+ std::vector<nnc::mir::Operation*> createSoftmax(InputOps inputs, int axis);
+ std::vector<nnc::mir::Operation*> createReshape(nnc::mir::Operation* inputData, nnc::mir::Shape outputShape);
+ std::vector<nnc::mir::Operation*> createRelu(InputOps inputs);
+ std::vector<nnc::mir::Operation*> createScale(InputOps inputs, InputParams params, ::onnx::NodeProto node);
+ std::vector<nnc::mir::Operation*> createBatchNorm(InputOps inputs, InputParams params, ::onnx::NodeProto node);
+ std::vector<nnc::mir::Operation*> createDropout(InputOps inputs, float ratio);
void setMirGraph(mir::Graph* g){
_graph = g;
}
private:
- void connectInputs(nnc::mir::INode::Ref op, std::vector<nnc::mir::INode::Ref>& inputs);
+ void connectInputs(nnc::mir::Operation* op, std::vector<nnc::mir::Operation*>& inputs);
template <typename OpType, typename ...Types>
- std::vector<nnc::mir::INode::Ref> createOp(std::vector<nnc::mir::INode::Ref>& inputs, Types&&... args);
+ std::vector<nnc::mir::Operation*> createOp(std::vector<nnc::mir::Operation*>& inputs, Types&&... args);
mir::Graph* _graph = nullptr;
};
template<typename OpType, typename ...Types>
-std::vector<nnc::mir::INode::Ref> ONNXOpCreator::createOp(std::vector<nnc::mir::INode::Ref>& inputs, Types&&... args) {
- std::vector<nnc::mir::INode::Ref> outputs;
+std::vector<nnc::mir::Operation*> ONNXOpCreator::createOp(std::vector<nnc::mir::Operation*>& inputs, Types&&... args) {
+ std::vector<nnc::mir::Operation*> outputs;
// TODO: set operation names
auto op = _graph->create<OpType>("", std::forward<Types>(args)...);
printTmpTensors(out, ma, _formattedTensors, op);
// materialize call
out << " " << op._opName << "(";
- const auto &prevNodes = op._node->getPrevNodes();
+ const auto &prevNodes = op._op->getPrevNodes();
const auto &outTensors = op._outputs;
vector<string> args;
args.reserve(prevNodes.size() + outTensors.size() + 1);
#include <limits>
#include "ModelAnalyzer.h"
-#include "core/modelIR/ir_node.h"
#include "core/modelIR/Shape.h"
#include "core/modelIR/ShapeRange.h"
#include "core/modelIR/operations/DropoutOp.h"
#include "core/modelIR/operations/TanhOp.h"
#include "core/modelIR/operations/ElementwiseOp.h"
+#include "core/modelIR/operations/VariableOp.h"
+#include "core/modelIR/operations/SqueezeOp.h"
using namespace std;
using namespace nnc::mir;
-void ModelAnalyzer::addOpDescr(INode *node, const string &opName)
+void ModelAnalyzer::addOpDescr(Operation* op, const string& opName)
{
OpDescr::Type type = OpDescr::Type::ORDINARY;
vector<size_t> nodeOutputs;
- const std::string &name = node->getName();
+ const std::string &name = op->getName();
size_t nodeTid = INVALID_TENSOR_ID;
- if (node->getPrevNodes().empty())
+ if (op->getPrevNodes().empty())
{
- // process input node
- Shape inputShape = node->getOperation()->getOutputShape(0);
+ // process input op
+ Shape inputShape = op->getOutputShape(0);
nodeTid = allocateTensor(name, TensorDescription::Type::IN, &inputShape);
_inputs.push_back(nodeTid);
type = OpDescr::Type::IN;
}
else if (!name.empty())
{
- // process output node
+ // process output op
nodeTid = allocateTensor(name, TensorDescription::Type::OUT);
_named_tensors.push_back(nodeTid);
type = OpDescr::Type::OUT;
}
else
{
- // process ordinary node
+ // process ordinary op
nodeTid = allocateTensor();
}
assert(nodeTid != INVALID_TENSOR_ID);
nodeOutputs.push_back(nodeTid);
- // process node outputs
- // consider node as output if it has no consumers
- if (node->getNextNodes().empty())
+ // process op outputs
+ // consider op as output if it has no consumers
+ if (op->getNextNodes().empty())
{
assert(type == OpDescr::Type::OUT);
_outputs.push_back(nodeTid);
}
- // process node inputs
+ // process op inputs
vector<size_t> nodeInputs;
- for (const INode::IODescriptor &d: node->getPrevNodes())
+ for (const IODescriptor &d: op->getPrevNodes())
{
size_t idx = d.index;
- INode *node = d.node;
- assert(_nodeToDescr.find(node) != _nodeToDescr.end());
- const OpDescr &descr = *_nodeToDescr[node];
+ Operation *op = d.op;
+ assert(_opToDescr.find(op) != _opToDescr.end());
+ const OpDescr &descr = *_opToDescr[op];
const size_t &inTid = descr._outputs[idx];
nodeInputs.push_back(inTid);
}
- _inferenceSequence.push_back({type, node, opName,
+ _inferenceSequence.push_back({type, op, opName,
std::move(nodeInputs),
std::move(nodeOutputs),
0});
- _nodeToDescr[node] = &_inferenceSequence.back();
+ _opToDescr[op] = &_inferenceSequence.back();
}
size_t ModelAnalyzer::allocateTensor(const string &name, TensorDescription::Type type, Shape *shape)
return id;
}
-void ModelAnalyzer::visit(INode *node, ops::ConcatOp &op)
-{
- addOpDescr(node, "concat");
+void ModelAnalyzer::visit(ops::ConcatOp& op) {
+ addOpDescr(&op, "concat");
}
-void ModelAnalyzer::visit(INode *node, ops::Conv2DOp &op)
-{
- addOpDescr(node, "conv2d");
+void ModelAnalyzer::visit(ops::Conv2DOp& op) {
+ addOpDescr(&op, "conv2d");
}
-void ModelAnalyzer::visit(INode *node, ops::DepthwiseConv2DOp &op)
-{
- addOpDescr(node, "depthwiseConv2d");
+void ModelAnalyzer::visit(ops::DepthwiseConv2DOp& op) {
+ addOpDescr(&op, "depthwiseConv2d");
}
-void ModelAnalyzer::visit(INode *node, ops::SoftmaxOp &op)
-{
- addOpDescr(node, "softmax");
+void ModelAnalyzer::visit(ops::SoftmaxOp& op) {
+ addOpDescr(&op, "softmax");
}
/**
* Model Ir does not separate different types of pool operations, but for code generation
* it is easier to implement different types of pooling by different functions
*/
-void ModelAnalyzer::visit(INode *node, ops::PoolOp &op)
-{
- const char *funcName = nullptr;
- switch (op.getPoolingType())
- {
- case ops::PoolOp::PoolingType::MAX:
- funcName = "maxPool";
- break;
- case ops::PoolOp::PoolingType::AVG:
- funcName = "avgPool";
- break;
- default:
- assert(false && "unsupported pooling type");
+void ModelAnalyzer::visit(ops::PoolOp& op) {
+ const char* funcName = nullptr;
+ switch (op.getPoolingType()) {
+ case ops::PoolOp::PoolingType::MAX:
+ funcName = "maxPool";
+ break;
+ case ops::PoolOp::PoolingType::AVG:
+ funcName = "avgPool";
+ break;
+ default:
+ assert(false && "unsupported pooling type");
}
- addOpDescr(node, funcName);
+ addOpDescr(&op, funcName);
}
-void ModelAnalyzer::visit(INode *node, ops::FullyConnectedOp &op)
-{
- addOpDescr(node, "fullConnect");
+void ModelAnalyzer::visit(ops::FullyConnectedOp& op) {
+ addOpDescr(&op, "fullConnect");
}
-void ModelAnalyzer::visit(INode *node, ops::CappedReluOp &op)
-{
- addOpDescr(node, "cappedRelu");
+void ModelAnalyzer::visit(ops::CappedReluOp& op) {
+ addOpDescr(&op, "cappedRelu");
}
-void ModelAnalyzer::visit(INode *node, ops::BiasAddOp &op)
-{
- addOpDescr(node, "biasAdd");
+void ModelAnalyzer::visit(ops::BiasAddOp& op) {
+ addOpDescr(&op, "biasAdd");
}
-void ModelAnalyzer::visit(INode *node, ops::VariableOp &op)
-{
- assert(node->getPrevNodes().empty());
- addOpDescr(node, "in");
+void ModelAnalyzer::visit(ops::VariableOp& op) {
+ assert(op.getPrevNodes().empty());
+ addOpDescr(&op, "in");
}
-void ModelAnalyzer::visit(INode *node, ops::ReluOp &op)
-{
- addOpDescr(node, "relu");
+void ModelAnalyzer::visit(ops::ReluOp& op) {
+ addOpDescr(&op, "relu");
}
-void ModelAnalyzer::visit(INode *node, ops::ReshapeOp &op)
-{
- addOpDescr(node, "reshape");
+void ModelAnalyzer::visit(ops::ReshapeOp& op) {
+ addOpDescr(&op, "reshape");
}
-void ModelAnalyzer::visit(INode *node, ops::DropoutOp &op)
-{
- addOpDescr(node, "dropout");
+void ModelAnalyzer::visit(ops::DropoutOp& op) {
+ addOpDescr(&op, "dropout");
}
-void ModelAnalyzer::visit(INode *node, ops::ScaleOp &op)
-{
- addOpDescr(node, "scale");
+void ModelAnalyzer::visit(ops::ScaleOp& op) {
+ addOpDescr(&op, "scale");
}
-void ModelAnalyzer::visit(INode *node, ops::BatchNormOp &op) {
- addOpDescr(node, "batchNorm");
+void ModelAnalyzer::visit(ops::BatchNormOp& op) {
+ addOpDescr(&op, "batchNorm");
}
-void ModelAnalyzer::visit(mir::INode *node, mir::ops::TanhOp &op) {
- addOpDescr(node, "tanh");
+void ModelAnalyzer::visit(mir::ops::TanhOp& op) {
+ addOpDescr(&op, "tanh");
}
-void ModelAnalyzer::visit(mir::INode *node, mir::ops::ElementwiseOp &op) {
+void ModelAnalyzer::visit(mir::ops::ElementwiseOp& op) {
const char *funcName = nullptr;
switch ( op.getOpType() ) {
case ops::ElementwiseOp::OpType::sum:
default:
assert(false && "unsupported elementwise operation type");
}
- addOpDescr(node, funcName);
+ addOpDescr(&op, funcName);
}
-void ModelAnalyzer::visit(mir::INode *node, mir::ops::EluOp &op) {
- addOpDescr(node, "elu");
+void ModelAnalyzer::visit(mir::ops::EluOp& op) {
+ addOpDescr(&op, "elu");
}
-void ModelAnalyzer::visit(mir::INode *node, mir::ops::DeConv2DOp &op) {
- addOpDescr(node, "transposedconv2d");
+void ModelAnalyzer::visit(mir::ops::DeConv2DOp& op) {
+ addOpDescr(&op, "transposedconv2d");
}
-void ModelAnalyzer::visit(INode* node, ops::SqueezeOp& op) {
- addOpDescr(node, "squeeze");
+void ModelAnalyzer::visit(ops::SqueezeOp& op) {
+ addOpDescr(&op, "reshape");
}
-void ModelAnalyzer::visit(mir::INode* node, mir::ops::PadOp& op) {
+void ModelAnalyzer::visit(mir::ops::PadOp& op) {
assert(false && "Not implemented yet");
}
#include "core/modelIR/Visitor.h"
#include "core/modelIR/Shape.h"
#include "core/modelIR/TensorVariant.h"
+#include "core/modelIR/Operation.h"
#include <vector>
#include <list>
};
Type _type;
- mir::INode *_node;
+ mir::Operation* _op;
std::string _opName;
// list of input tensors
std::vector<size_t> _inputs;
* @brief Constructs inference sequence for given computational graph,
* gathers list of variables used in artifact.
*/
-class ModelAnalyzer: public mir::IVisitor
-{
+class ModelAnalyzer: public mir::IVisitor {
public:
- void visit(mir::INode *node, mir::ops::ConcatOp &op) override;
- void visit(mir::INode *node, mir::ops::Conv2DOp &op) override;
- void visit(mir::INode *node, mir::ops::DepthwiseConv2DOp &op) override;
- void visit(mir::INode *node, mir::ops::SoftmaxOp &op) override;
- void visit(mir::INode *node, mir::ops::PoolOp &op) override;
- void visit(mir::INode *node, mir::ops::FullyConnectedOp &op) override;
- void visit(mir::INode *node, mir::ops::CappedReluOp &op) override;
- void visit(mir::INode *node, mir::ops::BiasAddOp &op) override;
- void visit(mir::INode *node, mir::ops::VariableOp &op) override;
- void visit(mir::INode *node, mir::ops::ReluOp &op) override;
- void visit(mir::INode *node, mir::ops::ReshapeOp &op) override;
- void visit(mir::INode *node, mir::ops::ScaleOp &op) override;
- void visit(mir::INode *node, mir::ops::BatchNormOp &op) override;
- void visit(mir::INode *node, mir::ops::DropoutOp &op) override;
- void visit(mir::INode *node, mir::ops::TanhOp &op) override;
- void visit(mir::INode *node, mir::ops::ElementwiseOp &op) override;
- void visit(mir::INode *node, mir::ops::DeConv2DOp &op) override;
- void visit(mir::INode *node, mir::ops::EluOp &op) override;
- void visit(mir::INode* node, mir::ops::SqueezeOp& op) override;
- void visit(mir::INode* node, mir::ops::PadOp& op) override;
+ void visit(mir::ops::ConcatOp& op) override;
+ void visit(mir::ops::Conv2DOp& op) override;
+ void visit(mir::ops::DepthwiseConv2DOp& op) override;
+ void visit(mir::ops::SoftmaxOp& op) override;
+ void visit(mir::ops::PoolOp& op) override;
+ void visit(mir::ops::FullyConnectedOp& op) override;
+ void visit(mir::ops::CappedReluOp& op) override;
+ void visit(mir::ops::BiasAddOp& op) override;
+ void visit(mir::ops::VariableOp& op) override;
+ void visit(mir::ops::ReluOp& op) override;
+ void visit(mir::ops::ReshapeOp& op) override;
+ void visit(mir::ops::ScaleOp& op) override;
+ void visit(mir::ops::BatchNormOp& op) override;
+ void visit(mir::ops::DropoutOp& op) override;
+ void visit(mir::ops::TanhOp& op) override;
+ void visit(mir::ops::ElementwiseOp& op) override;
+ void visit(mir::ops::DeConv2DOp& op) override;
+ void visit(mir::ops::EluOp& op) override;
+ void visit(mir::ops::SqueezeOp& op) override;
+ void visit(mir::ops::PadOp& op) override;
/**
* @return vector of id's of network input tensors
private:
/**
* @brief Common function to add function call in inference sequence
- * @param node Node representing added call
+ * @param op Operation representing added call
* @param name Function name
*
* Inserts information about CG operation into inference sequence: name of operation,
* creates tensors for operation outputs, binds operation inputs with tensors from previous operations
*/
- void addOpDescr(mir::INode *node, const std::string &name);
+ void addOpDescr(mir::Operation* op, const std::string& name);
enum class TensorType
{
std::vector<size_t> _named_tensors;
std::vector<size_t> _outputs;
std::vector<TensorDescription> _tensors;
- std::map<const mir::INode *, OpDescr *> _nodeToDescr;
+ std::map<const mir::Operation*, OpDescr*> _opToDescr;
};
} // namespace nnc
#include "core/modelIR/operations/TanhOp.h"
#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/SqueezeOp.h"
-#include "core/modelIR/ir_node.h"
#include "pass/PassException.h"
#include <algorithm>
using nnc::mir::ShapeRange;
using nnc::mir::transposeTensor;
using nnc::mir::TensorVariant;
-using nnc::mir::INode;
namespace ops = nnc::mir::ops;
}
template<class Op>
-void Serializer::serializePads(const Op &op, int32_t padsRank)
+void Serializer::serializePads(const Op& op, int32_t padsRank)
{
// serialize padding type
assert(etoi(op.getPaddingType()) < MAX_ENUM_VAL);
}
}
-void Serializer::visit(INode *node, ops::ConcatOp &op)
-{
+void Serializer::visit(ops::ConcatOp& op) {
_curOp->_paramStartOffset = _buffer.size();
// axis number should fit into one byte
assert(op.getAxis() <= MAX_DIMS);
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(INode *node, ops::Conv2DOp &op)
-{
+void Serializer::visit(ops::Conv2DOp& op) {
_curOp->_paramStartOffset = _buffer.size();
// serialize kernel
shared_ptr<TensorVariant> HWCNKernel = make_shared<TensorVariant>(op.getKernel());
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(INode *node, ops::DepthwiseConv2DOp &op)
-{
+void Serializer::visit(ops::DepthwiseConv2DOp& op) {
_curOp->_paramStartOffset = _buffer.size();
// serialize kernel
const TensorVariant &kernel = op.getKernel();
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(INode *node, ops::SoftmaxOp &op)
-{
+void Serializer::visit(ops::SoftmaxOp& op) {
_curOp->_paramStartOffset = _buffer.size();
// axis number should fit into one byte
assert(op.getAxis() <= MAX_DIMS);
serializeT<int32_t>(op.getAxis());
}
-void Serializer::visit(INode *node, ops::PoolOp &op)
-{
+void Serializer::visit(ops::PoolOp& op) {
_curOp->_paramStartOffset = _buffer.size();
// serialize window shape
const Shape &windowShape = op.getWindowShape();
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(INode *node, ops::FullyConnectedOp &op)
-{
+void Serializer::visit(ops::FullyConnectedOp& op) {
_curOp->_paramStartOffset = _buffer.size();
shared_ptr<TensorVariant> weights = make_shared<TensorVariant>(op.getWeights());
shared_ptr<TensorVariant> transposedWeights = transposeTensor<1, 0>(weights);
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(INode *node, ops::CappedReluOp &op)
-{
+void Serializer::visit(ops::CappedReluOp& op) {
_curOp->_paramStartOffset = _buffer.size();
serializeT<float>(op.getCap());
}
-void Serializer::visit(INode *node, ops::BiasAddOp &op)
-{
+void Serializer::visit(ops::BiasAddOp& op) {
_curOp->_paramStartOffset = _buffer.size();
serializeTensor(op.getWeights());
}
-void Serializer::visit(INode *node, ops::VariableOp &op)
-{
+void Serializer::visit(ops::VariableOp& op) {
// no parameters to dump
}
-void Serializer::visit(INode *node, ops::ReluOp &op)
-{
+void Serializer::visit(ops::ReluOp& op) {
_curOp->_paramStartOffset = _buffer.size();
// no parameters to dump
}
-void Serializer::visit(INode *node, ops::ReshapeOp &op)
-{
+void Serializer::visit(ops::ReshapeOp& op) {
_curOp->_paramStartOffset = _buffer.size();
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(INode *node, ops::BatchNormOp &op)
-{
+void Serializer::visit(ops::BatchNormOp& op) {
_curOp->_paramStartOffset = _buffer.size();
serializeT<float>(op.getEps());
serializeT<float>(op.getMovingAvgFraction());
serializeT<int32_t>(op.getSpatial());
}
-void Serializer::visit(INode *node, ops::ScaleOp &op)
-{
+void Serializer::visit(ops::ScaleOp& op) {
_curOp->_paramStartOffset = _buffer.size();
serializeTensor(op.getWeights());
}
-void Serializer::visit(INode *node, ops::DropoutOp &op)
-{
+void Serializer::visit(ops::DropoutOp& op) {
_curOp->_paramStartOffset = _buffer.size();
serializeT<float>(op.getRate());
}
{
for (OpDescr &descr: inferenceSequence)
{
- INode *node = descr._node;
_curOp = &descr;
- node->accept(this);
+ descr._op->accept(this);
}
}
-void Serializer::visit(mir::INode *node, mir::ops::TanhOp &op) {
+void Serializer::visit(mir::ops::TanhOp& op) {
_curOp->_paramStartOffset = _buffer.size();
// no parameters to dump
}
-void Serializer::visit(mir::INode *node, mir::ops::ElementwiseOp &op) {
+void Serializer::visit(mir::ops::ElementwiseOp& op) {
_curOp->_paramStartOffset = _buffer.size();
// Op type is known at codegen Time
serializeT<int>((int32_t) op.getNumInputs());
}
-void Serializer::visit(mir::INode *node, mir::ops::EluOp &op) {
+void Serializer::visit(mir::ops::EluOp& op) {
_curOp->_paramStartOffset = _buffer.size();
serializeT<float>(op.getAlpha());
}
-void Serializer::visit(mir::INode *node, mir::ops::DeConv2DOp &op) {
+void Serializer::visit(mir::ops::DeConv2DOp& op) {
_curOp->_paramStartOffset = _buffer.size();
// serialize kernel
shared_ptr<TensorVariant> HWCNKernel = make_shared<TensorVariant>(op.getKernel());
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(INode* node, ops::SqueezeOp& op) {
+void Serializer::visit(ops::SqueezeOp& op) {
_curOp->_paramStartOffset = _buffer.size();
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(mir::INode* node, mir::ops::PadOp& op) {
+void Serializer::visit(mir::ops::PadOp& op) {
throw PassException("Not implemented yet");
}
* To gather this vector use `getBuffer` method.
* Objects of this class are one-off and not designed to serialize more than one IR
*/
-class Serializer: public mir::IVisitor
-{
+class Serializer: public mir::IVisitor {
public:
- void visit(mir::INode *node, mir::ops::ConcatOp &op) override;
- void visit(mir::INode *node, mir::ops::Conv2DOp &op) override;
- void visit(mir::INode *node, mir::ops::DepthwiseConv2DOp &op) override;
- void visit(mir::INode *node, mir::ops::SoftmaxOp &op) override;
- void visit(mir::INode *node, mir::ops::PoolOp &op) override;
- void visit(mir::INode *node, mir::ops::FullyConnectedOp &op) override;
- void visit(mir::INode *node, mir::ops::CappedReluOp &op) override;
- void visit(mir::INode *node, mir::ops::BiasAddOp &op) override;
- void visit(mir::INode *node, mir::ops::VariableOp &op) override;
- void visit(mir::INode *node, mir::ops::ReluOp &op) override;
- void visit(mir::INode *node, mir::ops::ReshapeOp &op) override;
- void visit(mir::INode *node, mir::ops::ScaleOp &op) override;
- void visit(mir::INode *node, mir::ops::BatchNormOp &op) override;
- void visit(mir::INode *node, mir::ops::DropoutOp &op) override;
- void visit(mir::INode *node, mir::ops::TanhOp &op) override;
- void visit(mir::INode *node, mir::ops::ElementwiseOp &op) override;
- void visit(mir::INode *node, mir::ops::DeConv2DOp &op) override;
- void visit(mir::INode *node, mir::ops::EluOp &op) override;
- void visit(mir::INode* node, mir::ops::SqueezeOp& op) override;
- void visit(mir::INode* node, mir::ops::PadOp& op) override;
+ void visit(mir::ops::ConcatOp& op) override;
+ void visit(mir::ops::Conv2DOp& op) override;
+ void visit(mir::ops::DepthwiseConv2DOp& op) override;
+ void visit(mir::ops::SoftmaxOp& op) override;
+ void visit(mir::ops::PoolOp& op) override;
+ void visit(mir::ops::FullyConnectedOp& op) override;
+ void visit(mir::ops::CappedReluOp& op) override;
+ void visit(mir::ops::BiasAddOp& op) override;
+ void visit(mir::ops::VariableOp& op) override;
+ void visit(mir::ops::ReluOp& op) override;
+ void visit(mir::ops::ReshapeOp& op) override;
+ void visit(mir::ops::ScaleOp& op) override;
+ void visit(mir::ops::BatchNormOp& op) override;
+ void visit(mir::ops::DropoutOp& op) override;
+ void visit(mir::ops::TanhOp& op) override;
+ void visit(mir::ops::ElementwiseOp& op) override;
+ void visit(mir::ops::DeConv2DOp& op) override;
+ void visit(mir::ops::EluOp& op) override;
+ void visit(mir::ops::SqueezeOp& op) override;
+ void visit(mir::ops::PadOp& op) override;
void serialize(std::list<OpDescr> &inferenceSequence);
// So far we assume that if the first dimension is equal to 1,
// then it is the batch dimension and should be ignored
ShapeHelper::cutOffBatchDim(inputShape);
- node->getOperation()->setOutputShape(0, inputShape);
+ node->setOutputShape(0, inputShape);
}
for (auto op: *(s->operators()))
auto inputs = getPrecedingMIROps(op);
auto params = createOpParams(op);
- std::vector<INode::Ref> outputs;
+ std::vector<mir::Operation*> outputs;
unsigned int opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
switch (opcode) {
_opsForTensorsTheyOutput[(*(op->outputs()))[i]] = outputs[i];
}
-std::vector<INode::Ref> TfliteImporter::getPrecedingMIROps(const Operator* op) {
- std::vector<INode::Ref> inputsForOp;
+std::vector<mir::Operation*> TfliteImporter::getPrecedingMIROps(const Operator* op) {
+ std::vector<mir::Operation*> inputsForOp;
try {
for (auto i : *(op->inputs())) {
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<INode::Ref> TFLiteOpCreator::convertConv2D(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertConv2D(InputOps inputs, InputParams params,
const Conv2DOptions* opts) {
auto outputs = createOp<ops::Conv2DOp>(inputs, ActivationFunctionType_NONE, std::move(*params[0]),
Shape{static_cast<int32_t>(opts->stride_h()),
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<INode::Ref> TFLiteOpCreator::convertDepthwiseConv2D(InputOps inputs, InputParams params,
- const DepthwiseConv2DOptions* opts) {
+std::vector<mir::Operation*> TFLiteOpCreator::convertDepthwiseConv2D(InputOps inputs,
+ InputParams params,
+ const DepthwiseConv2DOptions* opts) {
auto outputs = createOp<ops::DepthwiseConv2DOp>(
inputs, ActivationFunctionType_NONE, std::move(*params[0]),
Shape{static_cast<int32_t>(opts->stride_h()),
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<INode::Ref> TFLiteOpCreator::convertConcatenation(InputOps inputs, InputParams params,
- const ConcatenationOptions* opts) {
+std::vector<mir::Operation*> TFLiteOpCreator::convertConcatenation(InputOps inputs,
+ InputParams params,
+ const ConcatenationOptions* opts) {
// Decrementing axis to account for the unnecessary batch dimension
return createOp<ops::ConcatOp>(inputs, opts->fused_activation_function(), inputs.size(),
opts->axis() - 1);
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<INode::Ref> TFLiteOpCreator::convertMaxPool2D(InputOps inputs, InputParams params,
- const Pool2DOptions* opts) {
+std::vector<mir::Operation*> TFLiteOpCreator::convertMaxPool2D(InputOps inputs, InputParams params,
+ const Pool2DOptions* opts) {
return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(),
Shape{static_cast<int32_t>(opts->filter_height()),
static_cast<int32_t>(opts->filter_width()), 1},
ops::PoolOp::BorderType::EMPTY);
}
-std::vector<INode::Ref> TFLiteOpCreator::convertAveragePool2D(InputOps inputs, InputParams params,
- const Pool2DOptions* opts) {
+std::vector<mir::Operation*> TFLiteOpCreator::convertAveragePool2D(InputOps inputs,
+ InputParams params,
+ const Pool2DOptions* opts) {
return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(),
Shape{static_cast<int32_t>(opts->filter_height()),
static_cast<int32_t>(opts->filter_width()), 1},
ops::PoolOp::BorderType::EMPTY);
}
-std::vector<INode::Ref> TFLiteOpCreator::createSoftmax(InputOps inputs, InputParams params,
- const SoftmaxOptions* opts) {
+std::vector<mir::Operation*> TFLiteOpCreator::createSoftmax(InputOps inputs, InputParams params,
+ const SoftmaxOptions* opts) {
// -1 represents last one dimension
return createOp<ops::SoftmaxOp>(inputs, ActivationFunctionType_NONE, -1);
}
-std::vector<INode::Ref> TFLiteOpCreator::convertReshape(InputOps inputs, InputParams params,
- const ReshapeOptions* opts) {
+std::vector<mir::Operation*> TFLiteOpCreator::convertReshape(InputOps inputs, InputParams params,
+ const ReshapeOptions* opts) {
auto outputs = createOp<ops::ReshapeOp>(inputs, ActivationFunctionType_NONE);
// TODO: we should also support "-1" values in new_shape, which means that correct
// shape values must be calculated. Better do it in the shape inference module.
Shape newShape = ShapeHelper::createShape(*opts->new_shape(), opts->new_shape()->size());
- outputs[0]->getOperation()->setOutputShape(0, newShape);
+ outputs[0]->setOutputShape(0, newShape);
return outputs;
}
void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions* opts,
- std::set<std::string>& problems_op_set) {
+ std::set<std::string>& problems_op_set) {
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<INode::Ref> TFLiteOpCreator::convertFullyConnected(InputOps& inputs,
- InputParams& params,
- const FullyConnectedOptions* opts) {
+std::vector<mir::Operation*>
+TFLiteOpCreator::convertFullyConnected(InputOps& inputs,
+ InputParams& params,
+ const FullyConnectedOptions* opts) {
// Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize]
auto outputs = createOp<ops::ReshapeOp>(inputs, ActivationFunctionType_NONE);
int32_t fcInputSize = params[0]->getShape().dim(0);
- outputs[0]->getOperation()->setOutputShape(0, {1, fcInputSize});
+ outputs[0]->setOutputShape(0, {1, fcInputSize});
auto fc_outputs = createOp<ops::FullyConnectedOp>(outputs, ActivationFunctionType_NONE,
std::move(*params[0]));
}
void TFLiteOpCreator::checkActivationType(ActivationFunctionType activation_type,
- std::set<std::string>& problems_op_set) {
+ std::set<std::string>& problems_op_set) {
if (activation_type != ActivationFunctionType_NONE
&& activation_type != ActivationFunctionType_RELU
&& activation_type != ActivationFunctionType_RELU6)
+ EnumNamesActivationFunctionType()[activation_type]);
}
-INode::Ref TFLiteOpCreator::addFusedActivation(INode::Ref input,
- ActivationFunctionType activation_type) {
- INode::Ref activation;
+mir::Operation* TFLiteOpCreator::addFusedActivation(mir::Operation* input,
+ ActivationFunctionType activation_type) {
+ mir::Operation* activation;
if (activation_type != ActivationFunctionType_NONE) {
// TODO: process other activation types
assert(false && "Unsupported activation types must be detected before this pass");
}
- assert(input->getOperation()->getNumOutputs() == 1);
+ assert(input->getNumOutputs() == 1);
activation->connectInputTo(0, input->getOutput(0));
return activation;
} else {
}
}
-void TFLiteOpCreator::connectInputs(INode::Ref op, std::vector<INode::Ref>& inputs) {
+void TFLiteOpCreator::connectInputs(mir::Operation* op, std::vector<mir::Operation*>& inputs) {
// TODO: this part doesn't support the situation where an operator takes as input
// some tensor that is not the 0th output of some other operator
- assert(inputs.size() == op->getOperation()->getNumInputs());
+ assert(inputs.size() == op->getNumInputs());
for (size_t i = 0; i < inputs.size(); ++i)
op->connectInputTo(i, inputs[i]->getOutput(0));
}
-std::vector<INode*> TFLiteOpCreator::createSqueeze(InputOps inputs, InputParams params,
- const ::tflite::SqueezeOptions* opts) {
+std::vector<mir::Operation*> TFLiteOpCreator::createSqueeze(InputOps inputs, InputParams params,
+ const ::tflite::SqueezeOptions* opts) {
std::vector<int32_t> squeeze_dims{opts->squeeze_dims()->begin(), opts->squeeze_dims()->end()};
#include <memory>
#include <cstdint>
-#include "core/modelIR/graph.h"
-#include "core/modelIR/ir_node.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/TensorVariant.h"
#include "core/modelIR/Shape.h"
namespace ops = mir::ops;
using mir::Graph;
-using mir::INode;
using IrTensor = mir::TensorVariant;
using mir::Shape;
class TFLiteOpCreator {
public:
- using InputOps = std::vector<INode::Ref>&;
+ using InputOps = std::vector<mir::Operation*>&;
using InputParams = std::vector<std::shared_ptr<IrTensor>>&;
explicit TFLiteOpCreator(Graph* g) : graph(g) {};
- std::vector<INode::Ref> convertConv2D(InputOps, InputParams, const ::tflite::Conv2DOptions*);
+ std::vector<mir::Operation*> convertConv2D(InputOps, InputParams, const ::tflite::Conv2DOptions*);
- std::vector<INode::Ref> convertDepthwiseConv2D(InputOps, InputParams,
- const ::tflite::DepthwiseConv2DOptions*);
+ std::vector<mir::Operation*> convertDepthwiseConv2D(InputOps, InputParams,
+ const ::tflite::DepthwiseConv2DOptions*);
- std::vector<INode::Ref> convertConcatenation(InputOps, InputParams,
- const ::tflite::ConcatenationOptions*);
+ std::vector<mir::Operation*> convertConcatenation(InputOps, InputParams,
+ const ::tflite::ConcatenationOptions*);
- std::vector<INode::Ref> convertMaxPool2D(InputOps, InputParams, const ::tflite::Pool2DOptions*);
+ std::vector<mir::Operation*> convertMaxPool2D(InputOps, InputParams,
+ const ::tflite::Pool2DOptions*);
- std::vector<INode::Ref> convertAveragePool2D(InputOps, InputParams,
- const ::tflite::Pool2DOptions*);
+ std::vector<mir::Operation*> convertAveragePool2D(InputOps, InputParams,
+ const ::tflite::Pool2DOptions*);
- std::vector<INode::Ref> createSoftmax(InputOps, InputParams, const ::tflite::SoftmaxOptions*);
+ std::vector<mir::Operation*> createSoftmax(InputOps, InputParams, const ::tflite::SoftmaxOptions*);
- std::vector<INode::Ref> convertReshape(InputOps, InputParams, const ::tflite::ReshapeOptions*);
+ std::vector<mir::Operation*> convertReshape(InputOps, InputParams,
+ const ::tflite::ReshapeOptions*);
- std::vector<INode::Ref> convertFullyConnected(InputOps, InputParams,
- const ::tflite::FullyConnectedOptions*);
+ std::vector<mir::Operation*> convertFullyConnected(InputOps, InputParams,
+ const ::tflite::FullyConnectedOptions*);
- std::vector<INode*> createSqueeze(InputOps& inputs, InputParams& params, const ::tflite::SqueezeOptions* opts);
+ std::vector<mir::Operation*> createSqueeze(InputOps& inputs, InputParams& params,
+ const ::tflite::SqueezeOptions* opts);
void checkPool2D(const ::tflite::Pool2DOptions*, std::set<std::string>&);
void checkActivationType(::tflite::ActivationFunctionType, std::set<std::string>&);
- INode::Ref addFusedActivation(INode::Ref input, ::tflite::ActivationFunctionType activationType);
+ mir::Operation* addFusedActivation(mir::Operation* input,
+ ::tflite::ActivationFunctionType activationType);
- void connectInputs(INode::Ref op, std::vector<INode::Ref>& inputs);
+ void connectInputs(mir::Operation* op, std::vector<mir::Operation*>& inputs);
template<typename OpType, typename... Types>
- std::vector<INode::Ref> createOp(std::vector<INode::Ref>& inputs,
- ::tflite::ActivationFunctionType activation, Types&& ... args);
+ std::vector<mir::Operation*> createOp(std::vector<mir::Operation*>& inputs,
+ ::tflite::ActivationFunctionType activation,
+ Types&& ... args);
};
template<typename OpType, typename... Types>
-std::vector<INode::Ref> TFLiteOpCreator::createOp(
- std::vector<INode::Ref>& inputs,
+std::vector<mir::Operation*> TFLiteOpCreator::createOp(
+ std::vector<mir::Operation*>& inputs,
::tflite::ActivationFunctionType activation, Types&& ... args) {
- std::vector<INode::Ref> outputs;
+ std::vector<mir::Operation*> outputs;
// TODO: how to name operations? in Tensorflow tensors get names, not operations
auto op = graph->create<OpType>("", std::forward<Types>(args)...);
using namespace nnc::mir;
-static INode::Ref createFullyConnected(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createFullyConnected(std::unique_ptr<Graph>& g,
+ const opinfo::OperatorInfo* opInfo)
{
return g->create<ops::FullyConnectedOp>(
"y", *getKernel(opInfo));
}
-static INode::Ref createConv2D(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createConv2D(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
{
return g->create<ops::Conv2DOp>(
"y", *getKernel(opInfo), getShapeParam(opInfo, 0), getPaddingType(opInfo));
}
-static INode::Ref createDepthwiseConv2D(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createDepthwiseConv2D(std::unique_ptr<Graph>& g,
+ const opinfo::OperatorInfo* opInfo)
{
return g->create<ops::DepthwiseConv2DOp>(
"y", *getKernel(opInfo), getShapeParam(opInfo, 0), getPaddingType(opInfo));
}
-static INode::Ref createPool(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createPool(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
{
return g->create<ops::PoolOp>("y", getShapeParam(opInfo, 0), getShapeParam(opInfo, 1),
getPoolingType(opInfo), getPaddingType(opInfo), ops::PoolOp::BorderType::ZEROFILLED);
}
-static INode::Ref createConcatenation(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createConcatenation(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
{
return g->create<ops::ConcatOp>("y", opInfo->inputs()->size(), getAxis(opInfo));
}
-static INode::Ref createReshape(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createReshape(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
{
auto op = g->create<ops::ReshapeOp>("y");
- op->getOperation()->setOutputShape(0, getShapeParam(opInfo, 0));
+ op->setOutputShape(0, getShapeParam(opInfo, 0));
return op;
}
-static INode::Ref createReLU(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createReLU(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
{
(void)opInfo;
return g->create<ops::ReluOp>("y");
}
-static INode::Ref createCappedReLU(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createCappedReLU(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
{
return g->create<ops::CappedReluOp>("y", getAxis(opInfo));
}
-static INode::Ref createSoftmax(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createSoftmax(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
{
return g->create<ops::SoftmaxOp>("y", getAxis(opInfo));
}
-static INode::Ref createBiasAdd(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createBiasAdd(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
{
return g->create<ops::BiasAddOp>("y", *getKernel(opInfo));
}
-static INode::Ref createOp(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createOp(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
{
switch (opInfo->op())
{
for (unsigned int i = 0; i < opInfo->inputs()->size(); ++i)
{
// Create i-th input node
- auto inputNode = g->create<ops::VariableOp>("x" + std::to_string(i));
+ auto inputOp = g->create<ops::VariableOp>("x" + std::to_string(i));
// Connect i-th operation input to i-th input node
- opNode->connectInputTo(i, inputNode->getOutput(0));
+ opNode->connectInputTo(i, inputOp->getOutput(0));
// Set input shape
auto inputShapeIter = opInfo->inputs()->Get(i)->shape()->dims();
Shape inputShape = ShapeHelper::createShape(*inputShapeIter, inputShapeIter->size());
- inputNode->getOperation()->setOutputShape(0, inputShape);
+ inputOp->setOutputShape(0, inputShape);
}
// Mark outputs
#ifndef NNC_INTERPRETER_OP_TEST_GRAPH_CREATOR_H
#define NNC_INTERPRETER_OP_TEST_GRAPH_CREATOR_H
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
std::unique_ptr<nnc::mir::Graph> make_graph(const opinfo::OperatorInfo* opInfo);
#include "op_info_generated.h"
#include "passes/interpreter/Interpreter.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "op_info_util.h"
#include "graph_creator.h"
#include "support/CommandLine.h"
#include "option/Options.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/Shape.h"
#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/VariableOp.h"
// Creates simple graph with input and output
void fillGraph(Graph &g)
{
- INode *opNode = g.create<ops::ReluOp>("out");
+ Operation* outputOp = g.create<ops::ReluOp>("out");
Shape inputShape{1, 2, 3};
- INode *inputNode = g.create<ops::VariableOp>("in");
+ Operation* inputOp = g.create<ops::VariableOp>("in");
- opNode->connectInputTo(0, inputNode->getOutput(0));
+ outputOp->connectInputTo(0, inputOp->getOutput(0));
- inputNode->getOperation()->setOutputShape(0, inputShape);
+ inputOp->setOutputShape(0, inputShape);
- g.markOutput(opNode);
+ g.markOutput(outputOp);
ShapeInference shapeInferencer;
g.accept(&shapeInferencer);
-set(TESTS "ir_node.cpp"
- "operation.cpp"
+set(TESTS "operation.cpp"
"ShapeIndex.cpp"
"ShapeInference.cpp"
"ShapeRange.cpp"
#include <gtest/gtest.h>
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
+#include "core/modelIR/operations/VariableOp.h"
#include "core/modelIR/operations/ReluOp.h"
-
#include "core/modelIR/operations/ConcatOp.h"
namespace {
public:
DumpVisitor(std::ostream& s) : _s(s) {}
- void visit(INode* node, ops::VariableOp& op) override {
- _s << "i" << node->getName();
+ void visit(ops::VariableOp& op) override {
+ _s << "i" << op.getName();
};
- void visit(INode* node, ops::ReluOp& op) override {
- _s << "r" << node->getName();
+ void visit(ops::ReluOp& op) override {
+ _s << "r" << op.getName();
}
- void visit(INode* node, ops::ConcatOp& op) override {
- _s << "c" << node->getName();
+ void visit(ops::ConcatOp& op) override {
+ _s << "c" << op.getName();
}
std::ostream& _s;
g->replaceOutputNodes({"op3"});
- std::vector<INode::Ref> expectedOutputs{n3};
+ std::vector<Operation*> expectedOutputs{n3};
ASSERT_EQ(g->collectOutputs(), expectedOutputs);
delete g;
};
auto in2 = g->replaceWithInputNode(n2);
- std::vector<INode::Ref> expectedInputs{in2, n1};
+ std::vector<Operation*> expectedInputs{in2, n1};
ASSERT_EQ(g->collectInputs(), expectedInputs);
delete g;
}
#include <gtest/gtest.h>
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
+#include "core/modelIR/operations/VariableOp.h"
#include "core/modelIR/operations/ReluOp.h"
+#include "core/modelIR/operations/ConcatOp.h"
namespace {
public:
DumpVisitor(std::ostream& s) : _s(s) {}
- void visit(INode* node, ops::VariableOp& op) override {
- _s << "i" << node->getName();
+ void visit(ops::VariableOp& op) override {
+ _s << "i" << op.getName();
};
- void visit(INode* node, ops::ReluOp& op) override {
- _s << "r" << node->getName();
+ void visit(ops::ReluOp& op) override {
+ _s << "r" << op.getName();
}
- void visit(INode* node, ops::ConcatOp& op) override {
- _s << "c" << node->getName();
+ void visit(ops::ConcatOp& op) override {
+ _s << "c" << op.getName();
}
std::ostream& _s;
* limitations under the License.
*/
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/ShapeInference.h"
#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/SqueezeOp.h"
Shape resultShape{ 10, 1, 10 };
auto input = g.create<ops::VariableOp>("input");
- input->getOperation()->setOutputShape(0, Shape{ 10, 2, 5} );
+ input->setOutputShape(0, Shape{ 10, 2, 5} );
- auto n = g.create<ops::ReshapeOp>("reshape");
- n->getOperation()->setInputShape( 0, Shape{10, 2, 5} );
- n->getOperation()->setOutputShape(0, Shape{10, 1, Shape::AUTO_DIM} );
- n->connectInputTo(0, input->getOutput(0));
+ auto op = g.create<ops::ReshapeOp>("reshape");
+ op->setInputShape( 0, Shape{10, 2, 5} );
+ op->setOutputShape(0, Shape{10, 1, Shape::AUTO_DIM} );
+ op->connectInputTo(0, input->getOutput(0));
- si.visit(n, *static_cast<ops::ReshapeOp*>(n->getOperation()));
+ si.visit(*static_cast<ops::ReshapeOp*>(op));
- ASSERT_EQ(resultShape, n->getOperation()->getOutputShape(0));
+ ASSERT_EQ(resultShape, op->getOutputShape(0));
}
TEST(ShapeInferenceTest, ReshapeAutoDimensionVaryRank) {
auto input = g.create<ops::VariableOp>("input");
- input->getOperation()->setOutputShape(0, inputShape);
+ input->setOutputShape(0, inputShape);
- auto n = g.create<ops::ReshapeOp>("reshape");
- n->getOperation()->setInputShape( 0, inputShape);
- n->connectInputTo(0, input->getOutput(0));
+ auto op = g.create<ops::ReshapeOp>("reshape");
+ op->setInputShape( 0, inputShape);
+ op->connectInputTo(0, input->getOutput(0));
// test shrink
- n->getOperation()->setOutputShape(0, Shape{10, Shape::AUTO_DIM});
- si.visit(n, *static_cast<ops::ReshapeOp*>(n->getOperation()));
- ASSERT_EQ(resultShapeShrink, n->getOperation()->getOutputShape(0));
+ op->setOutputShape(0, Shape{10, Shape::AUTO_DIM});
+ si.visit(*dynamic_cast<ops::ReshapeOp*>(op));
+ ASSERT_EQ(resultShapeShrink, op->getOutputShape(0));
// test expansion
- n->getOperation()->setOutputShape(0, Shape{5, Shape::AUTO_DIM, 2, 2});
- si.visit(n, *static_cast<ops::ReshapeOp*>(n->getOperation()));
- ASSERT_EQ(resultShapeExpand, n->getOperation()->getOutputShape(0));
+ op->setOutputShape(0, Shape{5, Shape::AUTO_DIM, 2, 2});
+ si.visit(*dynamic_cast<ops::ReshapeOp*>(op));
+ ASSERT_EQ(resultShapeExpand, op->getOutputShape(0));
}
TEST(ShapeInferenceTest, SqueezeTestAllDims) {
Shape expected_shape{2, 4};
auto input = g.create<ops::VariableOp>("input");
- input->getOperation()->setOutputShape(0, input_shape);
+ input->setOutputShape(0, input_shape);
auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", std::vector<int32_t>{});
sq1->connectInputTo(0, input->getOutput(0));
g.accept(&si);
- ASSERT_EQ(sq1->getOperation()->getOutputShape(0), expected_shape);
+ ASSERT_EQ(sq1->getOutputShape(0), expected_shape);
}
TEST(ShapeInferenceTest, SqueezeTestSpecificDims) {
Shape expected_shape{1, 2, 4};
auto input = g.create<ops::VariableOp>("input");
- input->getOperation()->setOutputShape(0, input_shape);
+ input->setOutputShape(0, input_shape);
auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", std::vector<int32_t>{2});
g.accept(&si);
- ASSERT_EQ(sq1->getOperation()->getOutputShape(0), expected_shape);
+ ASSERT_EQ(sq1->getOutputShape(0), expected_shape);
}
TEST(ShapeInferenceTest, SqueezeTestScalarResult) {
Shape expected_shape{1};
auto input = g.create<ops::VariableOp>("input");
- input->getOperation()->setOutputShape(0, input_shape);
+ input->setOutputShape(0, input_shape);
auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", std::vector<int32_t>{});
g.accept(&si);
- ASSERT_EQ(sq1->getOperation()->getOutputShape(0), expected_shape);
+ ASSERT_EQ(sq1->getOutputShape(0), expected_shape);
}
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "core/modelIR/operations/operation.h"
-#include "core/modelIR/operations/ReshapeOp.h"
-#include "core/modelIR/ir_node.h"
-
-#include <gtest/gtest.h>
-
-using namespace nnc::mir;
-
-TEST(IRNode, ConnectionTest) {
-
- auto node1 = Node<ops::ReshapeOp>::createNode("node1", 0);
- auto node2 = Node<ops::ReshapeOp>::createNode("node2", 1);
-
- node2->connectInputTo(0, node1->getOutput(0));
-
- ASSERT_EQ(node1->getId(), node2->getPrevNodes()[0].node->getId());
-
- delete node1;
- delete node2;
-}
* limitations under the License.
*/
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
+#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
#include "core/modelIR/operations/ConcatOp.h"
using namespace nnc::mir;
-TEST(OpDescription, InputOutputShapeTest) {
+TEST(Operation, ConnectionTest) {
+
+  // Ops are constructed directly now (the Node wrapper is gone); ids are set
+  // manually because these ops are never registered in a Graph.
+ auto op1 = new ops::ReshapeOp();
+ op1->setId(0);
+ auto op2 = new ops::ReshapeOp();
+ op2->setId(1);
+
+  // Wire op1's output #0 into op2's input #0.
+ op2->connectInputTo(0, op1->getOutput(0));
+
+  // The producer recorded on op2's first input slot must be op1.
+ ASSERT_EQ(op1->getId(), op2->getPrevNodes()[0].op->getId());
+
+  // NOTE(review): raw new/delete -- if the ASSERT above fails, gtest returns
+  // from the test early and both ops leak; consider std::unique_ptr here.
+ delete op1;
+ delete op2;
+}
+
+TEST(Operation, InputOutputShapeTest) {
Shape inShape{1,2,3};
Shape outShape{3,2,1};
- OpDescription op(1, 1);
+  // NOTE(review): the generic 1-in/1-out OpDescription is removed by this
+  // change, so a concrete SoftmaxOp stands in for it -- the test now depends
+  // on SoftmaxOp providing at least one input and one output slot; confirm.
+ ops::SoftmaxOp op(0);
op.setInputShape(0, inShape );
op.setOutputShape(0, outShape );
ASSERT_EQ(outShape, op.getOutputShape(0));
}
-TEST(OpDescription, SoftmaxAxisTest) {
+TEST(Operation, SoftmaxAxisTest) {
Shape inShape{1,2,3};
ops::SoftmaxOp op_1(1);
ASSERT_EQ(op_n3.getAxis(), 0);
}
-TEST(OpDescription, ConcatAxisTest) {
+TEST(Operation, ConcatAxisTest) {
Shape inShape{1,2,3};
ops::ConcatOp op_1(2, 1);
#include <dlfcn.h>
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "support/CommandLine.h"
#include "pass/Pass.h"
#include "pass/PassData.h"
// various headers
#include "core/modelIR/TensorVariant.h"
#include "core/modelIR/Tensor.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/ShapeRange.h"
#include "core/modelIR/ShapeInference.h"
*/
/** Creates graph with one operation generated by opGen function and returns this operation node*/
-mir::INode *fillGraph(mir::Graph &g, function<mir::INode *(mir::Graph &g)> opGen,
- const vector<unique_ptr<mir::TensorVariant>> &inputNTensors)
+mir::Operation* fillGraph(mir::Graph& g, function<mir::Operation*(mir::Graph& g)> opGen,
+ const vector<unique_ptr<mir::TensorVariant>>& inputNTensors)
{
- // Create operation node
- mir::INode *opNode = opGen(g);
+ // Create operation
+ mir::Operation* op = opGen(g);
+  // NOTE(review): numInputs comes from getPrevNodes().size(), so the op's
+  // input-slot vector is presumably pre-sized by its constructor even before
+  // any input is connected -- verify this holds for every op opGen can return.
- int numInputs = opNode->getPrevNodes().size();
+ int numInputs = op->getPrevNodes().size();
assert(inputNTensors.size() == static_cast<size_t>(numInputs));
for (int i = 0; i < numInputs; ++i)
{
// Create i-th input node
- auto inputNode = g.create<mir::ops::VariableOp>("x" + std::to_string(i));
+ auto inputOp = g.create<mir::ops::VariableOp>("x" + std::to_string(i));
// Connect i-th operation input to i-th input node
- opNode->connectInputTo(i, inputNode->getOutput(0));
+ op->connectInputTo(i, inputOp->getOutput(0));
// Set input shape
- inputNode->getOperation()->setOutputShape(0, inputNTensors[i]->getShape());
+ inputOp->setOutputShape(0, inputNTensors[i]->getShape());
}
// Mark outputs
- g.markOutput(opNode);
+ g.markOutput(op);
// Run shape inference
mir::ShapeInference shapeInferencer;
g.accept(&shapeInferencer);
- return opNode;
+ return op;
}
/** Fills NNC Shape object with data from src container*/
* This function creates test graph, runs interpeter, specifies artifact operation and compares results
*/
template <class TestFunc, class ...Args>
-void createAndRunTestGraph(function<mir::INode *(mir::Graph &)> opGenerator, TestFunc artifactOperation,
+void createAndRunTestGraph(function<mir::Operation*(mir::Graph &)> opGenerator, TestFunc artifactOperation,
const vector<unique_ptr<mir::TensorVariant>> &inputNTensors, const Args &...inputATensors)
{
mir::Graph g;
- mir::INode *actualOperation = fillGraph(g, opGenerator, inputNTensors);
+ mir::Operation *actualOperation = fillGraph(g, opGenerator, inputNTensors);
// serialize data for soft backend operation
list<OpDescr> inferenceSequence;
OpDescr opDescr;
- opDescr._node = actualOperation;
+ opDescr._op = actualOperation;
inferenceSequence.push_back(opDescr);
Serializer serializer;
serializer.serialize(inferenceSequence);
auto opGenerator = [nOutputShape](mir::Graph &g)
{
auto op = g.create<mir::ops::ReshapeOp>("y");
- op->getOperation()->setOutputShape(0, nOutputShape);
+ op->setOutputShape(0, nOutputShape);
return op;
};
cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
nnc::mir::Graph g;
- INode *input = g.create<ops::VariableOp>("input");
- input->getOperation()->setOutputShape(0, Shape({1,2,3,4}));
- INode *output = g.create<ops::ReluOp>("output");
+ Operation* input = g.create<ops::VariableOp>("input");
+ input->setOutputShape(0, Shape({1,2,3,4}));
+ Operation* output = g.create<ops::ReluOp>("output");
output->connectInputTo(0, input->getOutput(0));
// test that generator creates output dir and files
#include "support/CommandLine.h"
#include "option/Options.h"
#include "passes/caffe_frontend/caffe_importer.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/IrDotDumper.h"
#include "core/modelIR/ShapeInference.h"
#include "pass/PassException.h"
#include "pass/PassException.h"
#include "option/Options.h"
#include "passes/tflite_frontend/tflite_importer.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
#include "core/modelIR/IrDotDumper.h"
#include "core/modelIR/ShapeInference.h"