From 3ebf246862d3d9a51552977f18eba71df8b78291 Mon Sep 17 00:00:00 2001 From: =?utf8?q?=D0=A1=D0=B5=D1=80=D0=B3=D0=B5=D0=B9=20=D0=91=D0=B0=D1=80?= =?utf8?q?=D0=B0=D0=BD=D0=BD=D0=B8=D0=BA=D0=BE=D0=B2/AI=20Tools=20Lab=20/S?= =?utf8?q?RR/Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Wed, 14 Nov 2018 20:47:36 +0300 Subject: [PATCH] [nnc] Make the class `Graph` intrusive (#2261) * Remove `INode`, `AbstractNode`, `Node`, `OpDescription` classes. * Add `Operation` class, derive all `Op`s from it. * Modify `Graph` to work with `Operation`s instead of `Node`. Signed-off-by: Sergei Barannikov --- contrib/nnc/core/CMakeLists.txt | 5 +- contrib/nnc/core/modelIR/{graph.cpp => Graph.cpp} | 118 +++++------ contrib/nnc/core/modelIR/IrDotDumper.cpp | 176 ++++++++-------- contrib/nnc/core/modelIR/Operation.cpp | 90 +++++++++ contrib/nnc/core/modelIR/ShapeInference.cpp | 100 ++++------ contrib/nnc/core/modelIR/Visitor.cpp | 4 +- contrib/nnc/core/modelIR/ir_dot_builder.cpp | 16 +- contrib/nnc/core/modelIR/ir_node.cpp | 61 ------ contrib/nnc/core/modelIR/operation.cpp | 55 ----- .../nnc/include/core/modelIR/{graph.h => Graph.h} | 53 ++--- contrib/nnc/include/core/modelIR/IrDotDumper.h | 44 ++-- contrib/nnc/include/core/modelIR/Operation.h | 93 +++++++++ contrib/nnc/include/core/modelIR/ShapeInference.h | 46 ++--- contrib/nnc/include/core/modelIR/Visitor.h | 29 ++- contrib/nnc/include/core/modelIR/ir_dot_builder.h | 7 +- contrib/nnc/include/core/modelIR/ir_node.h | 142 ------------- .../include/core/modelIR/operations/BatchNormOp.h | 6 +- .../include/core/modelIR/operations/BiasAddOp.h | 7 +- .../include/core/modelIR/operations/CappedReluOp.h | 6 +- .../nnc/include/core/modelIR/operations/ConcatOp.h | 6 +- .../nnc/include/core/modelIR/operations/Conv2DOp.h | 6 +- .../include/core/modelIR/operations/Deconv2DOp.h | 6 +- .../core/modelIR/operations/DepthwiseConv2DOp.h | 6 +- .../include/core/modelIR/operations/DropoutOp.h | 6 +- .../core/modelIR/operations/ElementwiseOp.h | 6 +- .../nnc/include/core/modelIR/operations/EluOp.h | 6 +- .../core/modelIR/operations/FullyConnectedOp.h | 7 +- .../nnc/include/core/modelIR/operations/PadOp.h | 6 +- .../nnc/include/core/modelIR/operations/PoolOp.h | 6 +- .../nnc/include/core/modelIR/operations/ReluOp.h | 6 +- .../include/core/modelIR/operations/ReshapeOp.h | 6 +- .../nnc/include/core/modelIR/operations/ScaleOp.h | 7 +- .../include/core/modelIR/operations/SoftmaxOp.h | 6 +- .../include/core/modelIR/operations/SqueezeOp.h | 7 +- .../nnc/include/core/modelIR/operations/TanhOp.h | 6 +- .../include/core/modelIR/operations/VariableOp.h | 6 +- .../include/core/modelIR/operations/operation.h | 56 ------ .../core/modelIR/operations/operations.lst.h | 46 ++--- contrib/nnc/include/pass/PassData.h | 2 +- .../passes/acl_soft_backend/AclCppOpGenerator.h | 62 +++--- .../include/passes/caffe_frontend/caffe_importer.h | 4 +- .../include/passes/common_frontend/nn_importer.h | 2 +- .../nnc/include/passes/interpreter/Interpreter.h | 53 +++-- .../include/passes/soft_backend/BaseGenerator.h | 2 +- .../passes/tflite_frontend/tflite_importer.h | 7 +- .../passes/acl_soft_backend/AclCppOpGenerator.cpp | 179 ++++++++--------- .../nnc/passes/caffe_frontend/caffe_importer.cpp | 10 +- .../nnc/passes/caffe_frontend/caffe_op_creator.cpp | 88 ++++---- .../nnc/passes/caffe_frontend/caffe_op_creator.h | 86 ++++---- contrib/nnc/passes/interpreter/Interpreter.cpp | 222 ++++++++++----------- .../nnc/passes/interpreter/interpreter_pass.cpp | 10 +- .../nnc/passes/onnx_frontend/ONNXImporterImpl.cpp | 20 +- 
.../nnc/passes/onnx_frontend/ONNXImporterImpl.h | 6 +- contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp | 34 ++-- contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h | 31 ++- contrib/nnc/passes/soft_backend/CPPGenerator.cpp | 2 +- contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp | 151 +++++++------- contrib/nnc/passes/soft_backend/ModelAnalyzer.h | 52 ++--- contrib/nnc/passes/soft_backend/SBSerializer.cpp | 61 ++---- contrib/nnc/passes/soft_backend/SBSerializer.h | 43 ++-- .../nnc/passes/tflite_frontend/tflite_importer.cpp | 8 +- .../passes/tflite_frontend/tflite_op_creator.cpp | 60 +++--- .../nnc/passes/tflite_frontend/tflite_op_creator.h | 51 ++--- contrib/nnc/tests/interpreter/graph_creator.cpp | 32 +-- contrib/nnc/tests/interpreter/graph_creator.h | 2 +- contrib/nnc/tests/interpreter/op_test.cpp | 2 +- contrib/nnc/tests/soft_backend/CompileCPP.cpp | 12 +- contrib/nnc/unittests/core/CMakeLists.txt | 3 +- contrib/nnc/unittests/core/Graph.cpp | 20 +- contrib/nnc/unittests/core/NodeReplacer.cpp | 16 +- contrib/nnc/unittests/core/ShapeInference.cpp | 48 ++--- contrib/nnc/unittests/core/ir_node.cpp | 36 ---- contrib/nnc/unittests/core/operation.cpp | 26 ++- contrib/nnc/unittests/pass/PassManagerTest.cpp | 2 +- .../nnc/unittests/soft_backend/CPPOperations.cpp | 30 +-- contrib/nnc/unittests/soft_backend/Generator.cpp | 6 +- contrib/nnc/utils/caffe_dot_dumper/model_dump.cpp | 2 +- .../nnc/utils/tflite_dot_dumper/sanity_check.cpp | 2 +- 78 files changed, 1252 insertions(+), 1498 deletions(-) rename contrib/nnc/core/modelIR/{graph.cpp => Graph.cpp} (51%) create mode 100644 contrib/nnc/core/modelIR/Operation.cpp delete mode 100644 contrib/nnc/core/modelIR/ir_node.cpp delete mode 100644 contrib/nnc/core/modelIR/operation.cpp rename contrib/nnc/include/core/modelIR/{graph.h => Graph.h} (67%) create mode 100644 contrib/nnc/include/core/modelIR/Operation.h delete mode 100644 contrib/nnc/include/core/modelIR/ir_node.h delete mode 100644 contrib/nnc/include/core/modelIR/operations/operation.h delete mode 100644 contrib/nnc/unittests/core/ir_node.cpp diff --git a/contrib/nnc/core/CMakeLists.txt b/contrib/nnc/core/CMakeLists.txt index cc5889d..3b69451 100644 --- a/contrib/nnc/core/CMakeLists.txt +++ b/contrib/nnc/core/CMakeLists.txt @@ -1,10 +1,9 @@ -set(SOURCES "modelIR/graph.cpp" +set(SOURCES "modelIR/Graph.cpp" "modelIR/Index.cpp" "modelIR/ir_dot_builder.cpp" "modelIR/IrDotDumper.cpp" "modelIR/ir_dot_node_info.cpp" - "modelIR/ir_node.cpp" - "modelIR/operation.cpp" + "modelIR/Operation.cpp" "modelIR/Shape.cpp" "modelIR/ShapeInference.cpp" "modelIR/Tensor.cpp" diff --git a/contrib/nnc/core/modelIR/graph.cpp b/contrib/nnc/core/modelIR/Graph.cpp similarity index 51% rename from contrib/nnc/core/modelIR/graph.cpp rename to contrib/nnc/core/modelIR/Graph.cpp index e785ea5..1eab752 100644 --- a/contrib/nnc/core/modelIR/graph.cpp +++ b/contrib/nnc/core/modelIR/Graph.cpp @@ -18,49 +18,41 @@ #include #include -#include "core/modelIR/graph.h" +#include "core/modelIR/Graph.h" namespace nnc { namespace mir { /** - * @brief replace all usages of node `node` with node `with` + * @brief replace all usages of operation `op` with operation `with` * (i.e.
all references in previous/next nodes ) - * @param inode a node to replace - * @param with a node to use as a replacement + * @param op the operation to replace + * @param with the operation to use as a replacement */ -static void replaceUsages(const INode* node, INode* with) { - auto with_node = dynamic_cast<AbstractNode*>(with); - assert(with_node); +static void replaceUsages(const Operation* op, Operation* with) { - //For each output replace prev references to `node` by `with` + //For each output replace prev references to `op` by `with` - for (auto out : node->getNextNodes()) { - auto anode = dynamic_cast<AbstractNode*>(out); - assert(anode && "Unexpected node type"); - - for (auto& prev : anode->getMutablePrevNodes()) { - if (prev.node == node) - prev.node = with; + for (auto out : op->getNextNodes()) { + for (auto& prev : out->getMutablePrevNodes()) { + if (prev.op == op) + prev.op = with; } } - with_node->getMutableNextNodes() = node->getNextNodes(); + with->getMutableNextNodes() = op->getNextNodes(); - //For each input replace next references to `node` by `with` + //For each input replace next references to `op` by `with` - for (auto& in : node->getPrevNodes()) { - auto anode = dynamic_cast<AbstractNode*>(in.node); - assert(anode && "Unexpected node type"); - - for (auto& next : anode->getMutableNextNodes()) { - if (next == node) + for (auto& in : op->getPrevNodes()) { + for (auto& next : in.op->getMutableNextNodes()) { + if (next == op) next = with; } } - with_node->getMutablePrevNodes() = node->getPrevNodes(); + with->getMutablePrevNodes() = op->getPrevNodes(); } -INode::Ref Graph::getInput(const std::string& name) { +Operation* Graph::getInput(const std::string& name) { auto it = _inputs.find(name); if (it == _inputs.end()) return nullptr; @@ -68,7 +60,7 @@ INode::Ref Graph::getInput(const std::string& name) { return it->second; } -INode::Ref Graph::getOutput(const std::string& name) { +Operation* Graph::getOutput(const std::string& name) { auto it = _outputs.find(name); if (it == _outputs.end()) return nullptr; @@ -77,12 +69,12 @@ void Graph::accept(IVisitor* visitor) { - std::deque<INode::Ref> q; - std::set<INode::Ref> known_nodes; + std::deque<Operation*> q; + std::set<Operation*> known_ops; for (const auto& e : _inputs) { q.push_back(e.second); - known_nodes.insert(e.second); //Consider all input _nodes resolved by default + known_ops.insert(e.second); //Consider all input _ops resolved by default } //BFS while (!q.empty()) { auto n = q.front(); q.pop_front(); n->accept(visitor); for (auto out : n->getNextNodes()) { - if (known_nodes.count(out) == 0) { + if (known_ops.count(out) == 0) { bool allInputsResolved = true; for (auto in : out->getPrevNodes()) { - if (known_nodes.count(in.node) == 0) { + if (known_ops.count(in.op) == 0) { allInputsResolved = false; } } if (allInputsResolved) { - known_nodes.insert(out); + known_ops.insert(out); q.push_back(out); } } } } Graph::~Graph() { - for (auto& node : _nodes) { + for (auto& node : _ops) { delete node; } } -void Graph::markOutput(INode::Ref node) { - auto it = _outputs.find(node->getName()); +void Graph::markOutput(Operation* op) { + auto it = _outputs.find(op->getName()); if (it != _outputs.end()) { throw std::runtime_error("Output node with same name already exists"); } - _outputs[node->getName()] = node; + _outputs[op->getName()] = op; } -std::vector<INode::Ref> Graph::collectInputs() { - std::vector<INode::Ref> res; +std::vector<Operation*> Graph::collectInputs() { + std::vector<Operation*> res; for (auto& e : _inputs) { res.emplace_back(e.second); } return res; } -std::vector<INode::Ref> Graph::collectOutputs() { - std::vector<INode::Ref> res; +std::vector<Operation*>
Graph::collectOutputs() { + std::vector<Operation*> res; for (auto& e : _outputs) { res.emplace_back(e.second); } return res; } -void Graph::replaceNode(const INode* node, INode* with) { - auto in = _inputs.find(node->getName()); +void Graph::replaceNode(const Operation* op, Operation* with) { + auto in = _inputs.find(op->getName()); if (in != _inputs.end()) { (*in).second = with; } - auto out_it = _outputs.find(node->getName()); + auto out_it = _outputs.find(op->getName()); if (out_it != _outputs.end()) { (*out_it).second = with; } - replaceUsages(node, with); + replaceUsages(op, with); - _nodes.erase(std::remove_if(_nodes.begin(), _nodes.end(), [node] (INode::Ref n) { - return n == node; - }), _nodes.end()); + _ops.erase(std::remove_if(_ops.begin(), _ops.end(), [op] (Operation* n) { + return n == op; + }), _ops.end()); } -Node<ops::VariableOp>* Graph::replaceWithInputNode(const INode* node) { - auto in = create<ops::VariableOp>(node->getName()); - assert(node->getOperation()->getNumOutputs() <= 1 +ops::VariableOp* Graph::replaceWithInputNode(const Operation* op) { + auto in = create<ops::VariableOp>(op->getName()); + assert(op->getNumOutputs() <= 1 && "Only operations with single output value can be replaced with input node"); - assert(node->getNextNodes().size() <= 1 + assert(op->getNextNodes().size() <= 1 && "Node with multiple outputs cannot be changed into input"); - replaceNode(node, in); + replaceNode(op, in); //replaceNode adds all connections of original node, //but for input node we don't need input connections - // - //cast is safe since we know graph creates only AbstractNode(s) - static_cast<AbstractNode*>(in)->getMutablePrevNodes().clear(); + in->getMutablePrevNodes().clear(); - delete node; + delete op; - return static_cast<Node<ops::VariableOp>*>(in); + return dynamic_cast<ops::VariableOp*>(in); } void Graph::replaceInputNodes(const std::vector<std::string>& new_inputs) { - std::vector<INode::Ref> nodes_to_replace; + std::vector<Operation*> ops_to_replace; std::set<std::string> new_input_set(new_inputs.begin(), new_inputs.end()); - for (auto& n : _nodes) { - if (new_input_set.count(n->getName()) != 0) { - nodes_to_replace.push_back(n); + for (auto& op : _ops) { + if (new_input_set.count(op->getName()) != 0) { + ops_to_replace.push_back(op); } } _inputs.clear(); - for (auto& n : nodes_to_replace) { - replaceWithInputNode(n); + for (auto& op : ops_to_replace) { + replaceWithInputNode(op); } } @@ -200,9 +190,9 @@ void Graph::replaceOutputNodes(const std::vector<std::string>& new_outputs) { std::set<std::string> new_outputs_set(new_outputs.begin(), new_outputs.end()); - for (auto& n : _nodes) { - if (new_outputs_set.count(n->getName()) != 0) { - markOutput(n); + for (auto& op : _ops) { + if (new_outputs_set.count(op->getName()) != 0) { + markOutput(op); } } } diff --git a/contrib/nnc/core/modelIR/IrDotDumper.cpp b/contrib/nnc/core/modelIR/IrDotDumper.cpp index a75750e..dc2ef09 100644 --- a/contrib/nnc/core/modelIR/IrDotDumper.cpp +++ b/contrib/nnc/core/modelIR/IrDotDumper.cpp @@ -17,13 +17,10 @@ #include #include "core/modelIR/IrDotDumper.h" -namespace nnc -{ -namespace mir -{ +namespace nnc { +namespace mir { -static std::vector<Shape> getInputShapes(OpDescription &op) -{ +static std::vector<Shape> getInputShapes(Operation& op) { std::vector<Shape> shapes; for (std::size_t i = 0; i < op.getNumInputs(); ++i) { @@ -32,8 +29,7 @@ static std::vector<Shape> getInputShapes(OpDescription &op) return shapes; } -static std::vector<Shape> getOutputShapes(const OpDescription &op) -{ +static std::vector<Shape> getOutputShapes(const Operation& op) { std::vector<Shape> shapes; for (std::size_t i = 0; i < op.getNumOutputs(); ++i) { @@ -42,84 +38,76 @@ static std::vector<Shape> getOutputShapes(const OpDescription &op) return shapes; }
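The dumper's visit overloads below now take the operation by reference instead of an (INode*, OpDescription&) pair, since an Operation carries its own name, id, and shapes. For context, a minimal sketch of a client visitor under the new single-argument signature (the ShapePrinter class here is hypothetical; the empty-stub Visitor base it derives from is the one declared in Visitor.h by this patch):

#include <iostream>
#include "core/modelIR/Visitor.h"
#include "core/modelIR/operations/ReluOp.h"

namespace nnc {
namespace mir {

// Reports only ReLU operations; every other op type falls through to the
// empty default implementations provided by the Visitor base class.
class ShapePrinter : public Visitor {
public:
  void visit(ops::ReluOp& op) override {
    // The op itself now provides its name; no separate INode* is needed.
    std::cout << op.getName() << ": " << op.getNumInputs() << " input(s)\n";
  }
};

} // namespace mir
} // namespace nnc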
-void IrDotDumper::visit(INode *node, ops::BiasAddOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("BiasAdd", node->getName()) +void IrDotDumper::visit(ops::BiasAddOp &op) { + auto nodeInfo = DotIrNodeInfo().withType("BiasAdd", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)) .withKernelShape(op.getWeights().getShape()); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::CappedReluOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("CappedRelu", node->getName()) +void IrDotDumper::visit(ops::CappedReluOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("CappedRelu", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)) .withMisc("Cap", op.getCap()); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::ConcatOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("Concat", node->getName()) +void IrDotDumper::visit(ops::ConcatOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("Concat", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)) .withMisc("Axis", op.getAxis()); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::Conv2DOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("Conv2D", node->getName()) +void IrDotDumper::visit(ops::Conv2DOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("Conv2D", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)) .withKernelShape(op.getKernel().getShape()) .withPadType(op.getPaddingType()) .withStride(op.getStrides()); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::DepthwiseConv2DOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("DepthwiseConv2D", node->getName()) +void IrDotDumper::visit(ops::DepthwiseConv2DOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("DepthwiseConv2D", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)) .withKernelShape(op.getKernel().getShape()) .withPadType(op.getPaddingType()) .withStride(op.getStrides()); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::FullyConnectedOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("FullyConnected", node->getName()) +void IrDotDumper::visit(ops::FullyConnectedOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("FullyConnected", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)) .withKernelShape(op.getWeights().getShape()); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::SoftmaxOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("Softmax", node->getName()) +void IrDotDumper::visit(ops::SoftmaxOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("Softmax", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)) .withMisc("Axis", op.getAxis()); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::PoolOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("Pool2D", node->getName()) +void IrDotDumper::visit(ops::PoolOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("Pool2D", op.getName()) 
.withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)) .withShape("PoolWindow", op.getWindowShape()) @@ -127,120 +115,112 @@ void IrDotDumper::visit(INode *node, ops::PoolOp &op) .withPoolType(op.getPoolingType()) .withStride(op.getStrides()); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::ReluOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("ReLU", node->getName()) +void IrDotDumper::visit(ops::ReluOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("ReLU", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::ReshapeOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("Reshape", node->getName()) +void IrDotDumper::visit(ops::ReshapeOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("Reshape", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::VariableOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("Input", node->getName()) +void IrDotDumper::visit(ops::VariableOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("Input", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::BatchNormOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("BatchNorm", node->getName()) +void IrDotDumper::visit(ops::BatchNormOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("BatchNorm", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)) .withMisc("Moving Average Fraction", op.getMovingAvgFraction()) .withMisc("Eps", op.getEps()) .withMisc("Spatial", op.getSpatial()); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::ScaleOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("ScaleOp", node->getName()) +void IrDotDumper::visit(ops::ScaleOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("ScaleOp", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)) .withShape("Scale Tensor", op.getWeights().getShape()); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::DropoutOp &op) -{ - auto nodeInfo = DotIrNodeInfo().withType("DropoutOp", node->getName()) +void IrDotDumper::visit(ops::DropoutOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("DropoutOp", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)) .withMisc("DropRate", op.getRate()); - dotBuilder.updateWithNode(node, nodeInfo); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::DeConv2DOp &op) { - auto node_info = DotIrNodeInfo().withType("DeConv2D", node->getName()) - .withInShapes(getInputShapes(op)) - .withOutShapes(getOutputShapes(op)) - .withKernelShape(op.getKernel().getShape()) - .withPadType(op.getPaddingType()) - .withStride(op.getStrides()); +void IrDotDumper::visit(ops::DeConv2DOp& op) { + auto node_info = DotIrNodeInfo().withType("DeConv2D", op.getName()) + .withInShapes(getInputShapes(op)) + .withOutShapes(getOutputShapes(op)) + 
.withKernelShape(op.getKernel().getShape()) + .withPadType(op.getPaddingType()) + .withStride(op.getStrides()); - dotBuilder.updateWithNode(node, node_info); + dotBuilder.updateWithOp(&op, node_info); } -void IrDotDumper::visit(INode *node, ops::EluOp &op) { - auto node_info = DotIrNodeInfo().withType("EluOp", node->getName()) - .withInShapes(getInputShapes(op)) - .withOutShapes(getOutputShapes(op)) - .withMisc("Alpha", op.getAlpha()); - - dotBuilder.updateWithNode(node, node_info); +void IrDotDumper::visit(ops::EluOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("EluOp", op.getName()) + .withInShapes(getInputShapes(op)) + .withOutShapes(getOutputShapes(op)) + .withMisc("Alpha", op.getAlpha()); + + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode *node, ops::TanhOp &op) { - auto node_info = DotIrNodeInfo().withType("TanhOp", node->getName()) - .withInShapes(getInputShapes(op)) - .withOutShapes(getOutputShapes(op)); +void IrDotDumper::visit(ops::TanhOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("TanhOp", op.getName()) + .withInShapes(getInputShapes(op)) + .withOutShapes(getOutputShapes(op)); - dotBuilder.updateWithNode(node, node_info); + dotBuilder.updateWithOp(&op, nodeInfo); } -void mir::IrDotDumper::visit(INode *node, ops::ElementwiseOp &op) { - auto node_info = DotIrNodeInfo().withType("ElementwiseOp", node->getName()) - .withInShapes(getInputShapes(op)) - .withOutShapes(getOutputShapes(op)) - .withMisc("Operation", ( int ) op.getOpType()); +void mir::IrDotDumper::visit(ops::ElementwiseOp& op) { + auto nodeInfo = DotIrNodeInfo().withType("ElementwiseOp", op.getName()) + .withInShapes(getInputShapes(op)) + .withOutShapes(getOutputShapes(op)) + .withMisc("Operation", ( int ) op.getOpType()); - dotBuilder.updateWithNode(node, node_info); + dotBuilder.updateWithOp(&op, nodeInfo); } -void IrDotDumper::visit(INode* node, ops::SqueezeOp& op) { - auto node_info = DotIrNodeInfo().withType("SqueezeOp", node->getName()) - .withInShapes(getInputShapes(op)) - .withOutShapes(getOutputShapes(op)); +void IrDotDumper::visit(ops::SqueezeOp& op) { + auto node_info = DotIrNodeInfo().withType("SqueezeOp", op.getName()) + .withInShapes(getInputShapes(op)) + .withOutShapes(getOutputShapes(op)); for (auto dim : op.getDimsToSqueeze()) { node_info.withMisc("SqueezeDim", dim); } - dotBuilder.updateWithNode(node, node_info); + dotBuilder.updateWithOp(&op, node_info); } -void mir::IrDotDumper::visit(INode* node, ops::PadOp& op) { - auto node_info = DotIrNodeInfo().withType("PadOp", node->getName()) +void mir::IrDotDumper::visit(ops::PadOp& op) { + auto node_info = DotIrNodeInfo().withType("PadOp", op.getName()) .withInShapes(getInputShapes(op)) .withOutShapes(getOutputShapes(op)); - dotBuilder.updateWithNode(node, node_info); + dotBuilder.updateWithOp(&op, node_info); } } // namespace mir diff --git a/contrib/nnc/core/modelIR/Operation.cpp b/contrib/nnc/core/modelIR/Operation.cpp new file mode 100644 index 0000000..3bc8e28 --- /dev/null +++ b/contrib/nnc/core/modelIR/Operation.cpp @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "core/modelIR/Operation.h" +#include "core/modelIR/operations/FullyConnectedOp.h" +#include "core/modelIR/operations/SoftmaxOp.h" +#include "core/modelIR/operations/CappedReluOp.h" +#include "core/modelIR/operations/DepthwiseConv2DOp.h" +#include "core/modelIR/operations/Conv2DOp.h" +#include "core/modelIR/operations/Deconv2DOp.h" +#include "core/modelIR/operations/PoolOp.h" +#include "core/modelIR/operations/VariableOp.h" +#include "core/modelIR/operations/ReluOp.h" +#include "core/modelIR/operations/EluOp.h" +#include "core/modelIR/operations/ConcatOp.h" +#include "core/modelIR/operations/BiasAddOp.h" +#include "core/modelIR/operations/BatchNormOp.h" +#include "core/modelIR/operations/ScaleOp.h" +#include "core/modelIR/operations/DropoutOp.h" +#include "core/modelIR/operations/TanhOp.h" +#include "core/modelIR/operations/ElementwiseOp.h" +#include "core/modelIR/operations/SqueezeOp.h" +#include "core/modelIR/operations/ReshapeOp.h" +#include "core/modelIR/operations/PadOp.h" + +#include <cassert> + +namespace nnc { +namespace mir { + +Operation::Operation(Type type, std::size_t max_inputs, std::size_t max_outputs) + : _type(type), _max_inputs(max_inputs), _max_outputs(max_outputs) { + _inputs.resize(max_inputs); +} + +void Operation::connectInputTo(int inputIndex, const IODescriptor& descriptor) { + descriptor.op->_outputs.emplace_back(this); + _inputs[inputIndex] = descriptor; +} + +const IODescriptor Operation::getOutput(std::size_t index) { + return IODescriptor{.op = this, .index = index}; +} + +const Shape& Operation::getInputShape(std::size_t index) const { + assert(index < getNumInputs()); + return _inputShapes.at(index); +} + +void Operation::setInputShape(std::size_t index, const Shape& shape) { + assert(index < getNumInputs()); + _inputShapes[index] = shape; +} + +const Shape& Operation::getOutputShape(std::size_t index) const { + assert(index < getNumOutputs()); + return _outputShapes.at(index); +} + +void Operation::setOutputShape(std::size_t index, const Shape& shape) { + assert(index < getNumOutputs()); + _outputShapes[index] = shape; +} + +void Operation::accept(IVisitor* v) { + switch (getType()) { +#define HANDLE_OP(OpType, OpClass) \ + case Type::OpType: \ + v->visit(static_cast<ops::OpClass&>(*this)); \ + break; +#include "core/modelIR/operations/operations.lst.h" +#undef HANDLE_OP + } +} + +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/modelIR/ShapeInference.cpp b/contrib/nnc/core/modelIR/ShapeInference.cpp index a294c18..fd68663 100644 --- a/contrib/nnc/core/modelIR/ShapeInference.cpp +++ b/contrib/nnc/core/modelIR/ShapeInference.cpp @@ -48,7 +48,7 @@ namespace mir using nnc::mir::Shape; template <typename Op> -void fillHWShapesForPaddedOperations(Op &op, const Shape &windowShape, Shape &outShape) +void fillHWShapesForPaddedOperations(Op& op, const Shape &windowShape, Shape &outShape) { auto &strides = op.getStrides(); auto &inShape = op.getInputShape(0); @@ -96,9 +96,8 @@ void fillHWShapesForPaddedOperations(Op &op, const Shape &windowShape, Shape &ou op.setPadding(inRank - 1, 0); } -void ShapeInference::visit(INode::Ref node, ops::ConcatOp &op) -{ - fillInputShapes(node, op); +void ShapeInference::visit(ops::ConcatOp& op) { + fillInputShapes(op); int32_t axis = op.getAxis(); Shape outShape; @@ -118,9 +117,8 @@ void ShapeInference::visit(INode::Ref node, ops::ConcatOp &op) op.setOutputShape(0, outShape); } -void ShapeInference::visit(INode::Ref node,
ops::Conv2DOp &op) -{ - fillInputShapes(node, op); +void ShapeInference::visit(ops::Conv2DOp& op) { + fillInputShapes(op); Shape outShape; auto &kernel = op.getKernel(); @@ -132,38 +130,32 @@ void ShapeInference::visit(INode::Ref node, ops::Conv2DOp &op) op.setOutputShape(0, outShape); } -void ShapeInference::visit(INode::Ref node, ops::VariableOp &op) -{ +void ShapeInference::visit(ops::VariableOp& op) { (void)op; - (void)node; // No need to do anything for inputs. These should be set by user } -void ShapeInference::fillInputShapes(INode::Ref node, OpDescription &op) -{ +void ShapeInference::fillInputShapes(Operation& op) { size_t i = 0; - for (auto &in : node->getPrevNodes()) + for (auto &in : op.getPrevNodes()) { - const Shape &inShape = in.node->getOperation()->getOutputShape(in.index); + const Shape &inShape = in.op->getOutputShape(in.index); op.setInputShape(i++, inShape); } } -void ShapeInference::visit(INode::Ref node, ops::ReluOp &op) -{ - fillInputShapes(node, op); +void ShapeInference::visit(ops::ReluOp& op) { + fillInputShapes(op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(INode::Ref node, ops::SoftmaxOp &op) -{ - fillInputShapes(node, op); +void ShapeInference::visit(ops::SoftmaxOp& op) { + fillInputShapes(op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(INode::Ref node, ops::PoolOp &op) -{ - fillInputShapes(node, op); +void ShapeInference::visit(ops::PoolOp& op) { + fillInputShapes(op); Shape outShape; auto &windowShape = op.getWindowShape(); @@ -178,9 +170,8 @@ void ShapeInference::visit(INode::Ref node, ops::PoolOp &op) op.setOutputShape(0, outShape); } -void ShapeInference::visit(INode::Ref node, ops::FullyConnectedOp &op) -{ - fillInputShapes(node, op); +void ShapeInference::visit(ops::FullyConnectedOp& op) { + fillInputShapes(op); const Shape &inShape = op.getInputShape(0); const Shape &wShape = op.getWeights().getShape(); const int32_t weightsRank = wShape.rank(); @@ -201,15 +192,13 @@ void ShapeInference::visit(INode::Ref node, ops::FullyConnectedOp &op) op.setOutputShape(0, outShape); } -void ShapeInference::visit(INode::Ref node, ops::CappedReluOp &op) -{ - fillInputShapes(node, op); +void ShapeInference::visit(ops::CappedReluOp& op) { + fillInputShapes(op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(INode::Ref node, ops::DepthwiseConv2DOp &op) -{ - fillInputShapes(node, op); +void ShapeInference::visit(ops::DepthwiseConv2DOp& op) { + fillInputShapes(op); Shape outShape; auto &kernelShape = op.getKernel().getShape(); @@ -227,16 +216,14 @@ void ShapeInference::visit(INode::Ref node, ops::DepthwiseConv2DOp &op) op.setOutputShape(0, outShape); } -void ShapeInference::visit(INode::Ref node, ops::BiasAddOp &op) -{ - fillInputShapes(node, op); +void ShapeInference::visit(ops::BiasAddOp& op) { + fillInputShapes(op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(INode::Ref node, ops::ReshapeOp &op) -{ +void ShapeInference::visit(ops::ReshapeOp& op) { // Reshape should have its output shape filled by importer/user - fillInputShapes(node, op); + fillInputShapes(op); auto& inShape = op.getInputShape(0); auto outShape = op.getOutputShape(0); @@ -260,23 +247,22 @@ op.setOutputShape(0, outShape); } -void ShapeInference::visit(INode::Ref node, ops::ScaleOp &op) -{ - fillInputShapes(node, op); +void ShapeInference::visit(ops::ScaleOp& op) { + fillInputShapes(op); op.setOutputShape(0,
op.getInputShape(0)); } -void ShapeInference::visit(INode::Ref node, ops::DropoutOp &op) { - fillInputShapes(node, op); +void ShapeInference::visit(ops::DropoutOp& op) { + fillInputShapes(op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(INode::Ref node, ops::BatchNormOp &op) { - fillInputShapes(node, op); +void ShapeInference::visit(ops::BatchNormOp& op) { + fillInputShapes(op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(INode::Ref node, ops::DeConv2DOp &op) { +void ShapeInference::visit(ops::DeConv2DOp& op) { /** see https://github.com/tensorflow/tensorflow/issues/2118 for reason why the output shape is what it is. @@ -284,7 +270,7 @@ void ShapeInference::visit(INode::Ref node, ops::DeConv2DOp &op) { output = input * stride + filter - stride # VALID output = input * stride - stride + 1 # SAME */ - fillInputShapes(node, op); + fillInputShapes(op); Shape out_shape; Shape in_shape = op.getInputShape(0); @@ -325,23 +311,23 @@ void ShapeInference::visit(INode::Ref node, ops::DeConv2DOp &op) { op.setOutputShape(0, out_shape); } -void ShapeInference::visit(INode *node, ops::EluOp &op) { - fillInputShapes(node, op); +void ShapeInference::visit(ops::EluOp& op) { + fillInputShapes(op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(INode::Ref node, ops::TanhOp &op) { - fillInputShapes(node, op); +void ShapeInference::visit(ops::TanhOp& op) { + fillInputShapes(op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(INode::Ref node, ops::ElementwiseOp &op) { - fillInputShapes(node, op); +void ShapeInference::visit(ops::ElementwiseOp& op) { + fillInputShapes(op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(INode* node, ops::SqueezeOp& op) { - fillInputShapes(node, op); +void ShapeInference::visit(ops::SqueezeOp& op) { + fillInputShapes(op); assert(op.getNumInputs() == 1); const auto& input_shape = op.getInputShape(0); @@ -388,12 +374,12 @@ void ShapeInference::visit(INode* node, ops::SqueezeOp& op) { op.setOutputShape(0, output_shape); } -void ShapeInference::visit(INode* node, ops::PadOp& op) { +void ShapeInference::visit(ops::PadOp& op) { /** padded size of each dimension D of the output is: paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1] */ - fillInputShapes(node, op); + fillInputShapes(op); const Shape& in_shape = op.getInputShape(0); Shape out_shape; diff --git a/contrib/nnc/core/modelIR/Visitor.cpp b/contrib/nnc/core/modelIR/Visitor.cpp index a754682..9216053 100644 --- a/contrib/nnc/core/modelIR/Visitor.cpp +++ b/contrib/nnc/core/modelIR/Visitor.cpp @@ -20,9 +20,9 @@ namespace nnc { namespace mir { -#define OP_TYPE(OpType) void Visitor::visit(INode*, ops::OpType&) {} +#define HANDLE_OP(OpType, OpClass) void Visitor::visit(ops::OpClass&) {} #include "core/modelIR/operations/operations.lst.h" -#undef OP_TYPE +#undef HANDLE_OP } // namespace mir } // namespace nnc diff --git a/contrib/nnc/core/modelIR/ir_dot_builder.cpp b/contrib/nnc/core/modelIR/ir_dot_builder.cpp index 6d4ffb5..2337527 100644 --- a/contrib/nnc/core/modelIR/ir_dot_builder.cpp +++ b/contrib/nnc/core/modelIR/ir_dot_builder.cpp @@ -21,12 +21,12 @@ namespace nnc namespace mir { -void IrDotBuilder::updateWithNode(INode *node, const DotIrNodeInfo &irNodeInfo) +void IrDotBuilder::updateWithOp(Operation* op, const DotIrNodeInfo& irNodeInfo) { - addNode(node, irNodeInfo); - for (auto &prev : node->getPrevNodes()) + addNode(op, irNodeInfo); + for (auto &prev : op->getPrevNodes()) { - 
addEdge(prev.node, node); + addEdge(prev.op, op); } } @@ -35,14 +35,14 @@ void IrDotBuilder::writeDot(std::ostream &os) os << "digraph D {" << std::endl << dot.str() << std::endl << "}" << std::endl; } -void IrDotBuilder::addNode(INode *node, const DotIrNodeInfo &irNode) +void IrDotBuilder::addNode(Operation* op, const DotIrNodeInfo& irNode) { - dot << node->getId() << " [shape=record label=\"" << irNode.getLabel() << "\"];" << std::endl; + dot << op->getId() << " [shape=record label=\"" << irNode.getLabel() << "\"];" << std::endl; } -void IrDotBuilder::addEdge(INode *node1, INode *node2) +void IrDotBuilder::addEdge(Operation* op1, Operation* op2) { - dot << node1->getId() << " -> " << node2->getId() << ";" << std::endl; + dot << op1->getId() << " -> " << op2->getId() << ";" << std::endl; } } // namespace mir diff --git a/contrib/nnc/core/modelIR/ir_node.cpp b/contrib/nnc/core/modelIR/ir_node.cpp deleted file mode 100644 index 98fa2a5..0000000 --- a/contrib/nnc/core/modelIR/ir_node.cpp +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#include "core/modelIR/ir_node.h" - -namespace nnc -{ -namespace mir -{ - -const std::vector &AbstractNode::getNextNodes() const { return _outputs; } - -const std::vector &AbstractNode::getPrevNodes() const -{ - return _inputs; -} - -void AbstractNode::connectInputTo(const int inputIndex, const IODescriptor &descriptor) -{ - AbstractNode *buf_ptr = dynamic_cast(descriptor.node); - assert(buf_ptr); - buf_ptr->addNextNode(this); - _inputs[inputIndex] = descriptor; -} - -void AbstractNode::addNextNode(INode::Ref const node) { _outputs.emplace_back(node); } - -const INode::IODescriptor AbstractNode::getOutput(size_t index) -{ - return IODescriptor{.node = this, .index = index}; -} - -AbstractNode::AbstractNode(size_t num_inputs) { - _inputs.resize(num_inputs); -} - -std::vector& AbstractNode::getMutablePrevNodes() { - return _inputs; -} - -std::vector& AbstractNode::getMutableNextNodes() { - return _outputs; -} - -} // namespace mir -} // namespace nnc diff --git a/contrib/nnc/core/modelIR/operation.cpp b/contrib/nnc/core/modelIR/operation.cpp deleted file mode 100644 index 8eea4c7..0000000 --- a/contrib/nnc/core/modelIR/operation.cpp +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include <cassert> - -#include "core/modelIR/operations/operation.h" - -namespace nnc -{ -namespace mir -{ - -const Shape &OpDescription::getInputShape(const size_t index) const { - assert(index < getNumInputs()); - return _inputShapes.at(index); -} - -void OpDescription::setInputShape(const size_t index, const Shape &shape) { - assert(index < getNumInputs()); - _inputShapes[index] = shape; -} - -const Shape &OpDescription::getOutputShape(const size_t index) const { - assert(index < getNumOutputs()); - return _outputShapes.at(index); -} - -void OpDescription::setOutputShape(const size_t index, const Shape &shape) { - assert(index < getNumOutputs()); - _outputShapes[index] = shape; -} - -OpDescription::OpDescription(const size_t max_inputs, const size_t max_outputs) - : _max_inputs(max_inputs), _max_outputs(max_outputs) { -} - -size_t OpDescription::getNumInputs() const { return _max_inputs; } - -size_t OpDescription::getNumOutputs() const { return _max_outputs; } - -} // namespace mir -} // namespace nnc diff --git a/contrib/nnc/include/core/modelIR/graph.h b/contrib/nnc/include/core/modelIR/Graph.h similarity index 67% rename from contrib/nnc/include/core/modelIR/graph.h rename to contrib/nnc/include/core/modelIR/Graph.h index 7b65a96..4263c91 100644 --- a/contrib/nnc/include/core/modelIR/graph.h +++ b/contrib/nnc/include/core/modelIR/Graph.h @@ -22,9 +22,8 @@ #include #include -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" #include "core/modelIR/operations/VariableOp.h" -#include "core/modelIR/ir_node.h" namespace nnc { namespace mir { @@ -38,47 +37,49 @@ class Graph { virtual ~Graph(); template <typename T, typename ...Args> - //make this method callable only with OpDescription subclasses - typename std::enable_if<std::is_base_of<OpDescription, T>::value, INode::Ref>::type + //make this method callable only with Operation subclasses + typename std::enable_if<std::is_base_of<Operation, T>::value, Operation*>::type create(const std::string& name, Args&&...args) { - auto node = Node<T>::createNode(name, _lastNodeId++, std::forward<Args>(args)...); - registerNode(node); - return node; + auto op = new T(std::forward<Args>(args)...); + op->setId(_lastNodeId++); + op->setName(name); + registerOp(op); + return op; } void accept(IVisitor* visitor); - void markOutput(INode::Ref node); - INode::Ref getInput(const std::string& name); - INode::Ref getOutput(const std::string& name); + void markOutput(Operation* op); + Operation* getInput(const std::string& name); + Operation* getOutput(const std::string& name); /** * @brief Returns all inputs from graph * @returns vector containing all graph input nodes */ - std::vector<INode::Ref> collectInputs(); + std::vector<Operation*> collectInputs(); /** * @brief Returns all outputs from graph * @returns vector containing all graph outputs nodes */ - std::vector<INode::Ref> collectOutputs(); + std::vector<Operation*> collectOutputs(); /** * @brief Substitute node in graph with another keeping all edges - * @param node Node to substitute + * @param op Operation to substitute * @param with Node to place instead */ - void replaceNode(const INode* node, INode* with); + void replaceNode(const Operation* op, Operation* with); /** * @brief Replaces referenced node with input(VariableOp) node - * @param node Node to replace + * @param op Operation to replace * @return Input node which is placed in graph instead of passed node * @warning deletes passed node */ - Node<ops::VariableOp>* replaceWithInputNode(const INode* node); + ops::VariableOp* replaceWithInputNode(const Operation* op); /** * @brief Change graph inputs to nodes with names in newInputs @@ -96,24 +97,24 @@ void replaceOutputNodes(const
std::vector<std::string>& new_outputs); private: - void registerNode(INode::Ref node) { - _nodes.push_back(node); + void registerOp(Operation* op) { + _ops.push_back(op); } - //TODO: maybe make user to mark input _nodes in a more obvious way - void registerNode(Node<ops::VariableOp>* node) { - auto it = _inputs.find(node->getName()); + //TODO: maybe make the user mark input _ops in a more obvious way + void registerOp(ops::VariableOp* op) { + auto it = _inputs.find(op->getName()); if( it != _inputs.end()) { throw std::runtime_error("Input name collision"); } - _inputs.insert(it, {node->getName(), node}); - _nodes.push_back(node); + _inputs.insert(it, {op->getName(), op}); + _ops.push_back(op); } - std::vector<INode::Ref> _nodes; + std::vector<Operation*> _ops; size_t _lastNodeId = 0; - std::unordered_map<std::string, INode::Ref> _inputs; - std::unordered_map<std::string, INode::Ref> _outputs; + std::unordered_map<std::string, Operation*> _inputs; + std::unordered_map<std::string, Operation*> _outputs; }; } // namespace mir diff --git a/contrib/nnc/include/core/modelIR/IrDotDumper.h b/contrib/nnc/include/core/modelIR/IrDotDumper.h index 7880f1a..f053843 100644 --- a/contrib/nnc/include/core/modelIR/IrDotDumper.h +++ b/contrib/nnc/include/core/modelIR/IrDotDumper.h @@ -28,7 +28,6 @@ #include "core/modelIR/operations/VariableOp.h" #include "core/modelIR/operations/ReluOp.h" #include "core/modelIR/operations/EluOp.h" -#include "core/modelIR/operations/operation.h" #include "core/modelIR/operations/ConcatOp.h" #include "core/modelIR/operations/BiasAddOp.h" #include "core/modelIR/operations/ReshapeOp.h" @@ -51,29 +50,28 @@ namespace mir * @brief Model IR visitor that can be used to output Model IR as a .dot graph. * @usage Run on a Model IR graph as a visitor, and then call writeDot passing it a stream */ -class IrDotDumper : public IVisitor -{ +class IrDotDumper : public IVisitor { public: - void visit(INode *node, ops::ConcatOp &op) override; - void visit(INode *node, ops::ReluOp &op) override; - void visit(INode *node, ops::Conv2DOp &op) override; - void visit(INode *node, ops::DepthwiseConv2DOp &op) override; - void visit(INode *node, ops::SoftmaxOp &op) override; - void visit(INode *node, ops::PoolOp &op) override; - void visit(INode *node, ops::FullyConnectedOp &op) override; - void visit(INode *node, ops::CappedReluOp &op) override; - void visit(INode *node, ops::BiasAddOp &op) override; - void visit(INode *node, ops::VariableOp &op) override; - void visit(INode *node, ops::ReshapeOp &op) override; - void visit(INode *node, ops::ScaleOp &op) override; - void visit(INode *node, ops::BatchNormOp &op) override; - void visit(INode *node, ops::DropoutOp &op) override; - void visit(INode *node, ops::DeConv2DOp &op) override; - void visit(INode *node, ops::EluOp &op) override; - void visit(INode *node, ops::TanhOp &op) override; - void visit(INode *node, ops::ElementwiseOp &op) override; - void visit(INode* node, ops::SqueezeOp& op) override; - void visit(INode* node, ops::PadOp& op) override; + void visit(ops::ConcatOp& op) override; + void visit(ops::ReluOp& op) override; + void visit(ops::Conv2DOp& op) override; + void visit(ops::DepthwiseConv2DOp& op) override; + void visit(ops::SoftmaxOp& op) override; + void visit(ops::PoolOp& op) override; + void visit(ops::FullyConnectedOp& op) override; + void visit(ops::CappedReluOp& op) override; + void visit(ops::BiasAddOp& op) override; + void visit(ops::VariableOp& op) override; + void visit(ops::ReshapeOp& op) override; + void visit(ops::ScaleOp& op) override; + void visit(ops::BatchNormOp& op) override; + void visit(ops::DropoutOp& op) override; + void visit(ops::DeConv2DOp& op) override; + void
visit(ops::EluOp& op) override; + void visit(ops::TanhOp& op) override; + void visit(ops::ElementwiseOp& op) override; + void visit(ops::SqueezeOp& op) override; + void visit(ops::PadOp& op) override; void writeDot(std::ostream &os) { dotBuilder.writeDot(os); }; diff --git a/contrib/nnc/include/core/modelIR/Operation.h b/contrib/nnc/include/core/modelIR/Operation.h new file mode 100644 index 0000000..e6a9e6a --- /dev/null +++ b/contrib/nnc/include/core/modelIR/Operation.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _NNC_CORE_IR_MODEL_OPERATION_H_ +#define _NNC_CORE_IR_MODEL_OPERATION_H_ + +#include +#include +#include "TensorVariant.h" +#include "core/modelIR/Visitor.h" + +#include "Shape.h" + +namespace nnc { +namespace mir { + +class Operation; + +struct IODescriptor { + Operation* op; + std::size_t index; +}; + +class Operation { +public: + enum class Type { +#define HANDLE_OP(OpType, OpClass) OpType, +#include "core/modelIR/operations/operations.lst.h" +#undef HANDLE_OP + }; + + virtual ~Operation() = default; + + Type getType() const { return _type; } + + std::size_t getId() const { return _id; } + void setId(std::size_t id) { _id = id; } + + const std::string& getName() const { return _name; } + void setName(const std::string& name) { _name = name; } + + std::size_t getNumInputs() const { return _max_inputs; } + std::size_t getNumOutputs() const { return _max_outputs; } + + void connectInputTo(int inputIndex, const IODescriptor& descriptor); + + const IODescriptor getOutput(std::size_t index); + + const std::vector<IODescriptor>& getPrevNodes() const { return _inputs; } + const std::vector<Operation*>& getNextNodes() const { return _outputs; } + + std::vector<IODescriptor>& getMutablePrevNodes() { return _inputs; } + std::vector<Operation*>& getMutableNextNodes() { return _outputs; } + + const nnc::mir::Shape& getInputShape(std::size_t index) const; + const nnc::mir::Shape& getOutputShape(std::size_t index) const; + void setInputShape(std::size_t index, const nnc::mir::Shape& shape); + void setOutputShape(std::size_t index, const nnc::mir::Shape& shape); + + void accept(IVisitor* v); + +protected: + Operation(Type type, std::size_t max_inputs, std::size_t max_outputs); + +private: + Type _type; + std::size_t _id; + std::string _name; + std::size_t _max_inputs; + std::size_t _max_outputs; + std::vector<IODescriptor> _inputs; + std::vector<Operation*> _outputs; + std::map<std::size_t, Shape> _inputShapes; + std::map<std::size_t, Shape> _outputShapes; +}; + +} // namespace mir +} // namespace nnc + +#endif //_NNC_CORE_IR_MODEL_OPERATION_H_ diff --git a/contrib/nnc/include/core/modelIR/ShapeInference.h b/contrib/nnc/include/core/modelIR/ShapeInference.h index 9679e42..3dcd9af 100644 --- a/contrib/nnc/include/core/modelIR/ShapeInference.h +++ b/contrib/nnc/include/core/modelIR/ShapeInference.h @@ -19,7 +19,7 @@ #include #include "core/modelIR/Visitor.h" -#include "core/modelIR/ir_node.h" +#include "core/modelIR/Operation.h" namespace nnc { namespace mir { class
ShapeInference : public IVisitor { - public: - void visit(INode::Ref node, ops::ConcatOp &op) override; - void visit(INode::Ref node, ops::Conv2DOp &op) override; - void visit(INode::Ref node, ops::DepthwiseConv2DOp &op) override; - void visit(INode::Ref node, ops::ReluOp &op) override; - void visit(INode::Ref node, ops::SoftmaxOp &op) override; - void visit(INode::Ref node, ops::PoolOp &op) override; - void visit(INode::Ref node, ops::FullyConnectedOp &op) override; - void visit(INode::Ref node, ops::CappedReluOp &op) override; - void visit(INode::Ref node, ops::BiasAddOp &op) override; - void visit(INode::Ref node, ops::ReshapeOp &op) override; - void visit(INode::Ref node, ops::VariableOp &op) override; - void visit(INode::Ref node, ops::ScaleOp &op) override; - void visit(INode::Ref node, ops::BatchNormOp &op) override; - void visit(INode::Ref node, ops::DropoutOp &op) override; - void visit(INode::Ref node, ops::TanhOp &op) override; - void visit(INode::Ref node, ops::ElementwiseOp &op) override; - void visit(INode::Ref node, ops::DeConv2DOp &op) override; - void visit(INode::Ref node, ops::EluOp &op) override; - void visit(INode* node, ops::SqueezeOp& op) override; - void visit(INode* node, ops::PadOp& op) override; +public: + void visit(ops::ConcatOp& op) override; + void visit(ops::Conv2DOp& op) override; + void visit(ops::DepthwiseConv2DOp& op) override; + void visit(ops::ReluOp& op) override; + void visit(ops::SoftmaxOp& op) override; + void visit(ops::PoolOp& op) override; + void visit(ops::FullyConnectedOp& op) override; + void visit(ops::CappedReluOp& op) override; + void visit(ops::BiasAddOp& op) override; + void visit(ops::ReshapeOp& op) override; + void visit(ops::VariableOp& op) override; + void visit(ops::ScaleOp& op) override; + void visit(ops::BatchNormOp& op) override; + void visit(ops::DropoutOp& op) override; + void visit(ops::TanhOp& op) override; + void visit(ops::ElementwiseOp& op) override; + void visit(ops::DeConv2DOp& op) override; + void visit(ops::EluOp& op) override; + void visit(ops::SqueezeOp& op) override; + void visit(ops::PadOp& op) override; protected: - void fillInputShapes(INode::Ref node, OpDescription &op); + void fillInputShapes(Operation& op); }; } // namespace mir diff --git a/contrib/nnc/include/core/modelIR/Visitor.h b/contrib/nnc/include/core/modelIR/Visitor.h index 2f35ca7..c9eab8c 100644 --- a/contrib/nnc/include/core/modelIR/Visitor.h +++ b/contrib/nnc/include/core/modelIR/Visitor.h @@ -20,24 +20,21 @@ namespace nnc { namespace mir { -class INode; - //Forward declare operations as we don't need anything but references -namespace ops -{ - #define OP_TYPE(OpType) class OpType; - #include "operations/operations.lst.h" - #undef OP_TYPE -} +namespace ops { +#define HANDLE_OP(OpType, OpClass) class OpClass; +#include "operations/operations.lst.h" +#undef HANDLE_OP +} // namespace ops /** * @brief Visitor Interface declaration */ class IVisitor { - public: - #define OP_TYPE(OpType) virtual void visit(INode*, ops::OpType&) = 0; - #include "operations/operations.lst.h" - #undef OP_TYPE +public: +#define HANDLE_OP(OpType, OpClass) virtual void visit(ops::OpClass&) = 0; +#include "operations/operations.lst.h" +#undef HANDLE_OP virtual ~IVisitor() = default; }; @@ -49,11 +46,11 @@ class IVisitor { * only need to define an implementation of `visit` for a subset of operations in the graph, * while not doing anything for all others. 
*/ -class Visitor: public IVisitor{ +class Visitor : public IVisitor { public: - #define OP_TYPE(OpType) virtual void visit(INode*, ops::OpType&) override; - #include "operations/operations.lst.h" - #undef OP_TYPE +#define HANDLE_OP(OpType, OpClass) virtual void visit(ops::OpClass&) override; +#include "operations/operations.lst.h" +#undef HANDLE_OP ~Visitor() override = default; }; diff --git a/contrib/nnc/include/core/modelIR/ir_dot_builder.h b/contrib/nnc/include/core/modelIR/ir_dot_builder.h index 64b12d7..2bbf9ff 100644 --- a/contrib/nnc/include/core/modelIR/ir_dot_builder.h +++ b/contrib/nnc/include/core/modelIR/ir_dot_builder.h @@ -19,7 +19,6 @@ #include -#include "core/modelIR/ir_node.h" #include "core/modelIR/ir_dot_node_info.h" namespace nnc @@ -36,12 +35,12 @@ class IrDotBuilder public: explicit IrDotBuilder() = default; - void updateWithNode(INode *node, const DotIrNodeInfo &irNodeInfo); + void updateWithOp(Operation* op, const DotIrNodeInfo& irNodeInfo); void writeDot(std::ostream &os); private: - void addNode(INode *node, const DotIrNodeInfo &irNode); - void addEdge(INode *node1, INode *node2); + void addNode(Operation* op, const DotIrNodeInfo& irNode); + void addEdge(Operation* op1, Operation* op2); std::stringstream dot; }; diff --git a/contrib/nnc/include/core/modelIR/ir_node.h b/contrib/nnc/include/core/modelIR/ir_node.h deleted file mode 100644 index eea16c4..0000000 --- a/contrib/nnc/include/core/modelIR/ir_node.h +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef _NNC_CORE_IR_MODEL_NODE_H_ -#define _NNC_CORE_IR_MODEL_NODE_H_ - -#include -#include -#include - -#include "core/modelIR/operations/operation.h" -#include "core/modelIR/Visitor.h" - -namespace nnc -{ -namespace mir -{ - -class INode -{ -public: - using Ref = INode *; - - struct IODescriptor - { - INode* node; // Data source - size_t index; // Output id - }; - - using IODescriptorVector = std::vector; - - virtual const std::vector &getPrevNodes() const = 0; - virtual const std::vector &getNextNodes() const = 0; - - virtual size_t getId() const = 0; - - virtual OpDescription *getOperation() const = 0; - - virtual const std::string &getName() const = 0; - virtual void setName(const std::string &name) = 0; - - virtual void accept(IVisitor *v) = 0; - - virtual const IODescriptor getOutput(const size_t index) = 0; - virtual void connectInputTo(const int inputIndex, const IODescriptor &descriptor) = 0; - - virtual ~INode() = default; - -protected: - virtual void addNextNode(const INode::Ref) = 0; -}; - -class AbstractNode : public INode -{ -public: - explicit AbstractNode(size_t num_inputs); - const std::vector &getPrevNodes() const override; - const std::vector &getNextNodes() const override; - void connectInputTo(const int inputIndex, const IODescriptor &descriptor) override; - const IODescriptor getOutput(const size_t index) override; - - std::vector& getMutablePrevNodes(); - std::vector& getMutableNextNodes(); - - protected: - virtual void addNextNode(INode::Ref const node) override; - -private: - std::vector _inputs; - std::vector _outputs; -}; - - -struct NodeProperties -{ - explicit NodeProperties(std::string name, const size_t id, OpDescription *op = nullptr) - : name(std::move(name)), op(op), id(id) - { - } - - std::string name; - OpDescription *op; - const size_t id; - - NodeProperties(NodeProperties &&nodeProps) noexcept : name(std::move(nodeProps.name)), op(nodeProps.op), id(nodeProps.id) - { - nodeProps.op = nullptr; - } -}; - -template -class Node : public AbstractNode -{ -public: - OpType *getOperation() const override { return static_cast(_props.op); } - - template - static Node *createNode(const std::string &nodeName, size_t id, Args &&... 
-template <typename OpType>
-class Node : public AbstractNode
-{
-public:
-  OpType *getOperation() const override { return static_cast<OpType *>(_props.op); }
-
-  template <typename... Args>
-  static Node *createNode(const std::string &nodeName, size_t id, Args &&... args)
-  {
-    auto node =
-        new Node(NodeProperties(nodeName, id, new OpType(std::forward<Args>(args)...)));
-    return node;
-  };
-
-  size_t getId() const override { return _props.id; };
-
-  const std::string &getName() const override { return _props.name; };
-
-  void setName(const std::string &name) override { _props.name = name; }
-
-  void accept(IVisitor *v) override
-  {
-    v->visit(this, *static_cast<OpType *>(_props.op));
-  }
-
-  ~Node() override {
-    delete _props.op;
-  }
-
-private:
-  explicit Node(NodeProperties &&properties)
-      : AbstractNode(properties.op->getNumInputs()), _props(std::move(properties)) {};
-
-  NodeProperties _props;
-};
-
-} // namespace mir
-} // namespace nnc
-
-#endif //_NNC_CORE_IR_MODEL_NODE_H_
diff --git a/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h b/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h
index 8e5d464..9bb0e57 100644
--- a/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h
+++ b/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h
@@ -17,7 +17,7 @@
 #ifndef _NNC_CORE_IR_MODEL_BATCH_NORM_H_
 #define _NNC_CORE_IR_MODEL_BATCH_NORM_H_
 
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
 
 namespace nnc
 {
@@ -26,11 +26,11 @@ namespace mir
 namespace ops
 {
 
-class BatchNormOp : public OpDescription
+class BatchNormOp : public Operation
 {
 public:
   explicit BatchNormOp(float movingAvgFraction, float eps, bool spatial) :
-    OpDescription(1, 1),
+    Operation(Type::batchNorm, 1, 1),
     _movingAvgFraction(movingAvgFraction),
     _eps(eps),
     _spatial(spatial)
diff --git a/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h b/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h
index 67b5152..35b661a 100644
--- a/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h
+++ b/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h
@@ -17,7 +17,7 @@
 #ifndef _NNC_CORE_IR_MODEL_BIAS_ADD_H_
 #define _NNC_CORE_IR_MODEL_BIAS_ADD_H_
 
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
 #include "core/modelIR/TensorVariant.h"
 
 namespace nnc
 {
@@ -27,10 +27,11 @@ namespace mir
 namespace ops
 {
 
-class BiasAddOp : public OpDescription
+class BiasAddOp : public Operation
 {
 public:
-  explicit BiasAddOp(const TensorVariant &weights) : OpDescription(1, 1), _weights(weights) {}
+  explicit BiasAddOp(const TensorVariant& weights) : Operation(Type::biasAdd, 1, 1),
+                                                     _weights(weights) {}
 
   const TensorVariant &getWeights() const
   {
     return _weights;
   }
diff --git a/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h b/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h
index 928c20d..5609b5d 100644
--- a/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h
+++ b/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h
@@ -17,7 +17,7 @@
 #ifndef _NNC_CORE_IR_MODEL_CAPPED_RELU_H_
 #define _NNC_CORE_IR_MODEL_CAPPED_RELU_H_
 
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
 
 namespace nnc
 {
@@ -26,9 +26,9 @@ namespace mir
 namespace ops
 {
 
-class CappedReluOp : public OpDescription {
+class CappedReluOp : public Operation {
 public:
-  explicit CappedReluOp(float cap) : OpDescription(1, 1), _cap(cap) {
+  explicit CappedReluOp(float cap) : Operation(Type::cappedReLU, 1, 1), _cap(cap) {
   }
 
   float getCap() const {
diff --git a/contrib/nnc/include/core/modelIR/operations/ConcatOp.h b/contrib/nnc/include/core/modelIR/operations/ConcatOp.h
index 3ad4c55..928d55d 100644
--- a/contrib/nnc/include/core/modelIR/operations/ConcatOp.h
+++ b/contrib/nnc/include/core/modelIR/operations/ConcatOp.h
@@
-19,7 +19,7 @@ #include -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" #include "core/modelIR/Shape.h" namespace nnc @@ -32,10 +32,10 @@ namespace ops /** * @brief Description of tensor concatenation operation. */ -class ConcatOp : public OpDescription +class ConcatOp : public Operation { public: - ConcatOp(int num_inputs, int32_t axis) : OpDescription(num_inputs, 1), _axis(axis) {} + ConcatOp(int num_inputs, int32_t axis) : Operation(Type::concat, num_inputs, 1), _axis(axis) {} int32_t getAxis() const { diff --git a/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h b/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h index b99de46..2958b16 100644 --- a/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h +++ b/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h @@ -19,7 +19,7 @@ #include -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" #include "core/modelIR/operations/common.h" #include "core/modelIR/TensorVariant.h" @@ -32,11 +32,11 @@ namespace mir namespace ops { -class Conv2DOp : public OpDescription +class Conv2DOp : public Operation { public: Conv2DOp(const TensorVariant &kernel, const Shape &strides, PaddingType padding) - : OpDescription(1, 1), _kernel(kernel), _strides(strides), + : Operation(Type::conv2D, 1, 1), _kernel(kernel), _strides(strides), _padding(padding) { _pads.resize(3); diff --git a/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h b/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h index 9eaa0e4..24894f4 100644 --- a/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h +++ b/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h @@ -17,7 +17,7 @@ #ifndef _NNC_CORE_IR_MODEL_DECONV_2D_H_ #define _NNC_CORE_IR_MODEL_DECONV_2D_H_ -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" #include "core/modelIR/operations/common.h" #include "core/modelIR/TensorVariant.h" @@ -28,10 +28,10 @@ namespace nnc { namespace mir { namespace ops { -class DeConv2DOp : public OpDescription { +class DeConv2DOp : public Operation { public: DeConv2DOp(const TensorVariant &kernel, const Shape &strides, PaddingType padding) - : OpDescription(1, 1), _kernel(kernel), _strides(strides), + : Operation(Type::deConv2D, 1, 1), _kernel(kernel), _strides(strides), _padding(padding) { _pads.resize(3); } diff --git a/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h b/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h index 04d00d3..57e963e 100644 --- a/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h +++ b/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h @@ -19,7 +19,7 @@ #include -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" #include "core/modelIR/TensorVariant.h" #include "core/modelIR/operations/common.h" @@ -32,11 +32,11 @@ namespace mir namespace ops { -class DepthwiseConv2DOp : public OpDescription +class DepthwiseConv2DOp : public Operation { public: explicit DepthwiseConv2DOp(const TensorVariant &kernel, const Shape &strides, PaddingType padding) - : OpDescription(1, 1), _kernel(kernel), _strides(strides), _padding(padding) + : Operation(Type::depthwiseConv, 1, 1), _kernel(kernel), _strides(strides), _padding(padding) { _pads.resize(_kernel.getShape().rank()); } diff --git a/contrib/nnc/include/core/modelIR/operations/DropoutOp.h b/contrib/nnc/include/core/modelIR/operations/DropoutOp.h index 0d3fcc9..2ce8fbd 100644 --- 
a/contrib/nnc/include/core/modelIR/operations/DropoutOp.h +++ b/contrib/nnc/include/core/modelIR/operations/DropoutOp.h @@ -17,7 +17,7 @@ #ifndef _NNC_CORE_IR_MODEL_DROPOUT_H_ #define _NNC_CORE_IR_MODEL_DROPOUT_H_ -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" namespace nnc { @@ -26,9 +26,9 @@ namespace mir namespace ops { -class DropoutOp : public OpDescription { +class DropoutOp : public Operation { public: - explicit DropoutOp(float rate) : OpDescription(1, 1), _rate(rate) {} + explicit DropoutOp(float rate) : Operation(Type::dropout, 1, 1), _rate(rate) {} /** * @return The ratio of random dropout diff --git a/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h b/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h index 9951af6..bb24720 100644 --- a/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h @@ -17,13 +17,13 @@ #ifndef _NNC_CORE_IR_MODEL_ELEMENTWISE_OP_H_ #define _NNC_CORE_IR_MODEL_ELEMENTWISE_OP_H_ -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" namespace nnc { namespace mir { namespace ops { -class ElementwiseOp : public OpDescription { +class ElementwiseOp : public Operation { public: enum class OpType { @@ -36,7 +36,7 @@ public: * @param num_inputs Number of inputs */ explicit ElementwiseOp(OpType op_type, size_t num_inputs) : - OpDescription(num_inputs, 1), _opType(op_type) {}; + Operation(Type::elementwise, num_inputs, 1), _opType(op_type) {}; private: OpType _opType; diff --git a/contrib/nnc/include/core/modelIR/operations/EluOp.h b/contrib/nnc/include/core/modelIR/operations/EluOp.h index 7042460..8a03840 100644 --- a/contrib/nnc/include/core/modelIR/operations/EluOp.h +++ b/contrib/nnc/include/core/modelIR/operations/EluOp.h @@ -17,15 +17,15 @@ #ifndef _NNC_CORE_IR_MODEL_ELU_H_ #define _NNC_CORE_IR_MODEL_ELU_H_ -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" namespace nnc { namespace mir { namespace ops { -class EluOp : public OpDescription { +class EluOp : public Operation { public: - explicit EluOp(float alpha) : OpDescription(1, 1), _alpha(alpha) {} + explicit EluOp(float alpha) : Operation(Type::ELU, 1, 1), _alpha(alpha) {} float getAlpha() const { return _alpha; diff --git a/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h b/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h index f35963e..00555b8 100644 --- a/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h +++ b/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h @@ -17,7 +17,7 @@ #ifndef _NNC_CORE_IR_MODEL_FULLY_CONNECTED_OP_H_ #define _NNC_CORE_IR_MODEL_FULLY_CONNECTED_OP_H_ -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" #include "core/modelIR/TensorVariant.h" namespace nnc @@ -27,10 +27,11 @@ namespace mir namespace ops { -class FullyConnectedOp : public OpDescription +class FullyConnectedOp : public Operation { public: - explicit FullyConnectedOp(const TensorVariant &weights) : OpDescription(1, 1), _weights(weights) {} + explicit FullyConnectedOp(const TensorVariant& weights) : Operation(Type::fullyConnected, 1, 1), + _weights(weights) {} const TensorVariant &getWeights() const { return _weights; } diff --git a/contrib/nnc/include/core/modelIR/operations/PadOp.h b/contrib/nnc/include/core/modelIR/operations/PadOp.h index 3eb5f11..8afe1d2 100644 --- a/contrib/nnc/include/core/modelIR/operations/PadOp.h +++ 
b/contrib/nnc/include/core/modelIR/operations/PadOp.h @@ -19,7 +19,7 @@ #include -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" #include "core/modelIR/TensorVariant.h" @@ -27,7 +27,7 @@ namespace nnc { namespace mir { namespace ops { -class PadOp : public OpDescription { +class PadOp : public Operation { public: enum class PaddingMode { CONST, @@ -36,7 +36,7 @@ public: }; explicit PadOp(PaddingMode paddingMode, int numDims, const TensorVariant& constant_value) - : OpDescription(1, 1), _paddingMode(paddingMode), + : Operation(Type::pad, 1, 1), _paddingMode(paddingMode), _numDims(numDims), _constant_value(constant_value) { _paddings.resize(_numDims); diff --git a/contrib/nnc/include/core/modelIR/operations/PoolOp.h b/contrib/nnc/include/core/modelIR/operations/PoolOp.h index 8dda6cb..7f14f2f 100644 --- a/contrib/nnc/include/core/modelIR/operations/PoolOp.h +++ b/contrib/nnc/include/core/modelIR/operations/PoolOp.h @@ -19,7 +19,7 @@ #include -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" #include "core/modelIR/operations/common.h" #include "core/modelIR/Shape.h" @@ -31,7 +31,7 @@ namespace mir namespace ops { -class PoolOp : public OpDescription +class PoolOp : public Operation { public: enum class PoolingType @@ -49,7 +49,7 @@ public: explicit PoolOp(const Shape &windowShape, const Shape &strides, PoolingType poolType, PaddingType padding, BorderType borderType) - : OpDescription(1, 1), _padding(padding), _poolingType(poolType), + : Operation(Type::pool, 1, 1), _padding(padding), _poolingType(poolType), _borderType(borderType), _windowShape(windowShape), _strides(strides) { _pads.resize(_windowShape.rank()); diff --git a/contrib/nnc/include/core/modelIR/operations/ReluOp.h b/contrib/nnc/include/core/modelIR/operations/ReluOp.h index f61dbdd..42a86a0 100644 --- a/contrib/nnc/include/core/modelIR/operations/ReluOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ReluOp.h @@ -17,7 +17,7 @@ #ifndef _NNC_CORE_IR_MODEL_RELU_H_ #define _NNC_CORE_IR_MODEL_RELU_H_ -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" namespace nnc { @@ -26,10 +26,10 @@ namespace mir namespace ops { -class ReluOp : public OpDescription +class ReluOp : public Operation { public: - explicit ReluOp() : OpDescription(1, 1) {} + explicit ReluOp() : Operation(Type::ReLU, 1, 1) {} }; } // namespace ops diff --git a/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h b/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h index ba18745..04c25bd 100644 --- a/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h @@ -16,7 +16,7 @@ #pragma once -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" namespace nnc { @@ -25,10 +25,10 @@ namespace mir namespace ops { -class ReshapeOp : public OpDescription +class ReshapeOp : public Operation { public: - explicit ReshapeOp() : OpDescription(1, 1) {} + explicit ReshapeOp() : Operation(Type::reshape, 1, 1) {} }; } // namespace ops diff --git a/contrib/nnc/include/core/modelIR/operations/ScaleOp.h b/contrib/nnc/include/core/modelIR/operations/ScaleOp.h index 57ba941..3f207e0 100644 --- a/contrib/nnc/include/core/modelIR/operations/ScaleOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ScaleOp.h @@ -17,7 +17,7 @@ #ifndef _NNC_CORE_IR_MODEL_SCALE_H_ #define _NNC_CORE_IR_MODEL_SCALE_H_ -#include "core/modelIR/operations/operation.h" +#include "core/modelIR/Operation.h" namespace 
nnc {
@@ -26,10 +26,11 @@ namespace mir
 namespace ops
 {
 
-class ScaleOp : public OpDescription
+class ScaleOp : public Operation
 {
 public:
-  explicit ScaleOp(const TensorVariant &weights) : OpDescription(1, 1), _weights(weights) {}
+  explicit ScaleOp(const TensorVariant& weights) : Operation(Type::scale, 1, 1),
+                                                   _weights(weights) {}
 
   /**
    * @return The input 1-dimensional scale tensor.
diff --git a/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h b/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h
index 934e445..059dc0d 100644
--- a/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h
+++ b/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h
@@ -17,7 +17,7 @@
 #ifndef _NNC_CORE_IR_MODEL_SOFTMAX_H_
 #define _NNC_CORE_IR_MODEL_SOFTMAX_H_
 
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
 #include "core/modelIR/Shape.h"
 
 namespace nnc
 {
@@ -30,10 +30,10 @@ namespace ops
 /**
  * @brief description of softmax operation.
  */
-class SoftmaxOp : public OpDescription
+class SoftmaxOp : public Operation
 {
 public:
-  explicit SoftmaxOp(int32_t axis) : OpDescription(1, 1), _axis(axis) {}
+  explicit SoftmaxOp(int32_t axis) : Operation(Type::softmax, 1, 1), _axis(axis) {}
 
   int32_t getAxis() const
   {
diff --git a/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h b/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h
index 7e187b6..fb2875e 100644
--- a/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h
+++ b/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h
@@ -17,17 +17,16 @@
 #ifndef _NNC_CORE_IR_MODEL_SQUEEZE_OP_H_
 #define _NNC_CORE_IR_MODEL_SQUEEZE_OP_H_
 
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
 
 namespace nnc {
 namespace mir {
 namespace ops {
 
-class SqueezeOp : public OpDescription {
+class SqueezeOp : public Operation {
 public:
   explicit SqueezeOp(const std::vector<int32_t>& dims_to_squeeze) :
-    OpDescription(1, 1),
-    _dims_to_squeeze(dims_to_squeeze) {}
+    Operation(Type::squeeze, 1, 1), _dims_to_squeeze(dims_to_squeeze) {}
 
   int32_t getNumSqueezeDims() {
     return static_cast<int32_t>(_dims_to_squeeze.size());
diff --git a/contrib/nnc/include/core/modelIR/operations/TanhOp.h b/contrib/nnc/include/core/modelIR/operations/TanhOp.h
index d1244c2..a470528 100644
--- a/contrib/nnc/include/core/modelIR/operations/TanhOp.h
+++ b/contrib/nnc/include/core/modelIR/operations/TanhOp.h
@@ -18,15 +18,15 @@
 #define _NNC_CORE_IR_MODEL_TANH_H_
 
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
 
 namespace nnc {
 namespace mir {
 namespace ops {
 
-class TanhOp : public OpDescription {
+class TanhOp : public Operation {
 public:
-  explicit TanhOp() : OpDescription(1, 1) {}
+  explicit TanhOp() : Operation(Type::tanh, 1, 1) {}
 };
 
 } // namespace ops
diff --git a/contrib/nnc/include/core/modelIR/operations/VariableOp.h b/contrib/nnc/include/core/modelIR/operations/VariableOp.h
index e1f8b8c..e71cbb8 100644
--- a/contrib/nnc/include/core/modelIR/operations/VariableOp.h
+++ b/contrib/nnc/include/core/modelIR/operations/VariableOp.h
@@ -17,7 +17,7 @@
 #ifndef _NNC_CORE_IR_MODEL_VARIABLE_H_
 #define _NNC_CORE_IR_MODEL_VARIABLE_H_
 
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
 
 namespace nnc
 {
@@ -26,10 +26,10 @@ namespace mir
 namespace ops
 {
 
-class VariableOp : public OpDescription
+class VariableOp : public Operation
 {
 public:
-  explicit VariableOp() : OpDescription(0, 1) {}
+  explicit VariableOp() : Operation(Type::variable, 0, 1) {}
 };
 
 } // namespace ops
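Each constructor above now passes a Type tag down to the Operation base (Type::scale, Type::squeeze, and so on) instead of the old bare OpDescription(num_inputs, num_outputs) call. Assuming the new base exposes that tag through a getter (a getType() accessor is an assumption here, not shown in these hunks), the tag enables cheap, RTTI-free downcasts:

    #include <cstdint>
    #include "core/modelIR/Operation.h"
    #include "core/modelIR/operations/SoftmaxOp.h"

    // Hedged sketch: inspect an op generically, then downcast by tag.
    // getType() is assumed; getAxis() comes from the SoftmaxOp hunk above.
    int32_t softmaxAxisOrDefault(nnc::mir::Operation* op) {
      if (op->getType() == nnc::mir::Operation::Type::softmax) {
        // The tag check makes this static_cast safe; no dynamic_cast needed.
        return static_cast<nnc::mir::ops::SoftmaxOp*>(op)->getAxis();
      }
      return -1;  // hypothetical sentinel for non-softmax ops
    }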
diff --git a/contrib/nnc/include/core/modelIR/operations/operation.h b/contrib/nnc/include/core/modelIR/operations/operation.h
deleted file mode 100644
index 792371d..0000000
--- a/contrib/nnc/include/core/modelIR/operations/operation.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _NNC_CORE_IR_MODEL_OPERATION_H_
-#define _NNC_CORE_IR_MODEL_OPERATION_H_
-
-#include <map>
-#include <cstddef>
-#include "core/modelIR/TensorVariant.h"
-
-#include "core/modelIR/Shape.h"
-
-namespace nnc
-{
-namespace mir
-{
-
-class OpDescription {
- public:
-  explicit OpDescription(const size_t max_inputs, const size_t max_outputs);
-  virtual ~OpDescription() = default;
-
-  size_t getNumInputs() const;
-  size_t getNumOutputs() const;
-
-  const nnc::mir::Shape &getInputShape(const size_t index) const;
-  virtual void setInputShape(const size_t index, const nnc::mir::Shape &shape);
-
-  virtual const nnc::mir::Shape &getOutputShape(const size_t index) const;
-  void setOutputShape(const size_t index, const nnc::mir::Shape &shape);
-
- private:
-  size_t _max_inputs;
-  size_t _max_outputs;
-
-  std::map<size_t, Shape> _inputShapes;
-  std::map<size_t, Shape> _outputShapes;
-};
-
-} // namespace mir
-} // namespace nnc
-
-#endif //_NNC_CORE_IR_MODEL_OPERATION_H_
diff --git a/contrib/nnc/include/core/modelIR/operations/operations.lst.h b/contrib/nnc/include/core/modelIR/operations/operations.lst.h
index 58554d6..c04286a 100644
--- a/contrib/nnc/include/core/modelIR/operations/operations.lst.h
+++ b/contrib/nnc/include/core/modelIR/operations/operations.lst.h
@@ -14,27 +14,27 @@
 * limitations under the License.
*/ -#ifndef OP_TYPE -#error "You should define OP_TYPE before including this file" -#endif //OP_TYPE +#ifndef HANDLE_OP +#error "You should define HANDLE_OP before including this file" +#endif //HANDLE_OP -OP_TYPE(ConcatOp) -OP_TYPE(Conv2DOp) -OP_TYPE(DepthwiseConv2DOp) -OP_TYPE(SoftmaxOp) -OP_TYPE(PoolOp) -OP_TYPE(FullyConnectedOp) -OP_TYPE(CappedReluOp) -OP_TYPE(BiasAddOp) -OP_TYPE(VariableOp) -OP_TYPE(ReluOp) -OP_TYPE(ReshapeOp) -OP_TYPE(ScaleOp) -OP_TYPE(BatchNormOp) -OP_TYPE(DropoutOp) -OP_TYPE(TanhOp) -OP_TYPE(ElementwiseOp) -OP_TYPE(DeConv2DOp) -OP_TYPE(EluOp) -OP_TYPE(SqueezeOp) -OP_TYPE(PadOp) +HANDLE_OP(concat, ConcatOp) +HANDLE_OP(conv2D, Conv2DOp) +HANDLE_OP(depthwiseConv, DepthwiseConv2DOp) +HANDLE_OP(softmax, SoftmaxOp) +HANDLE_OP(pool, PoolOp) +HANDLE_OP(fullyConnected, FullyConnectedOp) +HANDLE_OP(cappedReLU, CappedReluOp) +HANDLE_OP(biasAdd, BiasAddOp) +HANDLE_OP(variable, VariableOp) +HANDLE_OP(ReLU, ReluOp) +HANDLE_OP(reshape, ReshapeOp) +HANDLE_OP(scale, ScaleOp) +HANDLE_OP(batchNorm, BatchNormOp) +HANDLE_OP(dropout, DropoutOp) +HANDLE_OP(tanh, TanhOp) +HANDLE_OP(elementwise, ElementwiseOp) +HANDLE_OP(deConv2D, DeConv2DOp) +HANDLE_OP(ELU, EluOp) +HANDLE_OP(squeeze, SqueezeOp) +HANDLE_OP(pad, PadOp) diff --git a/contrib/nnc/include/pass/PassData.h b/contrib/nnc/include/pass/PassData.h index e109460..fbc79e7 100644 --- a/contrib/nnc/include/pass/PassData.h +++ b/contrib/nnc/include/pass/PassData.h @@ -17,7 +17,7 @@ #ifndef NNCC_PASSDATA_H #define NNCC_PASSDATA_H -#include "core/modelIR/graph.h" +#include "core/modelIR/Graph.h" #include "core/modelIR/TensorVariant.h" diff --git a/contrib/nnc/include/passes/acl_soft_backend/AclCppOpGenerator.h b/contrib/nnc/include/passes/acl_soft_backend/AclCppOpGenerator.h index 92f5082..6c9af1e 100644 --- a/contrib/nnc/include/passes/acl_soft_backend/AclCppOpGenerator.h +++ b/contrib/nnc/include/passes/acl_soft_backend/AclCppOpGenerator.h @@ -19,8 +19,8 @@ #include "core/modelIR/Visitor.h" #include "core/modelIR/TensorVariant.h" -#include "core/modelIR/operations/operation.h" -#include "core/modelIR/graph.h" +#include "core/modelIR/Operation.h" +#include "core/modelIR/Graph.h" #include "ArtifactModel.h" #include "ArtifactGeneratorCppCode.h" #include "ArtifactGeneratorCppDecl.h" @@ -45,29 +45,28 @@ public: /** * @brief Implementations of the IVisitor visitors. 
- * @param node * @param op */ - void visit(mir::INode* node, mir::ops::ConcatOp& op) override; - void visit(mir::INode* node, mir::ops::Conv2DOp& op) override; - void visit(mir::INode* node, mir::ops::DepthwiseConv2DOp& op) override; - void visit(mir::INode* node, mir::ops::SoftmaxOp& op) override; - void visit(mir::INode* node, mir::ops::PoolOp& op) override; - void visit(mir::INode* node, mir::ops::FullyConnectedOp& op) override; - void visit(mir::INode* node, mir::ops::CappedReluOp& op) override; - void visit(mir::INode* node, mir::ops::BiasAddOp& op) override; - void visit(mir::INode* node, mir::ops::VariableOp& op) override; - void visit(mir::INode* node, mir::ops::ReluOp& op) override; - void visit(mir::INode* node, mir::ops::ReshapeOp& op) override; - void visit(mir::INode* node, mir::ops::ScaleOp& op) override; - void visit(mir::INode* node, mir::ops::BatchNormOp& op) override; - void visit(mir::INode* node, mir::ops::DropoutOp& op) override; - void visit(mir::INode* node, mir::ops::TanhOp& op) override; - void visit(mir::INode* node, mir::ops::ElementwiseOp& op) override; - void visit(mir::INode* node, mir::ops::DeConv2DOp& op) override; - void visit(mir::INode* node, mir::ops::EluOp& op) override; - void visit(mir::INode* node, mir::ops::SqueezeOp& op) override; - void visit(mir::INode* node, mir::ops::PadOp& op) override; + void visit(mir::ops::ConcatOp& op) override; + void visit(mir::ops::Conv2DOp& op) override; + void visit(mir::ops::DepthwiseConv2DOp& op) override; + void visit(mir::ops::SoftmaxOp& op) override; + void visit(mir::ops::PoolOp& op) override; + void visit(mir::ops::FullyConnectedOp& op) override; + void visit(mir::ops::CappedReluOp& op) override; + void visit(mir::ops::BiasAddOp& op) override; + void visit(mir::ops::VariableOp& op) override; + void visit(mir::ops::ReluOp& op) override; + void visit(mir::ops::ReshapeOp& op) override; + void visit(mir::ops::ScaleOp& op) override; + void visit(mir::ops::BatchNormOp& op) override; + void visit(mir::ops::DropoutOp& op) override; + void visit(mir::ops::TanhOp& op) override; + void visit(mir::ops::ElementwiseOp& op) override; + void visit(mir::ops::DeConv2DOp& op) override; + void visit(mir::ops::EluOp& op) override; + void visit(mir::ops::SqueezeOp& op) override; + void visit(mir::ops::PadOp& op) override; private: using AF = ArtifactFactory; @@ -76,8 +75,7 @@ private: * @brief The common part of the convolution and the depthwise convolution. */ template - void genConvolution(mir::INode* node, Op& op, const std::string& acl_func_name, - const std::string& suffix); + void genConvolution(Op& op, const std::string& acl_func_name, const std::string& suffix); /** * @brief Generates different types of activation functions: ReLU, Tanh etc. @@ -86,8 +84,8 @@ private: * LINEAR, TANH. * @param b - betha parameter used by some activation functions: LINEAR, LU_BOUNDED_RELU, TANH. */ - void genActivation(mir::INode* node, mir::OpDescription& op, const std::string& activation_name, - float a = 0, float b = 0); + void + genActivation(mir::Operation& op, const std::string& activation_name, float a = 0, float b = 0); /** * @brief Used to generate a binary addition operation in handling of the elementwise. @@ -141,7 +139,7 @@ private: /** * @brief Generates a unique name for the tensor. */ - std::string tensorName(mir::INode* node) const; + std::string tensorName(mir::Operation* op) const; /** * @brief Generates tensor shape in DOM. @@ -166,11 +164,11 @@ private: /** * @brief Generates a DOM tensor. 
- * @param node - node for which this tensor generated.
+ * @param op - an IR operation for which this tensor is generated.
  * @param ir_shape - a shape in IR.
  * @return - a DOM identifier for the created tensor.
  */
-  std::shared_ptr<ArtifactId> genTensor(mir::INode* node, const mir::Shape& ir_shape);
+  std::shared_ptr<ArtifactId> genTensor(mir::Operation& op, const mir::Shape& ir_shape);
 
   /**
    * @brief Generates accessors for the input/output tensors.
@@ -183,8 +181,8 @@
    */
   void serializeTensor(const mir::TensorVariant& tensor);
 
-  std::set<mir::INode*> _inputs;
-  std::set<mir::INode*> _outputs;
+  std::set<mir::Operation*> _inputs;
+  std::set<mir::Operation*> _outputs;
   std::set<std::string> _tensorNames;
 
   /**
diff --git a/contrib/nnc/include/passes/caffe_frontend/caffe_importer.h b/contrib/nnc/include/passes/caffe_frontend/caffe_importer.h
index c89ec3a..33ae1f8 100644
--- a/contrib/nnc/include/passes/caffe_frontend/caffe_importer.h
+++ b/contrib/nnc/include/passes/caffe_frontend/caffe_importer.h
@@ -68,7 +68,7 @@ private:
   std::unique_ptr<CaffeOpCreator> _opCreator;
 
   // Maps Caffe blob name to MIR IODescriptor.
-  std::map<std::string, mir::INode::IODescriptor> _blobNameToIODescriptor;
+  std::map<std::string, mir::IODescriptor> _blobNameToIODescriptor;
 
   static const std::map<std::string, CaffeOpType> _operatorTypes;
   std::set<std::string> _problemsOpSet;
@@ -107,7 +107,7 @@
   /**
    * @brief Return MIR IODescriptors for the inputs of the given layer.
    */
-  mir::INode::IODescriptorVector getMIRInputsForLayer(const ::caffe::LayerParameter& layer);
+  std::vector<mir::IODescriptor> getMIRInputsForLayer(const ::caffe::LayerParameter& layer);
 
   /**
    * @brief Prepare Caffe layer parameters for Model IR operation creator.
diff --git a/contrib/nnc/include/passes/common_frontend/nn_importer.h b/contrib/nnc/include/passes/common_frontend/nn_importer.h
index 099f52c..6f63d8a 100644
--- a/contrib/nnc/include/passes/common_frontend/nn_importer.h
+++ b/contrib/nnc/include/passes/common_frontend/nn_importer.h
@@ -17,7 +17,7 @@
 #ifndef FRONTEND_COMMON_INCLUDE_NN_IMPORTER_
 #define FRONTEND_COMMON_INCLUDE_NN_IMPORTER_
 
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 
 namespace nnc
 {
diff --git a/contrib/nnc/include/passes/interpreter/Interpreter.h b/contrib/nnc/include/passes/interpreter/Interpreter.h
index 13339af..1bc2af0 100644
--- a/contrib/nnc/include/passes/interpreter/Interpreter.h
+++ b/contrib/nnc/include/passes/interpreter/Interpreter.h
@@ -23,7 +23,7 @@
 #include
 #include "core/modelIR/Visitor.h"
-#include "core/modelIR/ir_node.h"
+#include "core/modelIR/Operation.h"
 
 #include "core/modelIR/Tensor.h"
 
@@ -32,34 +32,33 @@ namespace nnc
 namespace mir
 {
 
-class NNInterpreter : public IVisitor
-{
+class NNInterpreter : public IVisitor {
 public:
   explicit NNInterpreter() = default;
 
-  void visit(INode::Ref node, ops::ConcatOp &op) override;
-  void visit(INode::Ref node, ops::Conv2DOp &op) override;
-  void visit(INode::Ref node, ops::DepthwiseConv2DOp &op) override;
-  void visit(INode::Ref node, ops::ReluOp &op) override;
-  void visit(INode::Ref node, ops::SoftmaxOp &op) override;
-  void visit(INode::Ref node, ops::PoolOp &op) override;
-  void visit(INode::Ref node, ops::FullyConnectedOp &op) override;
-  void visit(INode::Ref node, ops::CappedReluOp &op) override;
-  void visit(INode::Ref node, ops::BiasAddOp &op) override;
-  void visit(INode::Ref node, ops::VariableOp &op) override;
-  void visit(INode::Ref node, ops::ReshapeOp &op) override;
-  void visit(INode::Ref node, ops::ScaleOp &op) override;
-  void visit(INode::Ref node, ops::BatchNormOp &op) override;
-  void visit(INode::Ref node, ops::DropoutOp &op) override;
-  void visit(INode::Ref node, ops::TanhOp &op) override;
-  void visit(INode::Ref node, ops::ElementwiseOp &op)
override; - void visit(INode::Ref node, ops::DeConv2DOp &op) override; - void visit(INode::Ref node, ops::EluOp &op) override; - void visit(INode* node, ops::SqueezeOp& op) override; - void visit(INode* node, ops::PadOp& op) override; + void visit(ops::ConcatOp& op) override; + void visit(ops::Conv2DOp& op) override; + void visit(ops::DepthwiseConv2DOp& op) override; + void visit(ops::ReluOp& op) override; + void visit(ops::SoftmaxOp& op) override; + void visit(ops::PoolOp& op) override; + void visit(ops::FullyConnectedOp& op) override; + void visit(ops::CappedReluOp& op) override; + void visit(ops::BiasAddOp& op) override; + void visit(ops::VariableOp& op) override; + void visit(ops::ReshapeOp& op) override; + void visit(ops::ScaleOp& op) override; + void visit(ops::BatchNormOp& op) override; + void visit(ops::DropoutOp& op) override; + void visit(ops::TanhOp& op) override; + void visit(ops::ElementwiseOp& op) override; + void visit(ops::DeConv2DOp& op) override; + void visit(ops::EluOp& op) override; + void visit(ops::SqueezeOp& op) override; + void visit(ops::PadOp& op) override; void setInput(const std::string &name, const TensorVariant& data); - std::vector &getResult(INode::Ref node); + std::vector &getResult(Operation* op); /** * @brief Intermediate interpreter results getter * @param nodeName - name of node @@ -73,14 +72,14 @@ private: std::vector &var(size_t id); /** * @brief Used to collect nodes data for getting intermediate interpreter results - * @param n - reference to node + * @param op - reference to node */ - void mapByName(INode::Ref n); + void mapByName(Operation* op); private: std::map> vars; std::unordered_map data; - std::map nodeByName; + std::map _opByName; }; } // namespace mir diff --git a/contrib/nnc/include/passes/soft_backend/BaseGenerator.h b/contrib/nnc/include/passes/soft_backend/BaseGenerator.h index 452d238..8aad740 100644 --- a/contrib/nnc/include/passes/soft_backend/BaseGenerator.h +++ b/contrib/nnc/include/passes/soft_backend/BaseGenerator.h @@ -17,7 +17,7 @@ #ifndef _NNC_SOFT_BACKEND_BASE_GENERATOR_H_ #define _NNC_SOFT_BACKEND_BASE_GENERATOR_H_ -#include "core/modelIR/graph.h" +#include "core/modelIR/Graph.h" #include "pass/Pass.h" #include "pass/PassData.h" diff --git a/contrib/nnc/include/passes/tflite_frontend/tflite_importer.h b/contrib/nnc/include/passes/tflite_frontend/tflite_importer.h index dda8aa0..ee1a291 100644 --- a/contrib/nnc/include/passes/tflite_frontend/tflite_importer.h +++ b/contrib/nnc/include/passes/tflite_frontend/tflite_importer.h @@ -27,8 +27,7 @@ #include "passes/common_frontend/nn_importer.h" #include "passes/common_frontend/model_allocation.h" -#include "core/modelIR/graph.h" -#include "core/modelIR/ir_node.h" +#include "core/modelIR/Graph.h" #include "core/modelIR/TensorUtil.h" #include "core/modelIR/TensorVariant.h" @@ -93,7 +92,7 @@ private: // This map maps indices of TFLite tensors to MIR operations/nodes // that correspond to operations having these tensors as output. 
- std::map _opsForTensorsTheyOutput; + std::map _opsForTensorsTheyOutput; std::set _problemsOpSet; @@ -134,7 +133,7 @@ private: /** * @brief Return MIR ops, preceding given tflite operator */ - std::vector getPrecedingMIROps(const ::tflite::Operator* op); + std::vector getPrecedingMIROps(const ::tflite::Operator* op); std::shared_ptr createTensor(const ::tflite::Tensor* t, const ::tflite::Buffer* b); diff --git a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp index 6f4cca3..41da426 100644 --- a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp +++ b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp @@ -1,6 +1,5 @@ #include "passes/acl_soft_backend/AclCppOpGenerator.h" #include "passes/acl_soft_backend/AclCppException.h" -#include "core/modelIR/ir_node.h" #include "core/modelIR/ShapeRange.h" #include "core/modelIR/TensorUtil.h" #include "option/Options.h" @@ -63,20 +62,20 @@ const ArtifactModule& AclCppOpGenerator::generate(mir::Graph* g) { return _module; } -void AclCppOpGenerator::visit(INode* node, ops::ConcatOp& op) { +void AclCppOpGenerator::visit(ops::ConcatOp& op) { static const char* axis_names[] = {"arm_compute::DataLayoutDimension::CHANNEL", "arm_compute::DataLayoutDimension::HEIGHT", "arm_compute::DataLayoutDimension::WIDTH", "arm_compute::DataLayoutDimension::BATCHES"}; assert(op.getAxis() < sizeof(axis_names) / sizeof(const char*)); - auto out = genTensor(node, op.getOutputShape(0)); + auto out = genTensor(op, op.getOutputShape(0)); auto prefix = out->name() + "_concatenate_layer"; auto inputs_var = _constrBlock->var("std::vector", prefix + "_inputs"); auto inputs = inputs_var->use(); - for (auto i : node->getPrevNodes()) - _constrBlock->call("push_back", {AF::ref(AF::id(tensorName(i.node)))}, inputs); + for (auto i : op.getPrevNodes()) + _constrBlock->call("push_back", {AF::ref(AF::id(tensorName(i.op)))}, inputs); auto concat_layer_var = _artifactClass->var(false, "arm_compute::CLConcatenateLayer", prefix); auto concat_layer = concat_layer_var->use(); @@ -85,21 +84,21 @@ void AclCppOpGenerator::visit(INode* node, ops::ConcatOp& op) { _infBlock->call("run", {}, concat_layer); } -void AclCppOpGenerator::visit(INode* node, ops::Conv2DOp& op) { - genConvolution(node, op, "arm_compute::CLConvolutionLayer", "_convolution_layer"); +void AclCppOpGenerator::visit(ops::Conv2DOp& op) { + genConvolution(op, "arm_compute::CLConvolutionLayer", "_convolution_layer"); } -void AclCppOpGenerator::visit(INode* node, ops::DepthwiseConv2DOp& op) { - genConvolution(node, op, "arm_compute::CLDepthwiseConvolutionLayer", +void AclCppOpGenerator::visit(ops::DepthwiseConv2DOp& op) { + genConvolution(op, "arm_compute::CLDepthwiseConvolutionLayer", "_depthwise_convolution_layer"); } -void AclCppOpGenerator::visit(INode* node, ops::SoftmaxOp& op) { - auto& prev_nodes = node->getPrevNodes(); - assert(prev_nodes.size() == 1); - auto in_node = prev_nodes[0].node; - auto in = AF::id(tensorName(in_node)); - auto out = genTensor(node, op.getOutputShape(0)); +void AclCppOpGenerator::visit(ops::SoftmaxOp& op) { + auto& in_ops = op.getPrevNodes(); + assert(in_ops.size() == 1); + auto in_op = in_ops[0].op; + auto in = AF::id(tensorName(in_op)); + auto out = genTensor(op, op.getOutputShape(0)); auto sm_layer_var = _artifactClass->var(false, "arm_compute::CLSoftmaxLayer", out->name() + "_softmax_layer"); auto sm_layer = sm_layer_var->use(); @@ -107,7 +106,7 @@ void AclCppOpGenerator::visit(INode* node, ops::SoftmaxOp& op) { 
_infBlock->call("run", {}, sm_layer); } -void AclCppOpGenerator::visit(INode* node, ops::PoolOp& op) { +void AclCppOpGenerator::visit(ops::PoolOp& op) { const char* pooling_type; switch (op.getPoolingType()) { @@ -121,12 +120,12 @@ void AclCppOpGenerator::visit(INode* node, ops::PoolOp& op) { assert(false && "Not a supported pooling type"); } - auto& prev_nodes = node->getPrevNodes(); + auto& prev_nodes = op.getPrevNodes(); assert(prev_nodes.size() == 1); - auto in_node = prev_nodes[0].node; - auto in = AF::id(tensorName(in_node)); - auto out = genTensor(node, op.getOutputShape(0)); + auto in_op = prev_nodes[0].op; + auto in = AF::id(tensorName(in_op)); + auto out = genTensor(op, op.getOutputShape(0)); auto prefix = out->name() + "_pooling_layer"; auto pad_stride_info_var = _constrBlock->var("arm_compute::PadStrideInfo", @@ -151,19 +150,19 @@ void AclCppOpGenerator::visit(INode* node, ops::PoolOp& op) { _infBlock->call("run", {}, pooling_layer); } -void AclCppOpGenerator::visit(INode* node, ops::FullyConnectedOp& op) { +void AclCppOpGenerator::visit(ops::FullyConnectedOp& op) { const TensorVariant& ir_weights = op.getWeights(); const Shape& ir_weights_shape = ir_weights.getShape(); - auto& prev_nodes = node->getPrevNodes(); + auto& prev_nodes = op.getPrevNodes(); assert(prev_nodes.size() == 1); - auto in_node = prev_nodes[0].node; + auto in_op = prev_nodes[0].op; // Get the input node tensor id in the DOM. - auto in = AF::id(tensorName(in_node)); + auto in = AF::id(tensorName(in_op)); // Create the output tensor in the DOM. - auto out = genTensor(node, op.getOutputShape(0)); + auto out = genTensor(op, op.getOutputShape(0)); string operation_name = out->name() + "_fully_connected_layer"; // Create the weights tensor in the DOM and use its id. @@ -186,23 +185,23 @@ void AclCppOpGenerator::visit(INode* node, ops::FullyConnectedOp& op) { _infBlock->call("run", {}, fully_layer); } -void AclCppOpGenerator::visit(INode* node, ops::CappedReluOp& op) { - genActivation(node, op, "LU_BOUNDED_RELU", op.getCap()); +void AclCppOpGenerator::visit(ops::CappedReluOp& op) { + genActivation(op, "LU_BOUNDED_RELU", op.getCap()); } -void AclCppOpGenerator::visit(INode* node, ops::BiasAddOp& op) { +void AclCppOpGenerator::visit(ops::BiasAddOp& op) { const auto& ir_biases = op.getWeights(); assert(ir_biases.getShape().rank() == 1); - auto& prev_nodes = node->getPrevNodes(); + auto& prev_nodes = op.getPrevNodes(); assert(prev_nodes.size() == 1); - auto in_node = prev_nodes[0].node; + auto in_op = prev_nodes[0].op; // Get the input node tensor id in the DOM. - auto in = AF::id(tensorName(in_node)); + auto in = AF::id(tensorName(in_op)); // Create the output tensor in the DOM and obtain its identifier. - auto out = genTensor(node, op.getOutputShape(0)); + auto out = genTensor(op, op.getOutputShape(0)); // Prefix used for the name of variables related to the operation implementation. string operation_name = out->name() + "_bias_add_layer"; @@ -236,26 +235,24 @@ void AclCppOpGenerator::visit(INode* node, ops::BiasAddOp& op) { _infBlock->call("run", {}, arithmetic_add_layer); } -void AclCppOpGenerator::visit(INode* node, ops::VariableOp& op) { - // Axes order is HWC in the Model IR and WHC in the ACL library, so we are switching the first - // two dimensions. 
- genTensor(node, transposeShape<1, 0, 2>(op.getOutputShape(0))); +void AclCppOpGenerator::visit(ops::VariableOp& op) { + genTensor(op, transposeShape<1, 0, 2>(op.getOutputShape(0))); } -void AclCppOpGenerator::visit(INode* node, ops::ReluOp& op) { - genActivation(node, op, "RELU"); +void AclCppOpGenerator::visit(ops::ReluOp& op) { + genActivation(op, "RELU"); } -void AclCppOpGenerator::visit(INode* node, ops::ReshapeOp& op) { - auto& prev_nodes = node->getPrevNodes(); +void AclCppOpGenerator::visit(ops::ReshapeOp& op) { + auto& prev_nodes = op.getPrevNodes(); assert(prev_nodes.size() == 1); // Get the id of the input tensor in the generated artifact. - auto in_node = prev_nodes[0].node; - auto in = AF::id(tensorName(in_node)); + auto in_op = prev_nodes[0].op; + auto in = AF::id(tensorName(in_op)); // Create the output tensor in the DOM and return its id. - auto out = genTensor(node, op.getOutputShape(0)); + auto out = genTensor(op, op.getOutputShape(0)); // Create an instance of the CLReshapeLayer class as a member of the artifact class. auto reshape_layer_var = _artifactClass->var(false, "arm_compute::CLReshapeLayer", @@ -269,25 +266,25 @@ void AclCppOpGenerator::visit(INode* node, ops::ReshapeOp& op) { _infBlock->call("run", {}, reshape_layer); } -void AclCppOpGenerator::visit(INode* node, ops::ScaleOp& op) { +void AclCppOpGenerator::visit(ops::ScaleOp& op) { // May be not a perfect implementation, using the CLPixelWiseMultiplication ACL function taking // two input tensors with the same shapes. - auto prev_nodes = node->getPrevNodes(); + auto prev_nodes = op.getPrevNodes(); assert(prev_nodes.size() == 1); - auto in_node = prev_nodes[0].node; + auto in_op = prev_nodes[0].op; // Get input tensor identifier in the generated artifact. - auto in = AF::id(tensorName(in_node)); + auto in = AF::id(tensorName(in_op)); // Generate output tensor description in the DOM. - auto out = genTensor(node, op.getOutputShape(0)); + auto out = genTensor(op, op.getOutputShape(0)); auto prefix = out->name() + "_scale_layer"; // Create a CLPixelWiseMultiplication instance. auto scale_layer_var = _artifactClass->var(false, "arm_compute::CLPixelWiseMultiplication", prefix); auto scale_layer = scale_layer_var->use(); - auto scale_tensor = genTensor(prefix + "_scales", in_node->getOperation()->getOutputShape(0)); + auto scale_tensor = genTensor(prefix + "_scales", in_op->getOutputShape(0)); // Construct the vector containing scales. auto scales_var = _constrBlock->var("std::vector", prefix + "_scales"); @@ -317,22 +314,22 @@ void AclCppOpGenerator::visit(INode* node, ops::ScaleOp& op) { _infBlock->call("run", {}, scale_layer); } -void AclCppOpGenerator::visit(INode* node, ops::BatchNormOp& op) { +void AclCppOpGenerator::visit(ops::BatchNormOp& op) { // Not supported in our framework, but present in ACL API. throw AclCppException("Not supported in inference yet."); } -void AclCppOpGenerator::visit(INode* node, ops::DropoutOp& op) { +void AclCppOpGenerator::visit(ops::DropoutOp& op) { // Just copy input tensor to the output one. - auto prev_nodes = node->getPrevNodes(); - assert(prev_nodes.size() == 1); - auto in_node = prev_nodes[0].node; + auto prev_ops = op.getPrevNodes(); + assert(prev_ops.size() == 1); + auto in_op = prev_ops[0].op; // Get input tensor identifier in the generated artifact. - auto in = AF::id(tensorName(in_node)); + auto in = AF::id(tensorName(in_op)); // Generate output tensor description in the DOM. 
- auto out = genTensor(node, op.getOutputShape(0)); + auto out = genTensor(op, op.getOutputShape(0)); // Create a CLCopy instance. auto copy_layer_var = _artifactClass->var(false, "arm_compute::CLCopy", @@ -346,27 +343,27 @@ void AclCppOpGenerator::visit(INode* node, ops::DropoutOp& op) { _infBlock->call("run", {}, copy_layer); } -void AclCppOpGenerator::visit(INode* node, ops::TanhOp& op) { - genActivation(node, op, "TANH"); +void AclCppOpGenerator::visit(ops::TanhOp& op) { + genActivation(op, "TANH"); } -void AclCppOpGenerator::visit(INode* node, ops::ElementwiseOp& op) { +void AclCppOpGenerator::visit(ops::ElementwiseOp& op) { // Create the output tensor in the DOM and obtain its identifier. - auto out = genTensor(node, op.getOutputShape(0)); + auto out = genTensor(op, op.getOutputShape(0)); - auto& prev_nodes = node->getPrevNodes(); + auto& prev_nodes = op.getPrevNodes(); assert(prev_nodes.size() >= 2); - auto in_node1 = prev_nodes[0].node; + auto in_op1 = prev_nodes[0].op; // Get the identifier of the first input tensor in the DOM. - auto in1 = AF::id(tensorName(in_node1)); + auto in1 = AF::id(tensorName(in_op1)); for (int i = 1; i < prev_nodes.size(); ++i) { - auto in_node2 = prev_nodes[i].node; + auto in_op2 = prev_nodes[i].op; // Get the identifier of the second input tensor in the DOM. - auto in2 = AF::id(tensorName(in_node2)); + auto in2 = AF::id(tensorName(in_op2)); // Chaining the partial results of binary operations. // On the last iteration the result is saved in the node output. @@ -386,21 +383,20 @@ void AclCppOpGenerator::visit(INode* node, ops::ElementwiseOp& op) { } } -void AclCppOpGenerator::visit(INode* node, ops::DeConv2DOp& op) { - genConvolution(node, op, "arm_compute::CLDeconvolutionLayer", "_deconvolution_layer"); +void AclCppOpGenerator::visit(ops::DeConv2DOp& op) { + genConvolution(op, "arm_compute::CLDeconvolutionLayer", "_deconvolution_layer"); } -void AclCppOpGenerator::visit(INode* node, ops::EluOp& op) { +void AclCppOpGenerator::visit(ops::EluOp& op) { throw AclCppException("Not supported by the ACL library yet."); } -void AclCppOpGenerator::visit(INode *node, ops::PadOp &op) { +void AclCppOpGenerator::visit(ops::PadOp& op) { throw AclCppException("Not supported by the ACL library yet."); } template -void AclCppOpGenerator::genConvolution(INode* node, Op& op, const string& acl_func_name, - const string& suffix) { +void AclCppOpGenerator::genConvolution(Op& op, const string& acl_func_name, const string& suffix) { auto ir_weights = transposeTensor<1, 0, 2, 3>(make_shared(op.getKernel())); const Shape& ir_weights_shape = ir_weights->getShape(); assert(ir_weights_shape.rank() == 4); @@ -411,15 +407,15 @@ void AclCppOpGenerator::genConvolution(INode* node, Op& op, const string& acl_fu uint32_t pad_y = op.getPadding(1); assert(op.getPadding(2) == 0); - auto& prev_nodes = node->getPrevNodes(); + auto& prev_nodes = op.getPrevNodes(); assert(prev_nodes.size() == 1); - auto in_node = prev_nodes[0].node; + auto in_op = prev_nodes[0].op; // Get the identifier of the input tensor in the DOM. - auto in = AF::id(tensorName(in_node)); + auto in = AF::id(tensorName(in_op)); // Create the output tensor in the DOM. - auto out = genTensor(node, transposeShape<1, 0, 2>(op.getOutputShape(0))); + auto out = genTensor(op, transposeShape<1, 0, 2>(op.getOutputShape(0))); string operation_name = out->name() + suffix; // Generate a tensor for weights (kernel) in the DOM. 
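The ElementwiseOp visitor earlier in this file folds an arbitrary number of inputs through repeated binary layers, reusing each partial result as the left operand of the next call; only the last partial result is bound to the node's output. Numerically it is a plain left fold, sketched here with floats (the helper name is hypothetical; the generated artifact chains binary add/multiply layers in the DOM rather than computing values):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // out = ((in0 + in1) + in2) + ...; mirrors the chaining loop in visit(ElementwiseOp&).
    float foldAdd(const std::vector<float>& ins) {
      assert(ins.size() >= 2);  // same precondition as the assert on prev_nodes.size()
      float acc = ins[0];
      for (std::size_t i = 1; i < ins.size(); ++i)
        acc += ins[i];  // on the last iteration this is the node's output
      return acc;
    }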
@@ -461,17 +457,16 @@ void AclCppOpGenerator::genConvolution(INode* node, Op& op, const string& acl_fu _infBlock->call("run", {}, conv_layer); } -void AclCppOpGenerator::genActivation(INode* node, OpDescription& op, - const std::string& activation_name, float a, float b) { - auto &prev_nodes = node->getPrevNodes(); +void AclCppOpGenerator::genActivation(mir::Operation& op, const std::string& activation_name, float a, float b) { + auto &prev_nodes = op.getPrevNodes(); assert(prev_nodes.size() == 1); // Get the id of the input tensor. - auto in_node = prev_nodes[0].node; - auto in = AF::id(tensorName(in_node)); + auto in_op = prev_nodes[0].op; + auto in = AF::id(tensorName(in_op)); // Create the output tensor in the DOM and return its id. - auto out = genTensor(node, op.getOutputShape(0)); + auto out = genTensor(op, op.getOutputShape(0)); auto prefix = out->name() + "_activation_layer"; // Create an instance of the ActivationLayerInfo class as a local variable in the artifact @@ -576,16 +571,16 @@ shared_ptr AclCppOpGenerator::genMultiplication(const string& prefix return out; } -string AclCppOpGenerator::tensorName(INode* node) const { +string AclCppOpGenerator::tensorName(Operation* op) const { string tensor_name; - if (!node->getName().empty()) { - tensor_name = "_" + node->getName(); + if (!op->getName().empty()) { + tensor_name = "_" + op->getName(); replace_if(tensor_name.begin(), tensor_name.end(), [](char c) { return std::isalnum(c) == 0; }, '_'); } else { - tensor_name = "tensor_" + to_string(node->getId()); + tensor_name = "tensor_" + to_string(op->getId()); } return tensor_name; @@ -622,14 +617,14 @@ shared_ptr AclCppOpGenerator::genTensor(const string& name, const Sh return id; } -shared_ptr AclCppOpGenerator::genTensor(INode* node, const Shape& ir_shape) { - if (node->getPrevNodes().empty()) - _inputs.insert(node); +std::shared_ptr AclCppOpGenerator::genTensor(mir::Operation& op, const Shape& ir_shape) { + if (op.getPrevNodes().empty()) + _inputs.insert(&op); - if (node->getNextNodes().empty()) - _outputs.insert(node); + if (op.getNextNodes().empty()) + _outputs.insert(&op); - return genTensor(tensorName(node), ir_shape, !node->getName().empty()); + return genTensor(tensorName(&op), ir_shape, !op.getName().empty()); } void AclCppOpGenerator::genNamed() { @@ -682,7 +677,7 @@ void AclCppOpGenerator::serializeTensor(const TensorVariant& tensor) { } } -void AclCppOpGenerator::visit(INode* node, ops::SqueezeOp& op) { +void AclCppOpGenerator::visit(ops::SqueezeOp& op) { assert(false && "Unimplemented operation: Squeeze"); } diff --git a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp index 2ce22d1..c663fd0 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp @@ -84,7 +84,7 @@ void CaffeImporter::createMIRNodesFromLayer(const LayerParameter& lp) { auto inputs = getMIRInputsForLayer(lp); auto params = createOpParams(lp); - INode::IODescriptorVector outputs; + std::vector outputs; CaffeOpType op_type = _operatorTypes.at(lp.type()); switch (op_type) { @@ -234,8 +234,8 @@ std::shared_ptr CaffeImporter::createTensor(const BlobProto& bp) { return tensor; } -INode::IODescriptorVector CaffeImporter::getMIRInputsForLayer(const LayerParameter& layer) { - INode::IODescriptorVector inputs; +std::vector CaffeImporter::getMIRInputsForLayer(const LayerParameter& layer) { + std::vector inputs; for (const auto& input_name : layer.bottom()) 
inputs.push_back(_blobNameToIODescriptor.at(input_name)); @@ -269,13 +269,13 @@ void CaffeImporter::setGraphOutputs() { // For now, we assume that: // - there is exactly one output; // - the output is from the last layer. - _graph->markOutput(_blobNameToIODescriptor[last_layer.top(0)].node); + _graph->markOutput(_blobNameToIODescriptor[last_layer.top(0)].op); } void CaffeImporter::setIrNodeNames() { // FIXME Support multiple outputs. for (auto& item : _blobNameToIODescriptor) - item.second.node->setName(item.first); + item.second.op->setName(item.first); } PassData CaffeImporter::run(PassData) { diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp index 24c9c06..6bf4711 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp @@ -43,7 +43,7 @@ #include #include - +#include namespace nnc { @@ -215,11 +215,11 @@ fixGroupedKernel(int groups, std::shared_ptr folded_kernel) { return unfold_kernel; } -INode::IODescriptorVector CaffeOpCreator::convertInput(const LayerParameter& layer) { +std::vector CaffeOpCreator::convertInput(const LayerParameter& layer) { const auto& params = layer.input_param(); const auto num_inputs = layer.top_size(); const auto num_shapes = params.shape_size(); - INode::IODescriptorVector descriptors; + std::vector descriptors; assert((num_shapes == 1 || num_shapes == num_inputs) && "Unsupported number of shapes."); @@ -236,7 +236,7 @@ INode::IODescriptorVector CaffeOpCreator::convertInput(const LayerParameter& lay // TODO: Implement a more consistent way of handling shapes within the model. if (shape.rank() == 3) shape = Shape{shape.dim(1), shape.dim(2), shape.dim(0)}; - variable->getOperation()->setOutputShape(0, shape); + variable->setOutputShape(0, shape); descriptors.push_back(variable->getOutput(0)); } @@ -254,15 +254,15 @@ void CaffeOpCreator::checkConvolution(const ConvolutionParameter& opts, problems_op_set.insert("Conv2D: Unsupported number of pads"); } -INode::IODescriptorVector -CaffeOpCreator::convertConvolution(const INode::IODescriptorVector& inputs, +std::vector +CaffeOpCreator::convertConvolution(const std::vector& inputs, const std::vector>& params, const caffe::ConvolutionParameter& opts) { ops::PaddingType pad_type = ops::PaddingType::Custom; Shape stride_shape = getConvStride(opts); std::shared_ptr unfolded_tensor = params[0]; - INode* conv2d; + Operation* conv2d; auto in_group_size = params[0]->getShape().dim(2); auto out_channels = params[0]->getShape().dim(3); int32_t num_groups = opts.group(); @@ -289,12 +289,12 @@ CaffeOpCreator::convertConvolution(const INode::IODescriptorVector& inputs, pad_h = pad_w = opts.pad(0); if (is_depthwise) { - auto op = static_cast(conv2d->getOperation()); + auto op = static_cast(conv2d); op->setPadding(0, pad_h); op->setPadding(1, pad_w); op->setPadding(2, 0); } else { - auto op = static_cast(conv2d->getOperation()); + auto op = static_cast(conv2d); op->setPadding(0, pad_h); op->setPadding(1, pad_w); op->setPadding(2, 0); @@ -326,8 +326,8 @@ void CaffeOpCreator::checkInnerProduct(const InnerProductParameter& opts, * implement it correctly. * @todo Support axis and transpose parameters as needed. 
 */
-INode::IODescriptorVector
-CaffeOpCreator::convertInnerProduct(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertInnerProduct(const std::vector<mir::IODescriptor>& inputs,
                                     const std::vector<std::shared_ptr<IrTensor>>& params,
                                     const caffe::InnerProductParameter& opts) {
   // Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize]
@@ -335,7 +335,7 @@ CaffeOpCreator::convertInnerProduct(const INode::IODescriptorVector& inputs,
   auto reshape = createOp<ops::ReshapeOp>(inputs);
   int32_t fc_input_size = static_cast<int32_t>(
     params[0]->getShape().numElements()) / opts.num_output();
-  reshape->getOperation()->setOutputShape(0, {1, fc_input_size});
+  reshape->setOutputShape(0, {1, fc_input_size});
   auto fully_connected = createOp<ops::FullyConnectedOp>({reshape->getOutput(0)},
                                                          std::move(*params[0]));
@@ -349,8 +349,8 @@ CaffeOpCreator::convertInnerProduct(const INode::IODescriptorVector& inputs,
   }
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertConcat(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertConcat(const std::vector<mir::IODescriptor>& inputs,
                               const caffe::ConcatParameter& opts) {
   auto result = createOp<ops::ConcatOp>(inputs, inputs.size(), getAxisValue(opts));
   return {result->getOutput(0)};
@@ -369,8 +369,8 @@ void CaffeOpCreator::checkPooling(const PoolingParameter& opts,
     problemsOpSet.insert("Pooling: conflicting padding properties in pooling");
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertPooling(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertPooling(const std::vector<mir::IODescriptor>& inputs,
                                const caffe::PoolingParameter& opts) {
   Shape window_shape = getPoolWindowShape(opts);
   ops::PoolOp::PoolingType pool_type = getPoolingType(opts);
@@ -393,7 +393,7 @@ CaffeOpCreator::convertPooling(const INode::IODescriptorVector& inputs,
                                          pad_type, border_type);
   // Set pads
-  auto op = static_cast<ops::PoolOp*>(pooling->getOperation());
+  auto op = static_cast<ops::PoolOp*>(pooling);
   int pad_h = opts.has_pad_h() ? opts.pad_h() : 0;
   int pad_w = opts.has_pad_w() ? opts.pad_w() : 0;
   if (opts.has_pad())
@@ -405,8 +405,8 @@ CaffeOpCreator::convertPooling(const INode::IODescriptorVector& inputs,
   return {pooling->getOutput(0)};
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertSoftmax(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertSoftmax(const std::vector<mir::IODescriptor>& inputs,
                                const caffe::SoftmaxParameter& opts) {
   auto softmax = createOp<ops::SoftmaxOp>(inputs, getAxisValue(opts));
   return {softmax->getOutput(0)};
@@ -433,12 +433,12 @@ void CaffeOpCreator::checkReshape(const ReshapeParameter& opts,
  * @todo Decide how to react to the absence of "shape" parameter.
  * @todo Support zero values in "shape" parameter.
  */
-INode::IODescriptorVector
-CaffeOpCreator::convertReshape(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertReshape(const std::vector<mir::IODescriptor>& inputs,
                                const caffe::ReshapeParameter& opts) {
   auto reshape = createOp<ops::ReshapeOp>(inputs);
   Shape new_shape = ShapeHelper::createShape(opts.shape().dim(), opts.shape().dim_size());
-  reshape->getOperation()->setOutputShape(0, new_shape);
+  reshape->setOutputShape(0, new_shape);
   return {reshape->getOutput(0)};
 }

@@ -448,14 +448,14 @@ void CaffeOpCreator::checkReLU(const ReLUParameter& opts,
     problems_op_set.insert("ReLU layer negative_slope param is not supported yet.");
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertReLU(const INode::IODescriptorVector& inputs) {
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertReLU(const std::vector<mir::IODescriptor>& inputs) {
   auto relu = createOp<ops::ReluOp>(inputs);
   return {relu->getOutput(0)};
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertScale(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertScale(const std::vector<mir::IODescriptor>& inputs,
                              const std::vector<std::shared_ptr<IrTensor>>& params,
                              const caffe::ScaleParameter& opts) {
   auto scale = createOp<ops::ScaleOp>(inputs, std::move(*params[0]));
@@ -476,8 +476,8 @@ void CaffeOpCreator::checkBatchNorm(const std::vector<std::shared_ptr<IrTensor>>& params,
     problems_op_set.insert("Unexpected shape of scale parameter in batch norm");
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertBatchNorm(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertBatchNorm(const std::vector<mir::IODescriptor>& inputs,
                                  const std::vector<std::shared_ptr<IrTensor>>& params,
                                  const caffe::BatchNormParameter& opts) {
   float eps = opts.eps();
@@ -507,15 +507,15 @@ CaffeOpCreator::convertBatchNorm(const INode::IODescriptorVector& inputs,
   return {scale->getOutput(0)};
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertDropout(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertDropout(const std::vector<mir::IODescriptor>& inputs,
                                const caffe::DropoutParameter& opts) {
   auto dropout = createOp<ops::DropoutOp>(inputs, opts.dropout_ratio());
   return {dropout->getOutput(0)};
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertDeconvolution(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertDeconvolution(const std::vector<mir::IODescriptor>& inputs,
                                      const std::vector<std::shared_ptr<IrTensor>>& params,
                                      const caffe::ConvolutionParameter& opts) {
   ops::PaddingType pad_type = ops::PaddingType::Custom;
@@ -530,7 +530,7 @@ CaffeOpCreator::convertDeconvolution(const INode::IODescriptorVector& inputs,
                                                 stride_shape, pad_type);
   // Set pads
-  auto op = static_cast<ops::DeConv2DOp*>(deconv2d->getOperation());
+  auto op = static_cast<ops::DeConv2DOp*>(deconv2d);
   int pad_h = opts.has_pad_h() ? opts.pad_h() : 0;
   int pad_w = opts.has_pad_w() ? opts.pad_w() : 0;
@@ -549,22 +549,22 @@ CaffeOpCreator::convertDeconvolution(const INode::IODescriptorVector& inputs,
   }
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertELU(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertELU(const std::vector<mir::IODescriptor>& inputs,
                            const std::vector<std::shared_ptr<IrTensor>>& params,
                            const caffe::ELUParameter& opts) {
   auto elu = createOp<ops::EluOp>(inputs, opts.alpha());
   return {elu->getOutput(0)};
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertTanH(const INode::IODescriptorVector& inputs) {
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertTanH(const std::vector<mir::IODescriptor>& inputs) {
   auto tanh = createOp<ops::TanhOp>(inputs);
   return {tanh->getOutput(0)};
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertEltwise(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertEltwise(const std::vector<mir::IODescriptor>& inputs,
                                const caffe::EltwiseParameter& opts) {
   ops::ElementwiseOp::OpType optype;
   switch (opts.operation()){
@@ -585,16 +585,16 @@ CaffeOpCreator::convertEltwise(const INode::IODescriptorVector& inputs,
   return {elementwise->getOutput(0)};
 }

-INode::IODescriptorVector
-CaffeOpCreator::convertSplit(const INode::IODescriptorVector& inputs,
+std::vector<mir::IODescriptor>
+CaffeOpCreator::convertSplit(const std::vector<mir::IODescriptor>& inputs,
                              const caffe::LayerParameter& lp) {
-  INode::IODescriptorVector outputs(lp.top_size(), inputs.at(0));
+  std::vector<mir::IODescriptor> outputs(lp.top_size(), inputs.at(0));
   return outputs;
 }

-void CaffeOpCreator::connectInputs(INode* node, const INode::IODescriptorVector& inputs) {
+void CaffeOpCreator::connectInputs(mir::Operation* op, const std::vector<mir::IODescriptor>& inputs) {
   for (int i = 0; i < static_cast<int>(inputs.size()); ++i)
-    node->connectInputTo(i, inputs[i]);
+    op->connectInputTo(i, inputs[i]);
 }

 } // namespace nnc
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h
index ee8c1a6..ce0d5e7 100644
--- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h
+++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h
@@ -22,8 +22,7 @@
 #include
 #include

-#include "core/modelIR/graph.h"
-#include "core/modelIR/ir_node.h"
+#include "core/modelIR/Graph.h"
 #include "core/modelIR/TensorVariant.h"
 #include "core/modelIR/operations/common.h"
 #include "core/modelIR/Shape.h"
@@ -33,7 +32,6 @@
 namespace nnc {

 using nnc::mir::Graph;
-using nnc::mir::INode;
 using IrTensor = nnc::mir::TensorVariant;
 using nnc::mir::Shape;

@@ -41,59 +39,60 @@ class CaffeOpCreator {
 public:
   explicit CaffeOpCreator(Graph* g) : _graph(g) {};

-  INode::IODescriptorVector convertInput(const caffe::LayerParameter& layer);
+  std::vector<mir::IODescriptor> convertInput(const caffe::LayerParameter& layer);

-  INode::IODescriptorVector
-  convertConvolution(const INode::IODescriptorVector& inputs,
+  std::vector<mir::IODescriptor>
+  convertConvolution(const std::vector<mir::IODescriptor>& inputs,
                      const std::vector<std::shared_ptr<IrTensor>>& params,
                      const caffe::ConvolutionParameter& opts);

-  INode::IODescriptorVector
-  convertInnerProduct(const INode::IODescriptorVector& inputs,
+  std::vector<mir::IODescriptor>
+  convertInnerProduct(const std::vector<mir::IODescriptor>& inputs,
                       const std::vector<std::shared_ptr<IrTensor>>& params,
                       const caffe::InnerProductParameter& opts);

-  INode::IODescriptorVector convertConcat(const INode::IODescriptorVector& inputs,
-                                          const caffe::ConcatParameter& opts);
+  std::vector<mir::IODescriptor> convertConcat(const std::vector<mir::IODescriptor>& inputs,
+                                               const caffe::ConcatParameter& opts);

-  INode::IODescriptorVector convertPooling(const INode::IODescriptorVector& inputs,
-                                           const caffe::PoolingParameter& opts);
+  std::vector<mir::IODescriptor> convertPooling(const std::vector<mir::IODescriptor>& inputs,
+                                                const caffe::PoolingParameter& opts);

-  INode::IODescriptorVector convertSoftmax(const INode::IODescriptorVector& inputs,
-                                           const caffe::SoftmaxParameter& opts);
+  std::vector<mir::IODescriptor> convertSoftmax(const std::vector<mir::IODescriptor>& inputs,
+                                                const caffe::SoftmaxParameter& opts);

-  INode::IODescriptorVector convertReshape(const INode::IODescriptorVector& inputs,
-                                           const caffe::ReshapeParameter& opts);
+  std::vector<mir::IODescriptor> convertReshape(const std::vector<mir::IODescriptor>& inputs,
+                                                const caffe::ReshapeParameter& opts);

-  INode::IODescriptorVector convertReLU(const INode::IODescriptorVector& inputs);
+  std::vector<mir::IODescriptor> convertReLU(const std::vector<mir::IODescriptor>& inputs);

-  INode::IODescriptorVector convertScale(const INode::IODescriptorVector& inputs,
-                                         const std::vector<std::shared_ptr<IrTensor>>& params,
-                                         const caffe::ScaleParameter& opts);
+  std::vector<mir::IODescriptor> convertScale(const std::vector<mir::IODescriptor>& inputs,
+                                              const std::vector<std::shared_ptr<IrTensor>>& params,
+                                              const caffe::ScaleParameter& opts);

-  INode::IODescriptorVector convertBatchNorm(const INode::IODescriptorVector& inputs,
-                                             const std::vector<std::shared_ptr<IrTensor>>& params,
-                                             const caffe::BatchNormParameter& layer);
+  std::vector<mir::IODescriptor>
+  convertBatchNorm(const std::vector<mir::IODescriptor>& inputs,
+                   const std::vector<std::shared_ptr<IrTensor>>& params,
+                   const caffe::BatchNormParameter& layer);

-  INode::IODescriptorVector convertDropout(const INode::IODescriptorVector& inputs,
-                                           const caffe::DropoutParameter& opts);
+  std::vector<mir::IODescriptor> convertDropout(const std::vector<mir::IODescriptor>& inputs,
+                                                const caffe::DropoutParameter& opts);

-  INode::IODescriptorVector
-  convertDeconvolution(const INode::IODescriptorVector& inputs,
+  std::vector<mir::IODescriptor>
+  convertDeconvolution(const std::vector<mir::IODescriptor>& inputs,
                        const std::vector<std::shared_ptr<IrTensor>>& params,
                        const caffe::ConvolutionParameter& opts);

-  INode::IODescriptorVector convertELU(const INode::IODescriptorVector& inputs,
-                                       const std::vector<std::shared_ptr<IrTensor>>& params,
-                                       const caffe::ELUParameter& opts);
+  std::vector<mir::IODescriptor> convertELU(const std::vector<mir::IODescriptor>& inputs,
+                                            const std::vector<std::shared_ptr<IrTensor>>& params,
+                                            const caffe::ELUParameter& opts);

-  INode::IODescriptorVector convertTanH(const INode::IODescriptorVector& inputs);
+  std::vector<mir::IODescriptor> convertTanH(const std::vector<mir::IODescriptor>& inputs);

-  INode::IODescriptorVector convertEltwise(const INode::IODescriptorVector& inputs,
-                                           const caffe::EltwiseParameter& opts);
+  std::vector<mir::IODescriptor> convertEltwise(const std::vector<mir::IODescriptor>& inputs,
+                                                const caffe::EltwiseParameter& opts);

-  INode::IODescriptorVector convertSplit(const INode::IODescriptorVector& inputs,
-                                         const caffe::LayerParameter& lp);
+  std::vector<mir::IODescriptor> convertSplit(const std::vector<mir::IODescriptor>& inputs,
+                                              const caffe::LayerParameter& lp);

   void checkConvolution(const caffe::ConvolutionParameter& layer, std::set<std::string>&);

@@ -111,18 +110,19 @@ public:
 private:
   Graph* _graph = nullptr;

-  void connectInputs(INode*, const INode::IODescriptorVector& inputs);
+  void connectInputs(mir::Operation*, const std::vector<mir::IODescriptor>& inputs);

-  template <typename OpType, typename... Types>
-  INode* createOp(const INode::IODescriptorVector& inputs, Types&&... args);
+  template <typename OpType, typename... Types>
+  mir::Operation* createOp(const std::vector<mir::IODescriptor>& inputs, Types&&... args);
 };

-template <typename OpType, typename... Types>
-INode* CaffeOpCreator::createOp(const INode::IODescriptorVector& inputs, Types&&... args) {
+template <typename OpType, typename... Types>
+mir::Operation*
+CaffeOpCreator::createOp(const std::vector<mir::IODescriptor>& inputs, Types&&... args) {
   // TODO: set operation names
-  auto node = _graph->create<OpType>("", std::forward<Types>(args)...);
-  connectInputs(node, inputs);
-  return node;
+  auto op = _graph->create<OpType>("", std::forward<Types>(args)...);
+  connectInputs(op, inputs);
+  return op;
 }

 } // namespace nnc
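Note: this `createOp` helper is the pattern every frontend in the patch now follows — `Graph::create<OpType>` returns the `Operation*` itself, with no `INode` wrapper to unwrap. A minimal sketch of a call site under the new API (a hedged illustration: `buildTinyGraph` and the two-op graph are invented for the example, only the `Graph`/`Operation` calls come from this patch):

    #include "core/modelIR/Graph.h"
    #include "core/modelIR/operations/VariableOp.h"
    #include "core/modelIR/operations/ReluOp.h"

    using namespace nnc::mir;

    // Build a two-operation graph: a named input variable feeding a ReLU.
    // With the intrusive design the Operation* returned by Graph::create()
    // is connected and shaped directly; there is no separate node object.
    static Operation* buildTinyGraph(Graph* g) {
      Operation* input = g->create<ops::VariableOp>("data");
      Operation* relu = g->create<ops::ReluOp>("out");
      relu->connectInputTo(0, input->getOutput(0));
      return relu;
    }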
diff --git a/contrib/nnc/passes/interpreter/Interpreter.cpp b/contrib/nnc/passes/interpreter/Interpreter.cpp
index af38067..8942abc 100644
--- a/contrib/nnc/passes/interpreter/Interpreter.cpp
+++ b/contrib/nnc/passes/interpreter/Interpreter.cpp
@@ -54,8 +54,7 @@
 #include "ops/Dropout.h"
 #include "ops/BatchNorm.h"

-namespace nnc
-{
+namespace nnc {

 using namespace nnc::mir;

@@ -63,21 +62,19 @@ std::vector<TensorVariant> &NNInterpreter::var(size_t id) { return vars[id]; }

 void NNInterpreter::setInput(const std::string &name, const TensorVariant& t) { data.emplace(name, t); }

-void NNInterpreter::visit(INode::Ref node, ops::VariableOp &op)
-{
-  mapByName(node);
+void NNInterpreter::visit(ops::VariableOp& op) {
+  mapByName(&op);
   (void)op;
-  auto it = data.find(node->getName());
+  auto it = data.find(op.getName());
   if( it == data.end() )
   {
-    throw std::runtime_error("Can't find data for node \"" + node->getName() + ". Input data was not set correctly?");
+    throw std::runtime_error("Can't find data for node \"" + op.getName() + ". Input data was not set correctly?");
   }
-  var(node->getId()) = {it->second};
+  var(op.getId()) = {it->second};
 }

-std::vector<TensorVariant> &NNInterpreter::getResult(INode::Ref node)
-{
-  auto res = vars.find(node->getId());
+std::vector<TensorVariant> &NNInterpreter::getResult(Operation* op) {
+  auto res = vars.find(op->getId());
   if (res != vars.end())
   {
     return res->second;
@@ -89,153 +86,140 @@ std::vector<TensorVariant> &NNInterpreter::getResult(INode::Ref node)
 }

 std::vector<TensorVariant> &NNInterpreter::getOperationResult(const std::string &nodeName) {
-  auto it = nodeByName.find(nodeName);
-  if (it == nodeByName.end())
+  auto it = _opByName.find(nodeName);
+  if (it == _opByName.end())
     throw std::runtime_error("Node not found <" + nodeName + ">");
   return getResult(it->second);
 }

-void NNInterpreter::visit(INode::Ref node, ops::ConcatOp &op)
-{
-  mapByName(node);
-  auto &operands = node->getPrevNodes();
+void NNInterpreter::visit(ops::ConcatOp& op) {
+  mapByName(&op);
+  auto &operands = op.getPrevNodes();
   std::vector<TensorVariant> ins;
   for (auto &in : operands)
   {
-    ins.push_back(var(in.node->getId())[in.index]);
+    ins.push_back(var(in.op->getId())[in.index]);
   }
-  var(node->getId()) = Concat(ins, op.getOutputShape(0), op.getAxis())();
+  var(op.getId()) = Concat(ins, op.getOutputShape(0), op.getAxis())();
 }

-void NNInterpreter::visit(INode::Ref node, ops::Conv2DOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  var(node->getId()) = Conv2D(var(operand.node->getId())[operand.index], op)();
+void NNInterpreter::visit(ops::Conv2DOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  var(op.getId()) = Conv2D(var(operand.op->getId())[operand.index], op)();
 }

-void NNInterpreter::visit(INode::Ref node, ops::ReshapeOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  auto input = var(operand.node->getId())[operand.index];
-  var(node->getId()) = Reshape(input, op.getOutputShape(0))();
+void NNInterpreter::visit(ops::ReshapeOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  auto input = var(operand.op->getId())[operand.index];
+  var(op.getId()) = Reshape(input, op.getOutputShape(0))();
 }

-void NNInterpreter::visit(INode::Ref node, ops::ReluOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  Tensor<float> input(var(operand.node->getId())[operand.index]);
-  var(node->getId()) = Fill<float>(
+void NNInterpreter::visit(ops::ReluOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  Tensor<float> input(var(operand.op->getId())[operand.index]);
+  var(op.getId()) = Fill<float>(
     op.getOutputShape(0), [&input](const Index &id) { return std::max(input.at(id), 0.0f); })();
 }

-void NNInterpreter::visit(INode::Ref node, ops::SoftmaxOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  auto input = var(operand.node->getId())[operand.index];
-  var(node->getId()) = Softmax(op.getInputShape(0), input, op.getAxis())();
+void NNInterpreter::visit(ops::SoftmaxOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  auto input = var(operand.op->getId())[operand.index];
+  var(op.getId()) = Softmax(op.getInputShape(0), input, op.getAxis())();
 }

-void NNInterpreter::visit(INode::Ref node, ops::PoolOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  auto input = var(operand.node->getId())[operand.index];
-  var(node->getId()) = Pool(input, op)();
+void NNInterpreter::visit(ops::PoolOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  auto input = var(operand.op->getId())[operand.index];
+  var(op.getId()) = Pool(input, op)();
 }

-void NNInterpreter::visit(INode::Ref node, ops::FullyConnectedOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  TensorVariant input = var(operand.node->getId())[operand.index];
-  var(node->getId()) = FullyConnected(input, op)();
+void NNInterpreter::visit(ops::FullyConnectedOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  TensorVariant input = var(operand.op->getId())[operand.index];
+  var(op.getId()) = FullyConnected(input, op)();
 }

-void NNInterpreter::visit(INode *node, ops::CappedReluOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  Tensor<float> input(var(operand.node->getId())[operand.index]);
-  var(node->getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
+void NNInterpreter::visit(ops::CappedReluOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  Tensor<float> input(var(operand.op->getId())[operand.index]);
+  var(op.getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
     return std::min(std::max(input.at(id), 0.0f), op.getCap());
   })();
 }

-void NNInterpreter::visit(INode *node, ops::DepthwiseConv2DOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  TensorVariant input(var(operand.node->getId())[operand.index]);
-  var(node->getId()) = DepthwiseConv2D(input, op)();
+void NNInterpreter::visit(ops::DepthwiseConv2DOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  TensorVariant input(var(operand.op->getId())[operand.index]);
+  var(op.getId()) = DepthwiseConv2D(input, op)();
 }

-void NNInterpreter::visit(INode *node, ops::BiasAddOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  auto input = var(operand.node->getId())[operand.index];
-  var(node->getId()) = BiasAdd(input, op.getWeights(), op.getOutputShape(0))();
+void NNInterpreter::visit(ops::BiasAddOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  auto input = var(operand.op->getId())[operand.index];
+  var(op.getId()) = BiasAdd(input, op.getWeights(), op.getOutputShape(0))();
 }

-void NNInterpreter::visit(INode *node, ops::BatchNormOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  TensorVariant input(var(operand.node->getId())[operand.index]);
+void NNInterpreter::visit(ops::BatchNormOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  TensorVariant input(var(operand.op->getId())[operand.index]);
   // TODO implement this
-  var(node->getId()) = BatchNorm(input, op)();
+  var(op.getId()) = BatchNorm(input, op)();
 }

-void NNInterpreter::visit(INode *node, ops::ScaleOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  TensorVariant input(var(operand.node->getId())[operand.index]);
+void NNInterpreter::visit(ops::ScaleOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  TensorVariant input(var(operand.op->getId())[operand.index]);
   // TODO implement this
-  var(node->getId()) = Scale(input, op)();
+  var(op.getId()) = Scale(input, op)();
 }

-void NNInterpreter::visit(INode *node, ops::DropoutOp &op)
-{
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  TensorVariant input(var(operand.node->getId())[operand.index]);
+void NNInterpreter::visit(ops::DropoutOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  TensorVariant input(var(operand.op->getId())[operand.index]);
   // TODO implement this
-  var(node->getId()) = Dropout(input, op)();
+  var(op.getId()) = Dropout(input, op)();
 }

-void NNInterpreter::mapByName(INode::Ref n) {
-  auto &nodeName = n->getName();
-  if (nodeByName.find(nodeName) != nodeByName.end())
+void NNInterpreter::mapByName(Operation* op) {
+  auto &nodeName = op->getName();
+  if (_opByName.find(nodeName) != _opByName.end())
   {
     // TODO use common debug macro
     // std::cout << "Warning: duplicate node name <" + nodeName + "> ignore node." << std::endl;
     return;
   }
-  nodeByName[nodeName] = n;
+  _opByName[nodeName] = op;
 }

-void NNInterpreter::visit(INode::Ref node, ops::TanhOp &op) {
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  Tensor<float> input(var(operand.node->getId())[operand.index]);
-  var(node->getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
+void NNInterpreter::visit(ops::TanhOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  Tensor<float> input(var(operand.op->getId())[operand.index]);
+  var(op.getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
     return std::tanh(input.at(id));
   })();
 }

-void NNInterpreter::visit(INode::Ref node, ops::ElementwiseOp &op) {
-  mapByName(node);
-  auto operands = node->getPrevNodes();
+void NNInterpreter::visit(ops::ElementwiseOp& op) {
+  mapByName(&op);
+  auto operands = op.getPrevNodes();
   std::vector<Tensor<float>> ins;
   for (auto &in : operands)
   {
-    ins.push_back(Tensor<float>(var(in.node->getId())[in.index]));
+    ins.push_back(Tensor<float>(var(in.op->getId())[in.index]));
   }
   float (*func)(float,float); // Another dirty hack
   switch (op.getOpType()) {
@@ -251,7 +235,7 @@ void NNInterpreter::visit(INode::Ref node, ops::ElementwiseOp &op) {
     default:
       assert(false && "Not supported Optype");
   }
-  var(node->getId()) = Fill<float>(op.getOutputShape(0), [&func, &ins, &op](const Index &id) {
+  var(op.getId()) = Fill<float>(op.getOutputShape(0), [&func, &ins, &op](const Index &id) {
     float acc = ins[0].at(id);
     for (size_t i = 1; i < ins.size() ; i++)
       acc = func(acc, ins[i].at(id));
@@ -259,17 +243,17 @@ void NNInterpreter::visit(INode::Ref node, ops::ElementwiseOp &op) {
   })();
 }

-void NNInterpreter::visit(INode::Ref node, ops::DeConv2DOp &op) {
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  var(node->getId()) = DeConv2D(var(operand.node->getId())[operand.index], op)();
+void NNInterpreter::visit(ops::DeConv2DOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  var(op.getId()) = DeConv2D(var(operand.op->getId())[operand.index], op)();
 }

-void NNInterpreter::visit(INode::Ref node, ops::EluOp &op) {
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  Tensor<float> input(var(operand.node->getId())[operand.index]);
-  var(node->getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
+void NNInterpreter::visit(ops::EluOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  Tensor<float> input(var(operand.op->getId())[operand.index]);
+  var(op.getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
     if (input.at(id) >= 0)
       return input.at(id);
     else
@@ -277,15 +261,15 @@ void NNInterpreter::visit(INode::Ref node, ops::EluOp &op) {
   })();
 }

-void NNInterpreter::visit(INode* node, ops::SqueezeOp& op) {
-  mapByName(node);
-  auto operand = node->getPrevNodes()[0];
-  auto& input = var(operand.node->getId())[operand.index];
+void NNInterpreter::visit(ops::SqueezeOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  auto& input = var(operand.op->getId())[operand.index];
   //Squeeze is just a special case of reshape
-  var(node->getId()) = Reshape(input, op.getOutputShape(0))();
+  var(op.getId()) = Reshape(input, op.getOutputShape(0))();
 }

-void NNInterpreter::visit(INode* node, ops::PadOp& op) {
+void NNInterpreter::visit(ops::PadOp& op) {
   throw PassException("Not implemented yet");
 }

diff --git a/contrib/nnc/passes/interpreter/interpreter_pass.cpp b/contrib/nnc/passes/interpreter/interpreter_pass.cpp
index 7f9d71d..d1c2cc9 100644
--- a/contrib/nnc/passes/interpreter/interpreter_pass.cpp
+++ b/contrib/nnc/passes/interpreter/interpreter_pass.cpp
@@ -37,7 +37,7 @@
 #include "passes/interpreter/InterpreterPass.h"

 #include "core/modelIR/ShapeInference.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 #include "core/modelIR/ShapeRange.h"
 #include "core/modelIR/Tensor.h"

@@ -100,14 +100,14 @@ PassData InterpreterPass::run(PassData data)

   g->accept(&shapeInference);

-  // Check nodes
-  auto inputNode = g->getInput(cli::interInNode);
-  if (inputNode == nullptr) {
+  // Check ops
+  auto inputOp = g->getInput(cli::interInNode);
+  if (inputOp == nullptr) {
     throw PassException("input node <" + cli::interInNode +"> not found" );
   }

-  auto input = loadInput(inputNode->getOperation()->getOutputShape(0));
+  auto input = loadInput(inputOp->getOutputShape(0));
   interpreter.setInput(cli::interInNode, input);
   g->accept(&interpreter);
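Note: every visitor in this patch changes by the same mechanical rule — `visit(INode*, ops::XOp&)` becomes `visit(ops::XOp&)`, and id, name and topology are read off the operation itself. For readers unfamiliar with the double dispatch behind `accept`/`visit`, here is a self-contained miniature of the resulting shape (plain standard C++ written for this note, not the project's headers):

    #include <initializer_list>
    #include <iostream>
    #include <string>

    struct ReluOp;
    struct ConcatOp;

    // One pure-virtual visit() per concrete operation, taking the op directly.
    struct IVisitor {
      virtual void visit(ReluOp& op) = 0;
      virtual void visit(ConcatOp& op) = 0;
      virtual ~IVisitor() = default;
    };

    struct Operation {
      explicit Operation(std::string n) : name(std::move(n)) {}
      virtual void accept(IVisitor* v) = 0;  // double-dispatch entry point
      virtual ~Operation() = default;
      std::string name;
    };

    struct ReluOp : Operation {
      using Operation::Operation;
      void accept(IVisitor* v) override { v->visit(*this); }
    };

    struct ConcatOp : Operation {
      using Operation::Operation;
      void accept(IVisitor* v) override { v->visit(*this); }
    };

    struct Printer : IVisitor {
      void visit(ReluOp& op) override   { std::cout << "relu: "   << op.name << '\n'; }
      void visit(ConcatOp& op) override { std::cout << "concat: " << op.name << '\n'; }
    };

    int main() {
      ReluOp relu("r0");
      ConcatOp concat("c0");
      Printer p;
      for (Operation* op : {static_cast<Operation*>(&relu), static_cast<Operation*>(&concat)})
        op->accept(&p);  // each accept() dispatches to the matching visit() overload
    }

Each `accept` override resolves the dynamic type once, so visitors such as the interpreter never need to downcast.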
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
index 1848da7..e6dfc10 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
@@ -19,7 +19,6 @@
 #include
 #include
 #include
-#include "core/modelIR/ir_node.h"
 #include "core/modelIR/operations/VariableOp.h"
 #include "core/modelIR/TensorVariant.h"
 #include "onnx/onnx_pb.h"
@@ -27,6 +26,7 @@
 #include "passes/common_frontend/model_allocation.h"
 #include "ONNXImporterImpl.h"
 #include "ONNXPerfectHash.h"
+#include

 namespace nnc {

@@ -116,8 +116,8 @@ void ONNXImporterImpl::createGraphInputs() {
     auto name = input.name();

     // Every VariableOp relates to one graph input
-    auto node = _graph->create<mir::ops::VariableOp>(name);
-    _opsForBlobsTheyOutput[name] = node;
+    auto op = _graph->create<mir::ops::VariableOp>(name);
+    _opsForBlobsTheyOutput[name] = op;

     if (onnx_tensors.find(name) != onnx_tensors.end()) {
       const onnx::TensorProto* onnx_tensor = onnx_tensors[name];
@@ -125,14 +125,14 @@ void ONNXImporterImpl::createGraphInputs() {
       mir::Shape input_shape = ShapeHelper::createShape(onnx_tensor->dims(),
                                                         static_cast<size_t>(onnx_tensor->dims_size()));
       // WARNING! Temporary solution!
-      node->getOperation()->setOutputShape(0, input_shape);
+      op->setOutputShape(0, input_shape);
     } else {
       assert(!name.compare("data"));
       _inputTensors[name] = createTensor(nullptr);
-      // TODO: should we update node with special shape?
+      // TODO: should we update op with special shape?
       mir::Shape input_shape = ShapeHelper::createShape(std::vector<int32_t>(), 0);
       // WARNING! Temporary solution!
-      node->getOperation()->setOutputShape(0, input_shape);
+      op->setOutputShape(0, input_shape);
     }
     std::cout << "Node name '" << name << "' added\n"; // < std::endl;
   }
@@ -156,7 +156,7 @@ mir::Graph *ONNXImporterImpl::createIR() {
   for (auto onnxNode : _model->graph().node()) {
     assert(onnxNode.has_op_type());
     auto op_type = onnxNode.op_type().c_str();
-    std::vector<mir::INode*> input_nodes;
+    std::vector<mir::Operation*> input_nodes;
     // Fill inputs of the given node
     for (auto name : onnxNode.input()) {
       if (_opsForBlobsTheyOutput.find(name) != _opsForBlobsTheyOutput.end())
@@ -165,8 +165,8 @@ mir::Graph *ONNXImporterImpl::createIR() {
         std::cout << "Node name '" << name << "' was not found\n";
     }
     std::vector<std::shared_ptr<mir::TensorVariant>> params;
-    std::vector<mir::INode*> outputs;
-    mir::INode *prev;
+    std::vector<mir::Operation*> outputs;
+    mir::Operation* prev;
     auto *opType = ONNXPerfectHash::getONNXOpType(op_type, onnxNode.op_type().size());
     // 2 variables used as result of getXXXAttribute()
     bool found;
@@ -200,7 +200,7 @@ mir::Graph *ONNXImporterImpl::createIR() {
         throw PassException("Concat must have 'axis' attribute");
       break;
       case ONNXOpCode::opReshape:
-        outputs = _opCreator.createReshape(input_nodes[0], input_nodes[1]->getOperation()->getOutputShape(0));
+        outputs = _opCreator.createReshape(input_nodes[0], input_nodes[1]->getOutputShape(0));
         break;
       case ONNXOpCode::opRelu:
         outputs = _opCreator.createRelu(input_nodes);
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h
index d8387a8..bdf97d4 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h
+++ b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h
@@ -22,7 +22,7 @@
 #include
 #include

-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 #include "ONNXOpType.h"
 #include "ONNXOpCreator.h"
 #include "passes/common_frontend/nn_importer.h"
@@ -47,11 +47,11 @@ private:
   // This map maps caffe tensor names to MIR operations/nodes
   // that correspond to operations having these tensors as output.
-  std::map<std::string, mir::INode*> _opsForBlobsTheyOutput;
+  std::map<std::string, mir::Operation*> _opsForBlobsTheyOutput;
   // This map keeps named tensors used as graph input initializers.
   std::map<std::string, std::shared_ptr<mir::TensorVariant>> _inputTensors;
-  std::vector<mir::INode*> _graphOutputs;
+  std::vector<mir::Operation*> _graphOutputs;

   std::string _modelFilename;
   std::unique_ptr<onnx::ModelProto> _model;
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
index 7d3d96d..0348082 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
@@ -17,7 +17,7 @@
 #include
 #include
 #include "core/modelIR/Index.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 #include "core/modelIR/ShapeRange.h"
 #include "core/modelIR/Tensor.h"
 #include "core/modelIR/operations/BatchNormOp.h"
@@ -42,49 +42,49 @@ namespace nnc {

 using namespace mir;

-std::vector<INode::Ref> ONNXOpCreator::createConv2D(InputOps inputs, InputParams params,
+std::vector<Operation*> ONNXOpCreator::createConv2D(InputOps inputs, InputParams params,
                                                     ::onnx::NodeProto node) {
-  return std::vector<INode::Ref>();
+  return std::vector<Operation*>();
 }

-std::vector<INode::Ref> ONNXOpCreator::createConcat(InputOps inputs, int axis) {
+std::vector<Operation*> ONNXOpCreator::createConcat(InputOps inputs, int axis) {
   return createOp<ops::ConcatOp>(inputs, inputs.size(), axis);
 }

-std::vector<INode::Ref> ONNXOpCreator::createPool(InputOps inputs, ONNXOpCode opCode) {
-  return std::vector<INode::Ref>();
+std::vector<Operation*> ONNXOpCreator::createPool(InputOps inputs, ONNXOpCode opCode) {
+  return std::vector<Operation*>();
 }

-std::vector<INode::Ref> ONNXOpCreator::createSoftmax(InputOps inputs, int axis) {
+std::vector<Operation*> ONNXOpCreator::createSoftmax(InputOps inputs, int axis) {
   return createOp<ops::SoftmaxOp>(inputs, axis);
 }

-std::vector<INode::Ref> ONNXOpCreator::createReshape(INode::Ref inputData, Shape outputShape) {
-  std::vector<INode::Ref> inputNodes;
+std::vector<Operation*> ONNXOpCreator::createReshape(Operation* inputData, Shape outputShape) {
+  std::vector<Operation*> inputNodes;
   inputNodes.push_back(inputData);
   auto outputs = createOp<ops::ReshapeOp>(inputNodes);
-  outputs[0]->getOperation()->setOutputShape(0, outputShape);
+  outputs[0]->setOutputShape(0, outputShape);
   return outputs;
 }

-std::vector<INode::Ref> ONNXOpCreator::createRelu(InputOps inputs) {
+std::vector<Operation*> ONNXOpCreator::createRelu(InputOps inputs) {
   assert(inputs.size() == 1);
   return createOp<ops::ReluOp>(inputs);
 }

-std::vector<INode::Ref> ONNXOpCreator::createScale(InputOps inputs, InputParams params, ::onnx::NodeProto node) {
-  return std::vector<INode::Ref>();
+std::vector<Operation*> ONNXOpCreator::createScale(InputOps inputs, InputParams params, ::onnx::NodeProto node) {
+  return std::vector<Operation*>();
 }

-std::vector<INode::Ref> ONNXOpCreator::createBatchNorm(InputOps inputs, InputParams params, ::onnx::NodeProto node) {
-  return std::vector<INode::Ref>();
+std::vector<Operation*> ONNXOpCreator::createBatchNorm(InputOps inputs, InputParams params, ::onnx::NodeProto node) {
+  return std::vector<Operation*>();
 }

-std::vector<INode::Ref> ONNXOpCreator::createDropout(InputOps inputs, float ratio) {
+std::vector<Operation*> ONNXOpCreator::createDropout(InputOps inputs, float ratio) {
   return createOp<ops::DropoutOp>(inputs, ratio);
 }

-void ONNXOpCreator::connectInputs(INode::Ref op, InputOps inputs) {
+void ONNXOpCreator::connectInputs(Operation* op, InputOps inputs) {
   // TODO: this part doesn't support the situation where an operator takes as input
   // some tensor that is not the 0th output of some other operator
   for (int i = 0; i < static_cast<int>(inputs.size()); ++i)
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
index ef282e8..a20d723 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
@@ -21,8 +21,7 @@
 #include
 #include
 #include
-#include "core/modelIR/graph.h"
-#include "core/modelIR/ir_node.h"
+#include "core/modelIR/Graph.h"
 #include "core/modelIR/TensorVariant.h"
 #include "core/modelIR/operations/common.h"
 #include "core/modelIR/Shape.h"
@@ -33,32 +32,32 @@ namespace nnc {

 class ONNXOpCreator {
 public:
-  using InputOps = std::vector<nnc::mir::INode::Ref>&;
+  using InputOps = std::vector<nnc::mir::Operation*>&;
   using InputParams = std::vector<std::shared_ptr<nnc::mir::TensorVariant>>&;

   ONNXOpCreator() {};
-  std::vector<nnc::mir::INode::Ref> createConv2D(InputOps inputs, InputParams params, ::onnx::NodeProto node);
-  std::vector<nnc::mir::INode::Ref> createConcat(InputOps inputs, int axis);
-  std::vector<nnc::mir::INode::Ref> createPool(InputOps inputs, ONNXOpCode opCode);
-  std::vector<nnc::mir::INode::Ref> createSoftmax(InputOps inputs, int axis);
-  std::vector<nnc::mir::INode::Ref> createReshape(nnc::mir::INode::Ref inputData, nnc::mir::Shape outputShape);
-  std::vector<nnc::mir::INode::Ref> createRelu(InputOps inputs);
-  std::vector<nnc::mir::INode::Ref> createScale(InputOps inputs, InputParams params, ::onnx::NodeProto node);
-  std::vector<nnc::mir::INode::Ref> createBatchNorm(InputOps inputs, InputParams params, ::onnx::NodeProto node);
-  std::vector<nnc::mir::INode::Ref> createDropout(InputOps inputs, float ratio);
+  std::vector<nnc::mir::Operation*> createConv2D(InputOps inputs, InputParams params, ::onnx::NodeProto node);
+  std::vector<nnc::mir::Operation*> createConcat(InputOps inputs, int axis);
+  std::vector<nnc::mir::Operation*> createPool(InputOps inputs, ONNXOpCode opCode);
+  std::vector<nnc::mir::Operation*> createSoftmax(InputOps inputs, int axis);
+  std::vector<nnc::mir::Operation*> createReshape(nnc::mir::Operation* inputData, nnc::mir::Shape outputShape);
+  std::vector<nnc::mir::Operation*> createRelu(InputOps inputs);
+  std::vector<nnc::mir::Operation*> createScale(InputOps inputs, InputParams params, ::onnx::NodeProto node);
+  std::vector<nnc::mir::Operation*> createBatchNorm(InputOps inputs, InputParams params, ::onnx::NodeProto node);
+  std::vector<nnc::mir::Operation*> createDropout(InputOps inputs, float ratio);

   void setMirGraph(mir::Graph* g){ _graph = g; }

 private:
-  void connectInputs(nnc::mir::INode::Ref op, std::vector<nnc::mir::INode::Ref>& inputs);
+  void connectInputs(nnc::mir::Operation* op, std::vector<nnc::mir::Operation*>& inputs);

   template <typename OpType, typename... Types>
-  std::vector<nnc::mir::INode::Ref> createOp(std::vector<nnc::mir::INode::Ref>& inputs, Types&&... args);
+  std::vector<nnc::mir::Operation*> createOp(std::vector<nnc::mir::Operation*>& inputs, Types&&... args);

   mir::Graph* _graph = nullptr;
 };

 template <typename OpType, typename... Types>
-std::vector<nnc::mir::INode::Ref> ONNXOpCreator::createOp(std::vector<nnc::mir::INode::Ref>& inputs, Types&&... args) {
-  std::vector<nnc::mir::INode::Ref> outputs;
+std::vector<nnc::mir::Operation*> ONNXOpCreator::createOp(std::vector<nnc::mir::Operation*>& inputs, Types&&... args) {
+  std::vector<nnc::mir::Operation*> outputs;
   // TODO: set operation names
   auto op = _graph->create<OpType>("", std::forward<Types>(args)...);
diff --git a/contrib/nnc/passes/soft_backend/CPPGenerator.cpp b/contrib/nnc/passes/soft_backend/CPPGenerator.cpp
index 6a776bf..82e7519 100644
--- a/contrib/nnc/passes/soft_backend/CPPGenerator.cpp
+++ b/contrib/nnc/passes/soft_backend/CPPGenerator.cpp
@@ -240,7 +240,7 @@ void CPPCodeGenerator::materializeInferenceSequence(ostream &out, const ModelAna
     printTmpTensors(out, ma, _formattedTensors, op);
     // materialize call
     out << "  " << op._opName << "(";
-    const auto &prevNodes = op._node->getPrevNodes();
+    const auto &prevNodes = op._op->getPrevNodes();
     const auto &outTensors = op._outputs;
     vector<string> args;
     args.reserve(prevNodes.size() + outTensors.size() + 1);
diff --git a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
index c897e16..8ded1d1 100644
--- a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
+++ b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
@@ -18,7 +18,6 @@
 #include
 #include "ModelAnalyzer.h"

-#include "core/modelIR/ir_node.h"
 #include "core/modelIR/Shape.h"
 #include "core/modelIR/ShapeRange.h"

@@ -39,6 +38,8 @@
 #include "core/modelIR/operations/DropoutOp.h"
 #include "core/modelIR/operations/TanhOp.h"
 #include "core/modelIR/operations/ElementwiseOp.h"
+#include "core/modelIR/operations/VariableOp.h"
+#include "core/modelIR/operations/SqueezeOp.h"

 using namespace std;

 namespace nnc
 {

 using namespace nnc::mir;

-void ModelAnalyzer::addOpDescr(INode *node, const string &opName)
+void ModelAnalyzer::addOpDescr(Operation* op, const string& opName)
 {
   OpDescr::Type type = OpDescr::Type::ORDINARY;
   vector<size_t> nodeOutputs;
-  const std::string &name = node->getName();
+  const std::string &name = op->getName();
   size_t nodeTid = INVALID_TENSOR_ID;
-  if (node->getPrevNodes().empty())
+  if (op->getPrevNodes().empty())
   {
-    // process input node
-    Shape inputShape = node->getOperation()->getOutputShape(0);
+    // process input op
+    Shape inputShape = op->getOutputShape(0);
     nodeTid = allocateTensor(name, TensorDescription::Type::IN, &inputShape);
     _inputs.push_back(nodeTid);
     type = OpDescr::Type::IN;
   }
   else if (!name.empty())
   {
-    // process output node
+    // process output op
     nodeTid = allocateTensor(name, TensorDescription::Type::OUT);
     _named_tensors.push_back(nodeTid);
     type = OpDescr::Type::OUT;
   }
   else
   {
-    // process ordinary node
+    // process ordinary op
     nodeTid = allocateTensor();
   }
   assert(nodeTid != INVALID_TENSOR_ID);
   nodeOutputs.push_back(nodeTid);
-  // process node outputs
-  // consider node as output if it has no consumers
-  if (node->getNextNodes().empty())
+  // process op outputs
+  // consider op as output if it has no consumers
+  if (op->getNextNodes().empty())
   {
     assert(type == OpDescr::Type::OUT);
     _outputs.push_back(nodeTid);
   }
-  // process node inputs
+  // process op inputs
   vector<size_t> nodeInputs;
-  for (const INode::IODescriptor &d: node->getPrevNodes())
+  for (const IODescriptor &d: op->getPrevNodes())
   {
     size_t idx = d.index;
-    INode *node = d.node;
-    assert(_nodeToDescr.find(node) != _nodeToDescr.end());
-    const OpDescr &descr = *_nodeToDescr[node];
+    Operation *op = d.op;
+    assert(_opToDescr.find(op) != _opToDescr.end());
+    const OpDescr &descr = *_opToDescr[op];
     const size_t &inTid = descr._outputs[idx];
     nodeInputs.push_back(inTid);
   }
   _inferenceSequence.push_back({type, op, opName,
                                 std::move(nodeInputs),
                                 std::move(nodeOutputs), 0});
-
_nodeToDescr[node] = &_inferenceSequence.back(); + _opToDescr[op] = &_inferenceSequence.back(); } size_t ModelAnalyzer::allocateTensor(const string &name, TensorDescription::Type type, Shape *shape) @@ -116,97 +117,83 @@ size_t ModelAnalyzer::allocateTensor(const string &name, TensorDescription::Type return id; } -void ModelAnalyzer::visit(INode *node, ops::ConcatOp &op) -{ - addOpDescr(node, "concat"); +void ModelAnalyzer::visit(ops::ConcatOp& op) { + addOpDescr(&op, "concat"); } -void ModelAnalyzer::visit(INode *node, ops::Conv2DOp &op) -{ - addOpDescr(node, "conv2d"); +void ModelAnalyzer::visit(ops::Conv2DOp& op) { + addOpDescr(&op, "conv2d"); } -void ModelAnalyzer::visit(INode *node, ops::DepthwiseConv2DOp &op) -{ - addOpDescr(node, "depthwiseConv2d"); +void ModelAnalyzer::visit(ops::DepthwiseConv2DOp& op) { + addOpDescr(&op, "depthwiseConv2d"); } -void ModelAnalyzer::visit(INode *node, ops::SoftmaxOp &op) -{ - addOpDescr(node, "softmax"); +void ModelAnalyzer::visit(ops::SoftmaxOp& op) { + addOpDescr(&op, "softmax"); } /** * Model Ir does not separate different types of pool operations, but for code generation * it is easier to implement different types of pooling by different functions */ -void ModelAnalyzer::visit(INode *node, ops::PoolOp &op) -{ - const char *funcName = nullptr; - switch (op.getPoolingType()) - { - case ops::PoolOp::PoolingType::MAX: - funcName = "maxPool"; - break; - case ops::PoolOp::PoolingType::AVG: - funcName = "avgPool"; - break; - default: - assert(false && "unsupported pooling type"); +void ModelAnalyzer::visit(ops::PoolOp& op) { + const char* funcName = nullptr; + switch (op.getPoolingType()) { + case ops::PoolOp::PoolingType::MAX: + funcName = "maxPool"; + break; + case ops::PoolOp::PoolingType::AVG: + funcName = "avgPool"; + break; + default: + assert(false && "unsupported pooling type"); } - addOpDescr(node, funcName); + addOpDescr(&op, funcName); } -void ModelAnalyzer::visit(INode *node, ops::FullyConnectedOp &op) -{ - addOpDescr(node, "fullConnect"); +void ModelAnalyzer::visit(ops::FullyConnectedOp& op) { + addOpDescr(&op, "fullConnect"); } -void ModelAnalyzer::visit(INode *node, ops::CappedReluOp &op) -{ - addOpDescr(node, "cappedRelu"); +void ModelAnalyzer::visit(ops::CappedReluOp& op) { + addOpDescr(&op, "cappedRelu"); } -void ModelAnalyzer::visit(INode *node, ops::BiasAddOp &op) -{ - addOpDescr(node, "biasAdd"); +void ModelAnalyzer::visit(ops::BiasAddOp& op) { + addOpDescr(&op, "biasAdd"); } -void ModelAnalyzer::visit(INode *node, ops::VariableOp &op) -{ - assert(node->getPrevNodes().empty()); - addOpDescr(node, "in"); +void ModelAnalyzer::visit(ops::VariableOp& op) { + assert(op.getPrevNodes().empty()); + addOpDescr(&op, "in"); } -void ModelAnalyzer::visit(INode *node, ops::ReluOp &op) -{ - addOpDescr(node, "relu"); +void ModelAnalyzer::visit(ops::ReluOp& op) { + addOpDescr(&op, "relu"); } -void ModelAnalyzer::visit(INode *node, ops::ReshapeOp &op) -{ - addOpDescr(node, "reshape"); +void ModelAnalyzer::visit(ops::ReshapeOp& op) { + addOpDescr(&op, "reshape"); } -void ModelAnalyzer::visit(INode *node, ops::DropoutOp &op) -{ - addOpDescr(node, "dropout"); +void ModelAnalyzer::visit(ops::DropoutOp& op) { + addOpDescr(&op, "dropout"); } -void ModelAnalyzer::visit(INode *node, ops::ScaleOp &op) -{ - addOpDescr(node, "scale"); +void ModelAnalyzer::visit(ops::ScaleOp& op) { + addOpDescr(&op, "scale"); } -void ModelAnalyzer::visit(INode *node, ops::BatchNormOp &op) { - addOpDescr(node, "batchNorm"); +void ModelAnalyzer::visit(ops::BatchNormOp& op) { + 
addOpDescr(&op, "batchNorm"); } -void ModelAnalyzer::visit(mir::INode *node, mir::ops::TanhOp &op) { - addOpDescr(node, "tanh"); +void ModelAnalyzer::visit(mir::ops::TanhOp& op) { + addOpDescr(&op, "tanh"); } -void ModelAnalyzer::visit(mir::INode *node, mir::ops::ElementwiseOp &op) { +void ModelAnalyzer::visit(mir::ops::ElementwiseOp& op) { const char *funcName = nullptr; switch ( op.getOpType() ) { case ops::ElementwiseOp::OpType::sum: @@ -221,22 +208,22 @@ void ModelAnalyzer::visit(mir::INode *node, mir::ops::ElementwiseOp &op) { default: assert(false && "unsupported elementwise operation type"); } - addOpDescr(node, funcName); + addOpDescr(&op, funcName); } -void ModelAnalyzer::visit(mir::INode *node, mir::ops::EluOp &op) { - addOpDescr(node, "elu"); +void ModelAnalyzer::visit(mir::ops::EluOp& op) { + addOpDescr(&op, "elu"); } -void ModelAnalyzer::visit(mir::INode *node, mir::ops::DeConv2DOp &op) { - addOpDescr(node, "transposedconv2d"); +void ModelAnalyzer::visit(mir::ops::DeConv2DOp& op) { + addOpDescr(&op, "transposedconv2d"); } -void ModelAnalyzer::visit(INode* node, ops::SqueezeOp& op) { - addOpDescr(node, "squeeze"); +void ModelAnalyzer::visit(ops::SqueezeOp& op) { + addOpDescr(&op, "reshape"); } -void ModelAnalyzer::visit(mir::INode* node, mir::ops::PadOp& op) { +void ModelAnalyzer::visit(mir::ops::PadOp& op) { assert(false && "Not implemented yet"); } diff --git a/contrib/nnc/passes/soft_backend/ModelAnalyzer.h b/contrib/nnc/passes/soft_backend/ModelAnalyzer.h index fccb534..d2fbfcb 100644 --- a/contrib/nnc/passes/soft_backend/ModelAnalyzer.h +++ b/contrib/nnc/passes/soft_backend/ModelAnalyzer.h @@ -20,6 +20,7 @@ #include "core/modelIR/Visitor.h" #include "core/modelIR/Shape.h" #include "core/modelIR/TensorVariant.h" +#include "core/modelIR/Operation.h" #include #include @@ -65,7 +66,7 @@ struct OpDescr }; Type _type; - mir::INode *_node; + mir::Operation* _op; std::string _opName; // list of input tensors std::vector _inputs; @@ -78,29 +79,28 @@ struct OpDescr * @brief Constructs inference sequence for given computational graph, * gathers list of variables used in artifact. 
*/ -class ModelAnalyzer: public mir::IVisitor -{ +class ModelAnalyzer: public mir::IVisitor { public: - void visit(mir::INode *node, mir::ops::ConcatOp &op) override; - void visit(mir::INode *node, mir::ops::Conv2DOp &op) override; - void visit(mir::INode *node, mir::ops::DepthwiseConv2DOp &op) override; - void visit(mir::INode *node, mir::ops::SoftmaxOp &op) override; - void visit(mir::INode *node, mir::ops::PoolOp &op) override; - void visit(mir::INode *node, mir::ops::FullyConnectedOp &op) override; - void visit(mir::INode *node, mir::ops::CappedReluOp &op) override; - void visit(mir::INode *node, mir::ops::BiasAddOp &op) override; - void visit(mir::INode *node, mir::ops::VariableOp &op) override; - void visit(mir::INode *node, mir::ops::ReluOp &op) override; - void visit(mir::INode *node, mir::ops::ReshapeOp &op) override; - void visit(mir::INode *node, mir::ops::ScaleOp &op) override; - void visit(mir::INode *node, mir::ops::BatchNormOp &op) override; - void visit(mir::INode *node, mir::ops::DropoutOp &op) override; - void visit(mir::INode *node, mir::ops::TanhOp &op) override; - void visit(mir::INode *node, mir::ops::ElementwiseOp &op) override; - void visit(mir::INode *node, mir::ops::DeConv2DOp &op) override; - void visit(mir::INode *node, mir::ops::EluOp &op) override; - void visit(mir::INode* node, mir::ops::SqueezeOp& op) override; - void visit(mir::INode* node, mir::ops::PadOp& op) override; + void visit(mir::ops::ConcatOp& op) override; + void visit(mir::ops::Conv2DOp& op) override; + void visit(mir::ops::DepthwiseConv2DOp& op) override; + void visit(mir::ops::SoftmaxOp& op) override; + void visit(mir::ops::PoolOp& op) override; + void visit(mir::ops::FullyConnectedOp& op) override; + void visit(mir::ops::CappedReluOp& op) override; + void visit(mir::ops::BiasAddOp& op) override; + void visit(mir::ops::VariableOp& op) override; + void visit(mir::ops::ReluOp& op) override; + void visit(mir::ops::ReshapeOp& op) override; + void visit(mir::ops::ScaleOp& op) override; + void visit(mir::ops::BatchNormOp& op) override; + void visit(mir::ops::DropoutOp& op) override; + void visit(mir::ops::TanhOp& op) override; + void visit(mir::ops::ElementwiseOp& op) override; + void visit(mir::ops::DeConv2DOp& op) override; + void visit(mir::ops::EluOp& op) override; + void visit(mir::ops::SqueezeOp& op) override; + void visit(mir::ops::PadOp& op) override; /** * @return vector of id's of network input tensors @@ -161,13 +161,13 @@ public: private: /** * @brief Common function to add function call in inference sequence - * @param node Node representing added call + * @param op Node representing added call * @param name Function name * * Inserts information about CG operation into inference sequence: name of operation, * creates tensors for operation outputs, binds operation inputs with tensors from previous operations */ - void addOpDescr(mir::INode *node, const std::string &name); + void addOpDescr(mir::Operation* op, const std::string& name); enum class TensorType { @@ -193,7 +193,7 @@ private: std::vector _named_tensors; std::vector _outputs; std::vector _tensors; - std::map _nodeToDescr; + std::map _opToDescr; }; } // namespace nnc diff --git a/contrib/nnc/passes/soft_backend/SBSerializer.cpp b/contrib/nnc/passes/soft_backend/SBSerializer.cpp index 9c141e0..bb3f601 100644 --- a/contrib/nnc/passes/soft_backend/SBSerializer.cpp +++ b/contrib/nnc/passes/soft_backend/SBSerializer.cpp @@ -38,7 +38,6 @@ #include "core/modelIR/operations/TanhOp.h" #include "core/modelIR/operations/ElementwiseOp.h" 
#include "core/modelIR/operations/SqueezeOp.h" -#include "core/modelIR/ir_node.h" #include "pass/PassException.h" #include @@ -57,7 +56,6 @@ using nnc::mir::Index; using nnc::mir::ShapeRange; using nnc::mir::transposeTensor; using nnc::mir::TensorVariant; -using nnc::mir::INode; namespace ops = nnc::mir::ops; @@ -132,7 +130,7 @@ void Serializer::serializeTensor(const TensorVariant &t) } template -void Serializer::serializePads(const Op &op, int32_t padsRank) +void Serializer::serializePads(const Op& op, int32_t padsRank) { // serialize padding type assert(etoi(op.getPaddingType()) < MAX_ENUM_VAL); @@ -150,8 +148,7 @@ void Serializer::serializePads(const Op &op, int32_t padsRank) } } -void Serializer::visit(INode *node, ops::ConcatOp &op) -{ +void Serializer::visit(ops::ConcatOp& op) { _curOp->_paramStartOffset = _buffer.size(); // axis number should fit into one byte assert(op.getAxis() <= MAX_DIMS); @@ -159,8 +156,7 @@ void Serializer::visit(INode *node, ops::ConcatOp &op) serializeShape(op.getOutputShape(0)); } -void Serializer::visit(INode *node, ops::Conv2DOp &op) -{ +void Serializer::visit(ops::Conv2DOp& op) { _curOp->_paramStartOffset = _buffer.size(); // serialize kernel shared_ptr HWCNKernel = make_shared(op.getKernel()); @@ -176,8 +172,7 @@ void Serializer::visit(INode *node, ops::Conv2DOp &op) serializeShape(op.getOutputShape(0)); } -void Serializer::visit(INode *node, ops::DepthwiseConv2DOp &op) -{ +void Serializer::visit(ops::DepthwiseConv2DOp& op) { _curOp->_paramStartOffset = _buffer.size(); // serialize kernel const TensorVariant &kernel = op.getKernel(); @@ -191,16 +186,14 @@ void Serializer::visit(INode *node, ops::DepthwiseConv2DOp &op) serializeShape(op.getOutputShape(0)); } -void Serializer::visit(INode *node, ops::SoftmaxOp &op) -{ +void Serializer::visit(ops::SoftmaxOp& op) { _curOp->_paramStartOffset = _buffer.size(); // axis number should fit into one byte assert(op.getAxis() <= MAX_DIMS); serializeT(op.getAxis()); } -void Serializer::visit(INode *node, ops::PoolOp &op) -{ +void Serializer::visit(ops::PoolOp& op) { _curOp->_paramStartOffset = _buffer.size(); // serialize window shape const Shape &windowShape = op.getWindowShape(); @@ -228,8 +221,7 @@ void Serializer::visit(INode *node, ops::PoolOp &op) serializeShape(op.getOutputShape(0)); } -void Serializer::visit(INode *node, ops::FullyConnectedOp &op) -{ +void Serializer::visit(ops::FullyConnectedOp& op) { _curOp->_paramStartOffset = _buffer.size(); shared_ptr weights = make_shared(op.getWeights()); shared_ptr transposedWeights = transposeTensor<1, 0>(weights); @@ -237,51 +229,43 @@ void Serializer::visit(INode *node, ops::FullyConnectedOp &op) serializeShape(op.getOutputShape(0)); } -void Serializer::visit(INode *node, ops::CappedReluOp &op) -{ +void Serializer::visit(ops::CappedReluOp& op) { _curOp->_paramStartOffset = _buffer.size(); serializeT(op.getCap()); } -void Serializer::visit(INode *node, ops::BiasAddOp &op) -{ +void Serializer::visit(ops::BiasAddOp& op) { _curOp->_paramStartOffset = _buffer.size(); serializeTensor(op.getWeights()); } -void Serializer::visit(INode *node, ops::VariableOp &op) -{ +void Serializer::visit(ops::VariableOp& op) { // no parameters to dump } -void Serializer::visit(INode *node, ops::ReluOp &op) -{ +void Serializer::visit(ops::ReluOp& op) { _curOp->_paramStartOffset = _buffer.size(); // no parameters to dump } -void Serializer::visit(INode *node, ops::ReshapeOp &op) -{ +void Serializer::visit(ops::ReshapeOp& op) { _curOp->_paramStartOffset = _buffer.size(); 
serializeShape(op.getOutputShape(0)); } -void Serializer::visit(INode *node, ops::BatchNormOp &op) -{ +void Serializer::visit(ops::BatchNormOp& op) { _curOp->_paramStartOffset = _buffer.size(); serializeT(op.getEps()); serializeT(op.getMovingAvgFraction()); serializeT(op.getSpatial()); } -void Serializer::visit(INode *node, ops::ScaleOp &op) -{ +void Serializer::visit(ops::ScaleOp& op) { _curOp->_paramStartOffset = _buffer.size(); serializeTensor(op.getWeights()); } -void Serializer::visit(INode *node, ops::DropoutOp &op) -{ +void Serializer::visit(ops::DropoutOp& op) { _curOp->_paramStartOffset = _buffer.size(); serializeT(op.getRate()); } @@ -290,29 +274,28 @@ void Serializer::serialize(list &inferenceSequence) { for (OpDescr &descr: inferenceSequence) { - INode *node = descr._node; _curOp = &descr; - node->accept(this); + descr._op->accept(this); } } -void Serializer::visit(mir::INode *node, mir::ops::TanhOp &op) { +void Serializer::visit(mir::ops::TanhOp& op) { _curOp->_paramStartOffset = _buffer.size(); // no parameters to dump } -void Serializer::visit(mir::INode *node, mir::ops::ElementwiseOp &op) { +void Serializer::visit(mir::ops::ElementwiseOp& op) { _curOp->_paramStartOffset = _buffer.size(); // Op type is known at codegen Time serializeT((int32_t) op.getNumInputs()); } -void Serializer::visit(mir::INode *node, mir::ops::EluOp &op) { +void Serializer::visit(mir::ops::EluOp& op) { _curOp->_paramStartOffset = _buffer.size(); serializeT(op.getAlpha()); } -void Serializer::visit(mir::INode *node, mir::ops::DeConv2DOp &op) { +void Serializer::visit(mir::ops::DeConv2DOp& op) { _curOp->_paramStartOffset = _buffer.size(); // serialize kernel shared_ptr HWCNKernel = make_shared(op.getKernel()); @@ -328,12 +311,12 @@ void Serializer::visit(mir::INode *node, mir::ops::DeConv2DOp &op) { serializeShape(op.getOutputShape(0)); } -void Serializer::visit(INode* node, ops::SqueezeOp& op) { +void Serializer::visit(ops::SqueezeOp& op) { _curOp->_paramStartOffset = _buffer.size(); serializeShape(op.getOutputShape(0)); } -void Serializer::visit(mir::INode* node, mir::ops::PadOp& op) { +void Serializer::visit(mir::ops::PadOp& op) { throw PassException("Not implemented yet"); } diff --git a/contrib/nnc/passes/soft_backend/SBSerializer.h b/contrib/nnc/passes/soft_backend/SBSerializer.h index 6e40ea1..f5135a7 100644 --- a/contrib/nnc/passes/soft_backend/SBSerializer.h +++ b/contrib/nnc/passes/soft_backend/SBSerializer.h @@ -38,30 +38,29 @@ namespace nnc * To gather this vector use `getBuffer` method. 
* Objects of this class are one-off and not designed to serialize more than one IR */ -class Serializer: public mir::IVisitor -{ +class Serializer: public mir::IVisitor { public: - void visit(mir::INode *node, mir::ops::ConcatOp &op) override; - void visit(mir::INode *node, mir::ops::Conv2DOp &op) override; - void visit(mir::INode *node, mir::ops::DepthwiseConv2DOp &op) override; - void visit(mir::INode *node, mir::ops::SoftmaxOp &op) override; - void visit(mir::INode *node, mir::ops::PoolOp &op) override; - void visit(mir::INode *node, mir::ops::FullyConnectedOp &op) override; - void visit(mir::INode *node, mir::ops::CappedReluOp &op) override; - void visit(mir::INode *node, mir::ops::BiasAddOp &op) override; - void visit(mir::INode *node, mir::ops::VariableOp &op) override; - void visit(mir::INode *node, mir::ops::ReluOp &op) override; - void visit(mir::INode *node, mir::ops::ReshapeOp &op) override; - void visit(mir::INode *node, mir::ops::ScaleOp &op) override; - void visit(mir::INode *node, mir::ops::BatchNormOp &op) override; - void visit(mir::INode *node, mir::ops::DropoutOp &op) override; - void visit(mir::INode *node, mir::ops::TanhOp &op) override; - void visit(mir::INode *node, mir::ops::ElementwiseOp &op) override; - void visit(mir::INode *node, mir::ops::DeConv2DOp &op) override; - void visit(mir::INode *node, mir::ops::EluOp &op) override; - void visit(mir::INode* node, mir::ops::SqueezeOp& op) override; - void visit(mir::INode* node, mir::ops::PadOp& op) override; + void visit(mir::ops::ConcatOp& op) override; + void visit(mir::ops::Conv2DOp& op) override; + void visit(mir::ops::DepthwiseConv2DOp& op) override; + void visit(mir::ops::SoftmaxOp& op) override; + void visit(mir::ops::PoolOp& op) override; + void visit(mir::ops::FullyConnectedOp& op) override; + void visit(mir::ops::CappedReluOp& op) override; + void visit(mir::ops::BiasAddOp& op) override; + void visit(mir::ops::VariableOp& op) override; + void visit(mir::ops::ReluOp& op) override; + void visit(mir::ops::ReshapeOp& op) override; + void visit(mir::ops::ScaleOp& op) override; + void visit(mir::ops::BatchNormOp& op) override; + void visit(mir::ops::DropoutOp& op) override; + void visit(mir::ops::TanhOp& op) override; + void visit(mir::ops::ElementwiseOp& op) override; + void visit(mir::ops::DeConv2DOp& op) override; + void visit(mir::ops::EluOp& op) override; + void visit(mir::ops::SqueezeOp& op) override; + void visit(mir::ops::PadOp& op) override; void serialize(std::list &inferenceSequence); diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp index 05af8d6..698237c 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp @@ -138,7 +138,7 @@ void TfliteImporter::walkSubGraph(const SubGraph* s) { // So far we assume that if the first dimension is equal to 1, // then it is the batch dimension and should be ignored ShapeHelper::cutOffBatchDim(inputShape); - node->getOperation()->setOutputShape(0, inputShape); + node->setOutputShape(0, inputShape); } for (auto op: *(s->operators())) @@ -149,7 +149,7 @@ void TfliteImporter::walkOperator(const Operator* op) { auto inputs = getPrecedingMIROps(op); auto params = createOpParams(op); - std::vector outputs; + std::vector outputs; unsigned int opcode = (*_opcodes)[op->opcode_index()]->builtin_code(); switch (opcode) { @@ -194,8 +194,8 @@ void TfliteImporter::walkOperator(const Operator* op) { 
_opsForTensorsTheyOutput[(*(op->outputs()))[i]] = outputs[i]; } -std::vector TfliteImporter::getPrecedingMIROps(const Operator* op) { - std::vector inputsForOp; +std::vector TfliteImporter::getPrecedingMIROps(const Operator* op) { + std::vector inputsForOp; try { for (auto i : *(op->inputs())) { diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp index 7f90dc8..0845ab2 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp @@ -40,7 +40,7 @@ void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts, checkActivationType(opts->fused_activation_function(), problems_op_set); } -std::vector TFLiteOpCreator::convertConv2D(InputOps inputs, InputParams params, +std::vector TFLiteOpCreator::convertConv2D(InputOps inputs, InputParams params, const Conv2DOptions* opts) { auto outputs = createOp(inputs, ActivationFunctionType_NONE, std::move(*params[0]), Shape{static_cast(opts->stride_h()), @@ -55,8 +55,9 @@ void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts, checkActivationType(opts->fused_activation_function(), problems_op_set); } -std::vector TFLiteOpCreator::convertDepthwiseConv2D(InputOps inputs, InputParams params, - const DepthwiseConv2DOptions* opts) { +std::vector TFLiteOpCreator::convertDepthwiseConv2D(InputOps inputs, + InputParams params, + const DepthwiseConv2DOptions* opts) { auto outputs = createOp( inputs, ActivationFunctionType_NONE, std::move(*params[0]), Shape{static_cast(opts->stride_h()), @@ -71,8 +72,9 @@ void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts, checkActivationType(opts->fused_activation_function(), problems_op_set); } -std::vector TFLiteOpCreator::convertConcatenation(InputOps inputs, InputParams params, - const ConcatenationOptions* opts) { +std::vector TFLiteOpCreator::convertConcatenation(InputOps inputs, + InputParams params, + const ConcatenationOptions* opts) { // Decrementing axis to account for the unnecessary batch dimension return createOp(inputs, opts->fused_activation_function(), inputs.size(), opts->axis() - 1); @@ -83,8 +85,8 @@ void TFLiteOpCreator::checkPool2D(const Pool2DOptions* opts, checkActivationType(opts->fused_activation_function(), problems_op_set); } -std::vector TFLiteOpCreator::convertMaxPool2D(InputOps inputs, InputParams params, - const Pool2DOptions* opts) { +std::vector TFLiteOpCreator::convertMaxPool2D(InputOps inputs, InputParams params, + const Pool2DOptions* opts) { return createOp(inputs, opts->fused_activation_function(), Shape{static_cast(opts->filter_height()), static_cast(opts->filter_width()), 1}, @@ -94,8 +96,9 @@ std::vector TFLiteOpCreator::convertMaxPool2D(InputOps inputs, Input ops::PoolOp::BorderType::EMPTY); } -std::vector TFLiteOpCreator::convertAveragePool2D(InputOps inputs, InputParams params, - const Pool2DOptions* opts) { +std::vector TFLiteOpCreator::convertAveragePool2D(InputOps inputs, + InputParams params, + const Pool2DOptions* opts) { return createOp(inputs, opts->fused_activation_function(), Shape{static_cast(opts->filter_height()), static_cast(opts->filter_width()), 1}, @@ -105,36 +108,37 @@ std::vector TFLiteOpCreator::convertAveragePool2D(InputOps inputs, I ops::PoolOp::BorderType::EMPTY); } -std::vector TFLiteOpCreator::createSoftmax(InputOps inputs, InputParams params, - const SoftmaxOptions* opts) { +std::vector TFLiteOpCreator::createSoftmax(InputOps inputs, InputParams params, + const 
SoftmaxOptions* opts) { // -1 represents last one dimension return createOp(inputs, ActivationFunctionType_NONE, -1); } -std::vector TFLiteOpCreator::convertReshape(InputOps inputs, InputParams params, - const ReshapeOptions* opts) { +std::vector TFLiteOpCreator::convertReshape(InputOps inputs, InputParams params, + const ReshapeOptions* opts) { auto outputs = createOp(inputs, ActivationFunctionType_NONE); // TODO: we should also support "-1" values in new_shape, which means that correct // shape values must be calculated. Better do it in the shape inference module. Shape newShape = ShapeHelper::createShape(*opts->new_shape(), opts->new_shape()->size()); - outputs[0]->getOperation()->setOutputShape(0, newShape); + outputs[0]->setOutputShape(0, newShape); return outputs; } void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions* opts, - std::set& problems_op_set) { + std::set& problems_op_set) { checkActivationType(opts->fused_activation_function(), problems_op_set); } -std::vector TFLiteOpCreator::convertFullyConnected(InputOps& inputs, - InputParams& params, - const FullyConnectedOptions* opts) { +std::vector +TFLiteOpCreator::convertFullyConnected(InputOps& inputs, + InputParams& params, + const FullyConnectedOptions* opts) { // Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize] auto outputs = createOp(inputs, ActivationFunctionType_NONE); int32_t fcInputSize = params[0]->getShape().dim(0); - outputs[0]->getOperation()->setOutputShape(0, {1, fcInputSize}); + outputs[0]->setOutputShape(0, {1, fcInputSize}); auto fc_outputs = createOp(outputs, ActivationFunctionType_NONE, std::move(*params[0])); @@ -143,7 +147,7 @@ std::vector TFLiteOpCreator::convertFullyConnected(InputOps& inputs, } void TFLiteOpCreator::checkActivationType(ActivationFunctionType activation_type, - std::set& problems_op_set) { + std::set& problems_op_set) { if (activation_type != ActivationFunctionType_NONE && activation_type != ActivationFunctionType_RELU && activation_type != ActivationFunctionType_RELU6) @@ -151,9 +155,9 @@ void TFLiteOpCreator::checkActivationType(ActivationFunctionType activation_type + EnumNamesActivationFunctionType()[activation_type]); } -INode::Ref TFLiteOpCreator::addFusedActivation(INode::Ref input, - ActivationFunctionType activation_type) { - INode::Ref activation; +mir::Operation* TFLiteOpCreator::addFusedActivation(mir::Operation* input, + ActivationFunctionType activation_type) { + mir::Operation* activation; if (activation_type != ActivationFunctionType_NONE) { // TODO: process other activation types @@ -168,7 +172,7 @@ INode::Ref TFLiteOpCreator::addFusedActivation(INode::Ref input, assert(false && "Unsupported activation types must be detected before this pass"); } - assert(input->getOperation()->getNumOutputs() == 1); + assert(input->getNumOutputs() == 1); activation->connectInputTo(0, input->getOutput(0)); return activation; } else { @@ -176,16 +180,16 @@ INode::Ref TFLiteOpCreator::addFusedActivation(INode::Ref input, } } -void TFLiteOpCreator::connectInputs(INode::Ref op, std::vector& inputs) { +void TFLiteOpCreator::connectInputs(mir::Operation* op, std::vector& inputs) { // TODO: this part doesn't support the situation where an operator takes as input // some tensor that is not the 0th output of some other operator - assert(inputs.size() == op->getOperation()->getNumInputs()); + assert(inputs.size() == op->getNumInputs()); for (size_t i = 0; i < inputs.size(); ++i) op->connectInputTo(i, inputs[i]->getOutput(0)); } -std::vector 
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
index 502ec53..fa9e0ad 100644
--- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
+++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
@@ -23,8 +23,7 @@
 #include <set>
 #include <vector>
 
-#include "core/modelIR/graph.h"
-#include "core/modelIR/ir_node.h"
+#include "core/modelIR/Graph.h"
 #include "core/modelIR/TensorVariant.h"
 #include "core/modelIR/Shape.h"
@@ -37,38 +36,40 @@ namespace nnc {
 namespace ops = mir::ops;
 
 using mir::Graph;
-using mir::INode;
 using IrTensor = mir::TensorVariant;
 using mir::Shape;
 
 class TFLiteOpCreator {
 public:
-  using InputOps = std::vector<INode::Ref>&;
+  using InputOps = std::vector<mir::Operation*>&;
   using InputParams = std::vector<std::unique_ptr<IrTensor>>&;
 
   explicit TFLiteOpCreator(Graph* g) : graph(g) {};
 
-  std::vector<INode::Ref> convertConv2D(InputOps, InputParams, const ::tflite::Conv2DOptions*);
+  std::vector<mir::Operation*> convertConv2D(InputOps, InputParams, const ::tflite::Conv2DOptions*);
 
-  std::vector<INode::Ref> convertDepthwiseConv2D(InputOps, InputParams,
-                                                 const ::tflite::DepthwiseConv2DOptions*);
+  std::vector<mir::Operation*> convertDepthwiseConv2D(InputOps, InputParams,
+                                                      const ::tflite::DepthwiseConv2DOptions*);
 
-  std::vector<INode::Ref> convertConcatenation(InputOps, InputParams,
-                                               const ::tflite::ConcatenationOptions*);
+  std::vector<mir::Operation*> convertConcatenation(InputOps, InputParams,
+                                                    const ::tflite::ConcatenationOptions*);
 
-  std::vector<INode::Ref> convertMaxPool2D(InputOps, InputParams, const ::tflite::Pool2DOptions*);
+  std::vector<mir::Operation*> convertMaxPool2D(InputOps, InputParams,
+                                                const ::tflite::Pool2DOptions*);
 
-  std::vector<INode::Ref> convertAveragePool2D(InputOps, InputParams,
-                                               const ::tflite::Pool2DOptions*);
+  std::vector<mir::Operation*> convertAveragePool2D(InputOps, InputParams,
+                                                    const ::tflite::Pool2DOptions*);
 
-  std::vector<INode::Ref> createSoftmax(InputOps, InputParams, const ::tflite::SoftmaxOptions*);
+  std::vector<mir::Operation*> createSoftmax(InputOps, InputParams, const ::tflite::SoftmaxOptions*);
 
-  std::vector<INode::Ref> convertReshape(InputOps, InputParams, const ::tflite::ReshapeOptions*);
+  std::vector<mir::Operation*> convertReshape(InputOps, InputParams,
+                                              const ::tflite::ReshapeOptions*);
 
-  std::vector<INode::Ref> convertFullyConnected(InputOps, InputParams,
-                                                const ::tflite::FullyConnectedOptions*);
+  std::vector<mir::Operation*> convertFullyConnected(InputOps, InputParams,
+                                                     const ::tflite::FullyConnectedOptions*);
 
-  std::vector<INode::Ref> createSqueeze(InputOps& inputs, InputParams& params, const ::tflite::SqueezeOptions* opts);
+  std::vector<mir::Operation*> createSqueeze(InputOps& inputs, InputParams& params,
+                                             const ::tflite::SqueezeOptions* opts);
 
   void checkPool2D(const ::tflite::Pool2DOptions*, std::set<std::string>&);
@@ -89,20 +90,22 @@ private:
 
   void checkActivationType(::tflite::ActivationFunctionType, std::set<std::string>&);
 
-  INode::Ref addFusedActivation(INode::Ref input, ::tflite::ActivationFunctionType activationType);
+  mir::Operation* addFusedActivation(mir::Operation* input,
+                                     ::tflite::ActivationFunctionType activationType);
 
-  void connectInputs(INode::Ref op, std::vector<INode::Ref>& inputs);
+  void connectInputs(mir::Operation* op, std::vector<mir::Operation*>& inputs);
 
   template<typename OpType, typename ...Types>
-  std::vector<INode::Ref> createOp(std::vector<INode::Ref>& inputs,
-                                   ::tflite::ActivationFunctionType activation, Types&& ... args);
+  std::vector<mir::Operation*> createOp(std::vector<mir::Operation*>& inputs,
+                                        ::tflite::ActivationFunctionType activation,
+                                        Types&& ... args);
 };
 
 template<typename OpType, typename ...Types>
-std::vector<INode::Ref> TFLiteOpCreator::createOp(
-  std::vector<INode::Ref>& inputs,
+std::vector<mir::Operation*> TFLiteOpCreator::createOp(
+  std::vector<mir::Operation*>& inputs,
   ::tflite::ActivationFunctionType activation, Types&& ... args) {
-  std::vector<INode::Ref> outputs;
+  std::vector<mir::Operation*> outputs;
 
   // TODO: how to name operations? in Tensorflow tensors get names, not operations
   auto op = graph->create<OpType>("", std::forward<Types>(args)...);
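For reference, the only change to this variadic factory is the node type it traffics in; the forwarding itself is untouched. A hedged sketch of what a converter call expands to (comment-only; the wiring steps are inferred from the helpers in this diff, since the tail of `createOp` is not shown here):

    // A call such as
    //   createOp<ops::ConcatOp>(inputs, activation, inputs.size(), axis);
    // presumably amounts to:
    //   mir::Operation* op = graph->create<ops::ConcatOp>("", inputs.size(), axis);
    //   connectInputs(op, inputs);           // op's i-th input <- inputs[i]'s output 0
    //   addFusedActivation(op, activation);  // append ReLU/ReLU6 when requested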
diff --git a/contrib/nnc/tests/interpreter/graph_creator.cpp b/contrib/nnc/tests/interpreter/graph_creator.cpp
index 952417f..d40a478 100644
--- a/contrib/nnc/tests/interpreter/graph_creator.cpp
+++ b/contrib/nnc/tests/interpreter/graph_creator.cpp
@@ -40,64 +40,66 @@ using namespace nnc;
 using namespace nnc::mir;
 
-static INode::Ref createFullyConnected(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createFullyConnected(std::unique_ptr<Graph>& g,
+                                       const opinfo::OperatorInfo* opInfo)
 {
   return g->create<ops::FullyConnectedOp>(
     "y", *getKernel(opInfo));
 }
 
-static INode::Ref createConv2D(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createConv2D(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
 {
   return g->create<ops::Conv2DOp>(
     "y", *getKernel(opInfo), getShapeParam(opInfo, 0), getPaddingType(opInfo));
 }
 
-static INode::Ref createDepthwiseConv2D(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createDepthwiseConv2D(std::unique_ptr<Graph>& g,
+                                        const opinfo::OperatorInfo* opInfo)
 {
   return g->create<ops::DepthwiseConv2DOp>(
     "y", *getKernel(opInfo), getShapeParam(opInfo, 0), getPaddingType(opInfo));
 }
 
-static INode::Ref createPool(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createPool(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
 {
   return g->create<ops::PoolOp>("y", getShapeParam(opInfo, 0), getShapeParam(opInfo, 1),
                                 getPoolingType(opInfo), getPaddingType(opInfo),
                                 ops::PoolOp::BorderType::ZEROFILLED);
 }
 
-static INode::Ref createConcatenation(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createConcatenation(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
 {
   return g->create<ops::ConcatOp>("y", opInfo->inputs()->size(), getAxis(opInfo));
 }
 
-static INode::Ref createReshape(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createReshape(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
 {
   auto op = g->create<ops::ReshapeOp>("y");
-  op->getOperation()->setOutputShape(0, getShapeParam(opInfo, 0));
+  op->setOutputShape(0, getShapeParam(opInfo, 0));
   return op;
 }
 
-static INode::Ref createReLU(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createReLU(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
 {
   (void)opInfo;
   return g->create<ops::ReluOp>("y");
 }
 
-static INode::Ref createCappedReLU(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createCappedReLU(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
 {
   return g->create<ops::CappedReluOp>("y", getAxis(opInfo));
 }
 
-static INode::Ref createSoftmax(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createSoftmax(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
 {
   return g->create<ops::SoftmaxOp>("y", getAxis(opInfo));
 }
 
-static INode::Ref createBiasAdd(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createBiasAdd(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
 {
   return g->create<ops::BiasAddOp>("y", *getKernel(opInfo));
 }
 
-static INode::Ref createOp(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
+static Operation* createOp(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
 {
   switch (opInfo->op())
   {
@@ -137,15 +139,15 @@ std::unique_ptr<Graph> make_graph(const opinfo::OperatorInfo* opInfo)
   for (unsigned int i = 0; i < opInfo->inputs()->size(); ++i)
   {
     // Create i-th input node
-    auto inputNode = g->create<ops::VariableOp>("x" + std::to_string(i));
+    auto inputOp = g->create<ops::VariableOp>("x" + std::to_string(i));
 
     // Connect i-th operation input to i-th input node
-    opNode->connectInputTo(i, inputNode->getOutput(0));
+    opNode->connectInputTo(i, inputOp->getOutput(0));
 
     // Set input shape
     auto inputShapeIter = opInfo->inputs()->Get(i)->shape()->dims();
     Shape inputShape = ShapeHelper::createShape(*inputShapeIter, inputShapeIter->size());
-    inputNode->getOperation()->setOutputShape(0, inputShape);
+    inputOp->setOutputShape(0, inputShape);
   }
 
   // Mark outputs
diff --git a/contrib/nnc/tests/interpreter/graph_creator.h b/contrib/nnc/tests/interpreter/graph_creator.h
index 9b84f7d..a0bc109 100644
--- a/contrib/nnc/tests/interpreter/graph_creator.h
+++ b/contrib/nnc/tests/interpreter/graph_creator.h
@@ -17,7 +17,7 @@
 #ifndef NNC_INTERPRETER_OP_TEST_GRAPH_CREATOR_H
 #define NNC_INTERPRETER_OP_TEST_GRAPH_CREATOR_H
 
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 
 std::unique_ptr<nnc::mir::Graph> make_graph(const opinfo::OperatorInfo* opInfo);
diff --git a/contrib/nnc/tests/interpreter/op_test.cpp b/contrib/nnc/tests/interpreter/op_test.cpp
index 6b87d1e..0c98844 100644
--- a/contrib/nnc/tests/interpreter/op_test.cpp
+++ b/contrib/nnc/tests/interpreter/op_test.cpp
@@ -22,7 +22,7 @@
 #include "op_info_generated.h"
 #include "passes/interpreter/Interpreter.h"
 
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 
 #include "op_info_util.h"
 #include "graph_creator.h"
diff --git a/contrib/nnc/tests/soft_backend/CompileCPP.cpp b/contrib/nnc/tests/soft_backend/CompileCPP.cpp
index 27252a4..22c11ec 100644
--- a/contrib/nnc/tests/soft_backend/CompileCPP.cpp
+++ b/contrib/nnc/tests/soft_backend/CompileCPP.cpp
@@ -28,7 +28,7 @@
 #include "support/CommandLine.h"
 #include "option/Options.h"
 
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 #include "core/modelIR/Shape.h"
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/VariableOp.h"
@@ -48,16 +48,16 @@ using namespace nnc::mir;
 // Creates simple graph with input and output
 void fillGraph(Graph &g)
 {
-  INode *opNode = g.create<ops::ReluOp>("out");
+  Operation* outputOp = g.create<ops::ReluOp>("out");
 
   Shape inputShape{1, 2, 3};
-  INode *inputNode = g.create<ops::VariableOp>("in");
+  Operation* inputOp = g.create<ops::VariableOp>("in");
 
-  opNode->connectInputTo(0, inputNode->getOutput(0));
+  outputOp->connectInputTo(0, inputOp->getOutput(0));
 
-  inputNode->getOperation()->setOutputShape(0, inputShape);
+  inputOp->setOutputShape(0, inputShape);
 
-  g.markOutput(opNode);
+  g.markOutput(outputOp);
 
   ShapeInference shapeInferencer;
   g.accept(&shapeInferencer);
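The rewritten `fillGraph` above doubles as the canonical minimal example of the new API. The same steps as a free-standing sketch (the function name is ours; headers as renamed by this patch):

    #include "core/modelIR/Graph.h"
    #include "core/modelIR/ShapeInference.h"
    #include "core/modelIR/operations/ReluOp.h"
    #include "core/modelIR/operations/VariableOp.h"

    using namespace nnc::mir;

    static void buildTinyGraph(Graph& g) {
      Operation* in = g.create<ops::VariableOp>("in");  // graph owns its operations
      Operation* out = g.create<ops::ReluOp>("out");
      out->connectInputTo(0, in->getOutput(0));         // edge: in -> out
      in->setOutputShape(0, Shape{1, 2, 3});            // seed shape for inference
      g.markOutput(out);
      ShapeInference inferencer;
      g.accept(&inferencer);                            // propagates shapes to "out"
    }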
"core/modelIR/operations/VariableOp.h" #include "core/modelIR/operations/ReluOp.h" - #include "core/modelIR/operations/ConcatOp.h" namespace { @@ -14,16 +14,16 @@ class DumpVisitor : public Visitor { public: DumpVisitor(std::ostream& s) : _s(s) {} - void visit(INode* node, ops::VariableOp& op) override { - _s << "i" << node->getName(); + void visit(ops::VariableOp& op) override { + _s << "i" << op.getName(); }; - void visit(INode* node, ops::ReluOp& op) override { - _s << "r" << node->getName(); + void visit(ops::ReluOp& op) override { + _s << "r" << op.getName(); } - void visit(INode* node, ops::ConcatOp& op) override { - _s << "c" << node->getName(); + void visit(ops::ConcatOp& op) override { + _s << "c" << op.getName(); } std::ostream& _s; @@ -77,7 +77,7 @@ TEST(Graph, ReplaceOutputs) { g->replaceOutputNodes({"op3"}); - std::vector expectedOutputs{n3}; + std::vector expectedOutputs{n3}; ASSERT_EQ(g->collectOutputs(), expectedOutputs); delete g; }; @@ -94,7 +94,7 @@ TEST(Graph, ReplaceOutputNodeWithInput) { auto in2 = g->replaceWithInputNode(n2); - std::vector expectedInputs{in2, n1}; + std::vector expectedInputs{in2, n1}; ASSERT_EQ(g->collectInputs(), expectedInputs); delete g; } diff --git a/contrib/nnc/unittests/core/NodeReplacer.cpp b/contrib/nnc/unittests/core/NodeReplacer.cpp index bdc4f81..4179f69 100644 --- a/contrib/nnc/unittests/core/NodeReplacer.cpp +++ b/contrib/nnc/unittests/core/NodeReplacer.cpp @@ -1,7 +1,9 @@ #include -#include "core/modelIR/graph.h" +#include "core/modelIR/Graph.h" +#include "core/modelIR/operations/VariableOp.h" #include "core/modelIR/operations/ReluOp.h" +#include "core/modelIR/operations/ConcatOp.h" namespace { @@ -12,16 +14,16 @@ class DumpVisitor : public Visitor { public: DumpVisitor(std::ostream& s) : _s(s) {} - void visit(INode* node, ops::VariableOp& op) override { - _s << "i" << node->getName(); + void visit(ops::VariableOp& op) override { + _s << "i" << op.getName(); }; - void visit(INode* node, ops::ReluOp& op) override { - _s << "r" << node->getName(); + void visit(ops::ReluOp& op) override { + _s << "r" << op.getName(); } - void visit(INode* node, ops::ConcatOp& op) override { - _s << "c" << node->getName(); + void visit(ops::ConcatOp& op) override { + _s << "c" << op.getName(); } std::ostream& _s; diff --git a/contrib/nnc/unittests/core/ShapeInference.cpp b/contrib/nnc/unittests/core/ShapeInference.cpp index 6497990..0322809 100644 --- a/contrib/nnc/unittests/core/ShapeInference.cpp +++ b/contrib/nnc/unittests/core/ShapeInference.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
diff --git a/contrib/nnc/unittests/core/ShapeInference.cpp b/contrib/nnc/unittests/core/ShapeInference.cpp
index 6497990..0322809 100644
--- a/contrib/nnc/unittests/core/ShapeInference.cpp
+++ b/contrib/nnc/unittests/core/ShapeInference.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 #include "core/modelIR/ShapeInference.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/SqueezeOp.h"
@@ -31,16 +31,16 @@ TEST(ShapeInferenceTest, ReshapeAutoDimension) {
   Shape resultShape{ 10, 1, 10 };
 
   auto input = g.create<ops::VariableOp>("input");
-  input->getOperation()->setOutputShape(0, Shape{ 10, 2, 5} );
+  input->setOutputShape(0, Shape{ 10, 2, 5} );
 
-  auto n = g.create<ops::ReshapeOp>("reshape");
-  n->getOperation()->setInputShape( 0, Shape{10, 2, 5} );
-  n->getOperation()->setOutputShape(0, Shape{10, 1, Shape::AUTO_DIM} );
-  n->connectInputTo(0, input->getOutput(0));
+  auto op = g.create<ops::ReshapeOp>("reshape");
+  op->setInputShape( 0, Shape{10, 2, 5} );
+  op->setOutputShape(0, Shape{10, 1, Shape::AUTO_DIM} );
+  op->connectInputTo(0, input->getOutput(0));
 
-  si.visit(n, *static_cast<ops::ReshapeOp*>(n->getOperation()));
+  si.visit(*dynamic_cast<ops::ReshapeOp*>(op));
 
-  ASSERT_EQ(resultShape, n->getOperation()->getOutputShape(0));
+  ASSERT_EQ(resultShape, op->getOutputShape(0));
 }
 
 TEST(ShapeInferenceTest, ReshapeAutoDimensionVaryRank) {
@@ -53,21 +53,21 @@ TEST(ShapeInferenceTest, ReshapeAutoDimensionVaryRank) {
 
   auto input = g.create<ops::VariableOp>("input");
-  input->getOperation()->setOutputShape(0, inputShape);
+  input->setOutputShape(0, inputShape);
 
-  auto n = g.create<ops::ReshapeOp>("reshape");
-  n->getOperation()->setInputShape( 0, inputShape);
-  n->connectInputTo(0, input->getOutput(0));
+  auto op = g.create<ops::ReshapeOp>("reshape");
+  op->setInputShape( 0, inputShape);
+  op->connectInputTo(0, input->getOutput(0));
 
   // test shrink
-  n->getOperation()->setOutputShape(0, Shape{10, Shape::AUTO_DIM});
-  si.visit(n, *static_cast<ops::ReshapeOp*>(n->getOperation()));
-  ASSERT_EQ(resultShapeShrink, n->getOperation()->getOutputShape(0));
+  op->setOutputShape(0, Shape{10, Shape::AUTO_DIM});
+  si.visit(*dynamic_cast<ops::ReshapeOp*>(op));
+  ASSERT_EQ(resultShapeShrink, op->getOutputShape(0));
 
   // test expansion
-  n->getOperation()->setOutputShape(0, Shape{5, Shape::AUTO_DIM, 2, 2});
-  si.visit(n, *static_cast<ops::ReshapeOp*>(n->getOperation()));
-  ASSERT_EQ(resultShapeExpand, n->getOperation()->getOutputShape(0));
+  op->setOutputShape(0, Shape{5, Shape::AUTO_DIM, 2, 2});
+  si.visit(*dynamic_cast<ops::ReshapeOp*>(op));
+  ASSERT_EQ(resultShapeExpand, op->getOutputShape(0));
 }
 
 TEST(ShapeInferenceTest, SqueezeTestAllDims) {
@@ -78,14 +78,14 @@ TEST(ShapeInferenceTest, SqueezeTestAllDims) {
   Shape expected_shape{2, 4};
 
   auto input = g.create<ops::VariableOp>("input");
-  input->getOperation()->setOutputShape(0, input_shape);
+  input->setOutputShape(0, input_shape);
 
   auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", std::vector<int32_t>{});
   sq1->connectInputTo(0, input->getOutput(0));
 
   g.accept(&si);
 
-  ASSERT_EQ(sq1->getOperation()->getOutputShape(0), expected_shape);
+  ASSERT_EQ(sq1->getOutputShape(0), expected_shape);
 }
 
 TEST(ShapeInferenceTest, SqueezeTestSpecificDims) {
@@ -96,7 +96,7 @@ TEST(ShapeInferenceTest, SqueezeTestSpecificDims) {
   Shape expected_shape{1, 2, 4};
 
   auto input = g.create<ops::VariableOp>("input");
-  input->getOperation()->setOutputShape(0, input_shape);
+  input->setOutputShape(0, input_shape);
 
   auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", std::vector<int32_t>{2});
 
@@ -104,7 +104,7 @@ TEST(ShapeInferenceTest, SqueezeTestSpecificDims) {
 
   g.accept(&si);
 
-  ASSERT_EQ(sq1->getOperation()->getOutputShape(0), expected_shape);
+  ASSERT_EQ(sq1->getOutputShape(0), expected_shape);
 }
 
 TEST(ShapeInferenceTest, SqueezeTestScalarResult) {
@@ -115,7 +115,7 @@ TEST(ShapeInferenceTest, SqueezeTestScalarResult) {
   Shape expected_shape{1};
 
   auto input = g.create<ops::VariableOp>("input");
-  input->getOperation()->setOutputShape(0, input_shape);
+  input->setOutputShape(0, input_shape);
 
   auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", std::vector<int32_t>{});
 
@@ -123,5 +123,5 @@ TEST(ShapeInferenceTest, SqueezeTestScalarResult) {
 
   g.accept(&si);
 
-  ASSERT_EQ(sq1->getOperation()->getOutputShape(0), expected_shape);
+  ASSERT_EQ(sq1->getOutputShape(0), expected_shape);
 }
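The `AUTO_DIM` expectations in these tests follow from one invariant: reshape preserves the element count, so the auto dimension is the input element count divided by the product of the fixed output dimensions. Checking the first test by hand (plain arithmetic, not nnc code):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t inputElems = 10 * 2 * 5;              // input {10, 2, 5} -> 100 elements
      int32_t fixedDims  = 10 * 1;                  // output {10, 1, AUTO_DIM}
      int32_t autoDim    = inputElems / fixedDims;  // 100 / 10 = 10
      assert(autoDim == 10);                        // inferred shape: {10, 1, 10}
      return 0;
    }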
diff --git a/contrib/nnc/unittests/core/ir_node.cpp b/contrib/nnc/unittests/core/ir_node.cpp
deleted file mode 100644
index 7a197bd..0000000
--- a/contrib/nnc/unittests/core/ir_node.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "core/modelIR/operations/operation.h"
-#include "core/modelIR/operations/ReshapeOp.h"
-#include "core/modelIR/ir_node.h"
-
-#include <gtest/gtest.h>
-
-using namespace nnc::mir;
-
-TEST(IRNode, ConnectionTest) {
-
-  auto node1 = Node<ops::ReshapeOp>::createNode("node1", 0);
-  auto node2 = Node<ops::ReshapeOp>::createNode("node2", 1);
-
-  node2->connectInputTo(0, node1->getOutput(0));
-
-  ASSERT_EQ(node1->getId(), node2->getPrevNodes()[0].node->getId());
-
-  delete node1;
-  delete node2;
-}
diff --git a/contrib/nnc/unittests/core/operation.cpp b/contrib/nnc/unittests/core/operation.cpp
index 6604989..f7b5eaa 100644
--- a/contrib/nnc/unittests/core/operation.cpp
+++ b/contrib/nnc/unittests/core/operation.cpp
@@ -14,7 +14,8 @@
  * limitations under the License.
  */
 
-#include "core/modelIR/operations/operation.h"
+#include "core/modelIR/Operation.h"
+#include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
 #include "core/modelIR/operations/ConcatOp.h"
 
@@ -22,11 +23,26 @@
 
 using namespace nnc::mir;
 
-TEST(OpDescription, InputOutputShapeTest) {
+TEST(Operation, ConnectionTest) {
+
+  auto op1 = new ops::ReshapeOp();
+  op1->setId(0);
+  auto op2 = new ops::ReshapeOp();
+  op2->setId(1);
+
+  op2->connectInputTo(0, op1->getOutput(0));
+
+  ASSERT_EQ(op1->getId(), op2->getPrevNodes()[0].op->getId());
+
+  delete op1;
+  delete op2;
+}
+
+TEST(Operation, InputOutputShapeTest) {
   Shape inShape{1,2,3};
   Shape outShape{3,2,1};
 
-  OpDescription op(1, 1);
+  ops::SoftmaxOp op(0);
   op.setInputShape(0, inShape );
   op.setOutputShape(0, outShape );
 
@@ -34,7 +50,7 @@
   ASSERT_EQ(outShape, op.getOutputShape(0));
 }
 
-TEST(OpDescription, SoftmaxAxisTest) {
+TEST(Operation, SoftmaxAxisTest) {
   Shape inShape{1,2,3};
 
   ops::SoftmaxOp op_1(1);
@@ -50,7 +66,7 @@
   ASSERT_EQ(op_n3.getAxis(), 0);
 }
 
-TEST(OpDescription, ConcatAxisTest) {
+TEST(Operation, ConcatAxisTest) {
   Shape inShape{1,2,3};
 
   ops::ConcatOp op_1(2, 1);
diff --git a/contrib/nnc/unittests/pass/PassManagerTest.cpp b/contrib/nnc/unittests/pass/PassManagerTest.cpp
index c898251..d0397ae 100644
--- a/contrib/nnc/unittests/pass/PassManagerTest.cpp
+++ b/contrib/nnc/unittests/pass/PassManagerTest.cpp
@@ -16,7 +16,7 @@
 
 #include <gtest/gtest.h>
 
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 #include "support/CommandLine.h"
 #include "pass/Pass.h"
 #include "pass/PassData.h"
diff --git a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
index 23a4f52..e6ea956 100644
--- a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
+++ b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
@@ -70,7 +70,7 @@
 // various headers
 #include "core/modelIR/TensorVariant.h"
 #include "core/modelIR/Tensor.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 #include "core/modelIR/ShapeRange.h"
 #include "core/modelIR/ShapeInference.h"
 
@@ -95,33 +95,33 @@ namespace irOps = nnc::mir::ops;
  */
 
 /** Creates graph with one operation generated by opGen function and returns this operation node*/
-mir::INode *fillGraph(mir::Graph &g, function<mir::INode* (mir::Graph &g)> opGen,
-                      const vector<unique_ptr<mir::TensorVariant>> &inputNTensors)
+mir::Operation* fillGraph(mir::Graph& g, function<mir::Operation* (mir::Graph& g)> opGen,
+                          const vector<unique_ptr<mir::TensorVariant>>& inputNTensors)
 {
-  // Create operation node
-  mir::INode *opNode = opGen(g);
+  // Create operation
+  mir::Operation* op = opGen(g);
 
-  int numInputs = opNode->getPrevNodes().size();
+  int numInputs = op->getPrevNodes().size();
   assert(inputNTensors.size() == static_cast<size_t>(numInputs));
   for (int i = 0; i < numInputs; ++i)
   {
     // Create i-th input node
-    auto inputNode = g.create<mir::ops::VariableOp>("x" + std::to_string(i));
+    auto inputOp = g.create<mir::ops::VariableOp>("x" + std::to_string(i));
 
     // Connect i-th operation input to i-th input node
-    opNode->connectInputTo(i, inputNode->getOutput(0));
+    op->connectInputTo(i, inputOp->getOutput(0));
 
     // Set input shape
-    inputNode->getOperation()->setOutputShape(0, inputNTensors[i]->getShape());
+    inputOp->setOutputShape(0, inputNTensors[i]->getShape());
   }
 
   // Mark outputs
-  g.markOutput(opNode);
+  g.markOutput(op);
 
   // Run shape inference
   mir::ShapeInference shapeInferencer;
   g.accept(&shapeInferencer);
 
-  return opNode;
+  return op;
 }
 
 /** Fills NNC Shape object with data from src container*/
@@ -255,16 +255,16 @@ void compareResults(const mir::TensorVariant &refNTensor, const Tensor &testATensor)
  * This function creates test graph, runs interpreter, specifies artifact operation and compares results
  */
 template <typename TestFunc, typename ...Args>
-void createAndRunTestGraph(function<mir::INode* (mir::Graph&)> opGenerator, TestFunc artifactOperation,
+void createAndRunTestGraph(function<mir::Operation* (mir::Graph&)> opGenerator, TestFunc artifactOperation,
                            const vector<unique_ptr<mir::TensorVariant>> &inputNTensors,
                            const Args &...inputATensors)
 {
   mir::Graph g;
-  mir::INode *actualOperation = fillGraph(g, opGenerator, inputNTensors);
+  mir::Operation *actualOperation = fillGraph(g, opGenerator, inputNTensors);
 
   // serialize data for soft backend operation
   list<OpDescr> inferenceSequence;
   OpDescr opDescr;
-  opDescr._node = actualOperation;
+  opDescr._op = actualOperation;
   inferenceSequence.push_back(opDescr);
   Serializer serializer;
   serializer.serialize(inferenceSequence);
@@ -590,7 +590,7 @@ TEST(cpp_operations_test, reshape)
   auto opGenerator = [nOutputShape](mir::Graph &g)
   {
     auto op = g.create<mir::ops::ReshapeOp>("y");
-    op->getOperation()->setOutputShape(0, nOutputShape);
+    op->setOutputShape(0, nOutputShape);
     return op;
   };
 
diff --git a/contrib/nnc/unittests/soft_backend/Generator.cpp b/contrib/nnc/unittests/soft_backend/Generator.cpp
index b62da3e..7ff9214 100644
--- a/contrib/nnc/unittests/soft_backend/Generator.cpp
+++ b/contrib/nnc/unittests/soft_backend/Generator.cpp
@@ -87,9 +87,9 @@ TEST(Generator, check_generator_call)
   cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
 
   nnc::mir::Graph g;
-  INode *input = g.create<ops::VariableOp>("input");
-  input->getOperation()->setOutputShape(0, Shape({1,2,3,4}));
-  INode *output = g.create<ops::ReluOp>("output");
+  Operation* input = g.create<ops::VariableOp>("input");
+  input->setOutputShape(0, Shape({1,2,3,4}));
+  Operation* output = g.create<ops::ReluOp>("output");
   output->connectInputTo(0, input->getOutput(0));
 
   // test that generator creates output dir and files
diff --git a/contrib/nnc/utils/caffe_dot_dumper/model_dump.cpp b/contrib/nnc/utils/caffe_dot_dumper/model_dump.cpp
index 34f0c8d..4493648 100644
--- a/contrib/nnc/utils/caffe_dot_dumper/model_dump.cpp
+++ b/contrib/nnc/utils/caffe_dot_dumper/model_dump.cpp
@@ -19,7 +19,7 @@
 #include "support/CommandLine.h"
 #include "option/Options.h"
 #include "passes/caffe_frontend/caffe_importer.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 #include "core/modelIR/IrDotDumper.h"
 #include "core/modelIR/ShapeInference.h"
 #include "pass/PassException.h"
diff --git a/contrib/nnc/utils/tflite_dot_dumper/sanity_check.cpp b/contrib/nnc/utils/tflite_dot_dumper/sanity_check.cpp
index e40d560..9fa193f 100644
--- a/contrib/nnc/utils/tflite_dot_dumper/sanity_check.cpp
+++ b/contrib/nnc/utils/tflite_dot_dumper/sanity_check.cpp
@@ -20,7 +20,7 @@
 #include "pass/PassException.h"
 #include "option/Options.h"
 #include "passes/tflite_frontend/tflite_importer.h"
-#include "core/modelIR/graph.h"
+#include "core/modelIR/Graph.h"
 #include "core/modelIR/IrDotDumper.h"
 #include "core/modelIR/ShapeInference.h"
 
-- 
2.7.4