From 29d3cda46a233ee8575b480c9f6e6032c50752dd Mon Sep 17 00:00:00 2001
From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Fri, 8 Feb 2019 16:41:37 +0300
Subject: [PATCH] [nnc] Redesign Model IR. Part 2. (#3017)

* Remove `IODescriptor` type alias.
* Add non-const variants of `getNode` method to `Operation::Output` and
  `Operation::Input` classes.

Signed-off-by: Sergei Barannikov
---
 contrib/nnc/core/modelIR/Graph.cpp | 6 +-
 contrib/nnc/core/modelIR/GraphPatternMatcher.cpp | 4 +-
 contrib/nnc/core/modelIR/Operation.cpp | 2 +-
 contrib/nnc/core/modelIR/ir_dot_builder.cpp | 21 ++--
 .../nnc/core/modelIR/operations/TransposeOp.cpp | 2 +-
 contrib/nnc/include/core/modelIR/Operation.h | 9 +-
 contrib/nnc/include/core/modelIR/ir_dot_builder.h | 6 +-
 .../include/core/modelIR/operations/BatchNormOp.h | 2 +-
 .../include/core/modelIR/operations/BiasAddOp.h | 3 +-
 .../include/core/modelIR/operations/CappedReluOp.h | 3 +-
 .../nnc/include/core/modelIR/operations/ConcatOp.h | 2 +-
 .../nnc/include/core/modelIR/operations/Conv2DOp.h | 4 +-
 .../include/core/modelIR/operations/Deconv2DOp.h | 12 +--
 .../core/modelIR/operations/DepthwiseConv2DOp.h | 4 +-
 .../include/core/modelIR/operations/DropoutOp.h | 2 +-
 .../core/modelIR/operations/ElementwiseOp.h | 4 +-
 .../nnc/include/core/modelIR/operations/EluOp.h | 2 +-
 .../core/modelIR/operations/FullyConnectedOp.h | 3 +-
 .../nnc/include/core/modelIR/operations/GatherOp.h | 2 +-
 .../nnc/include/core/modelIR/operations/GemmOp.h | 3 +-
 .../include/core/modelIR/operations/LeakyReluOp.h | 4 +-
 .../nnc/include/core/modelIR/operations/OutputOp.h | 2 +-
 .../nnc/include/core/modelIR/operations/PadOp.h | 4 +-
 .../nnc/include/core/modelIR/operations/PoolOp.h | 2 +-
 .../include/core/modelIR/operations/ReduceFOp.h | 2 +-
 .../nnc/include/core/modelIR/operations/ReluOp.h | 2 +-
 .../include/core/modelIR/operations/ReshapeOp.h | 2 +-
 .../nnc/include/core/modelIR/operations/ResizeOp.h | 4 +-
 .../nnc/include/core/modelIR/operations/ScaleOp.h | 3 +-
 .../include/core/modelIR/operations/SigmoidOp.h | 2 +-
 .../nnc/include/core/modelIR/operations/SliceOp.h | 6 +-
 .../include/core/modelIR/operations/SoftmaxOp.h | 2 +-
 .../nnc/include/core/modelIR/operations/SqrtOp.h | 2 +-
 .../include/core/modelIR/operations/SqueezeOp.h | 4 +-
 .../nnc/include/core/modelIR/operations/TanhOp.h | 2 +-
 .../include/core/modelIR/operations/TransposeOp.h | 2 +-
 .../nnc/include/passes/interpreter/Interpreter.h | 4 +-
 .../passes/acl_soft_backend/AclCppOpGenerator.cpp | 80 +++++++-------
 .../passes/acl_soft_backend/AclCppOpGenerator.h | 7 +-
 .../nnc/passes/caffe2_frontend/caffe2_importer.cpp | 18 ++--
 .../nnc/passes/caffe2_frontend/caffe2_importer.h | 9 +-
 .../passes/caffe2_frontend/caffe2_op_creator.cpp | 82 +++++++-------
 .../nnc/passes/caffe2_frontend/caffe2_op_creator.h | 92 +++++++-------
 .../nnc/passes/caffe_frontend/caffe_importer.cpp | 19 ++--
 contrib/nnc/passes/caffe_frontend/caffe_importer.h | 8 +-
 .../nnc/passes/caffe_frontend/caffe_op_creator.cpp | 120 +++++++++++----------
 .../nnc/passes/caffe_frontend/caffe_op_creator.h | 92 ++++++++--------
 contrib/nnc/passes/interpreter/Interpreter.cpp | 2 +-
 .../nnc/passes/interpreter/interpreter_pass.cpp | 8 +-
 .../nnc/passes/onnx_frontend/ONNXImporterImpl.cpp | 15 +--
 .../nnc/passes/onnx_frontend/ONNXImporterImpl.h | 6 +-
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp | 80 +++++++-------
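As a quick orientation for reviewers, here is a minimal sketch of the call-site migration this patch performs, assuming the nnc ModelIR headers. `IODescriptor` was an alias for `Operation::Output*`, so former `IODescriptor` values become explicit `Operation::Output*` pointers, and the new non-const `getNode` overload yields a mutable `Operation*` from a non-const `Output`. The function and variable names below are illustrative, not taken from the patch:

    #include "core/modelIR/Graph.h"
    #include "core/modelIR/operations/ReluOp.h"

    using namespace nnc::mir;

    // Hypothetical transformation-pass fragment (names are illustrative).
    void appendRelu(Graph* graph, Operation* producer) {
      // Before this patch: IODescriptor out = producer->getOutput(0);
      Operation::Output* out = producer->getOutput(0);

      // Graph::create<Op>(name, ctor_args...), as used throughout the patch.
      Operation* relu = graph->create<ops::ReluOp>("relu", out);

      // The new non-const getNode() overload makes the mutable node
      // reachable from a non-const Output without a const_cast.
      Operation* node = relu->getOutput(0)->getNode();
      node->setName("relu1");
    }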
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h | 78 +++++++-------
 contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp | 5 +-
 .../nnc/passes/tflite_frontend/tflite_importer.cpp | 8 +-
 .../nnc/passes/tflite_frontend/tflite_importer.h | 10 +-
 .../passes/tflite_frontend/tflite_op_creator.cpp | 107 +++++++++---------
 .../nnc/passes/tflite_frontend/tflite_op_creator.h | 100 ++++++++---------
 contrib/nnc/unittests/acl_backend/MIRToDOM.cpp | 45 ++++----
 contrib/nnc/unittests/core/Graph.cpp | 5 +-
 contrib/nnc/unittests/core/ShapeInference.cpp | 6 +-
 .../nnc/unittests/soft_backend/CPPOperations.cpp | 77 +++++------
 .../nnc/unittests/soft_backend/ModelAnalyzer.cpp | 5 +-
 63 files changed, 622 insertions(+), 607 deletions(-)

diff --git a/contrib/nnc/core/modelIR/Graph.cpp b/contrib/nnc/core/modelIR/Graph.cpp
index 726ef75..ea42e6e 100644
--- a/contrib/nnc/core/modelIR/Graph.cpp
+++ b/contrib/nnc/core/modelIR/Graph.cpp
@@ -57,12 +57,12 @@ void Graph::accept(IVisitor* visitor) {
     Operation* src_node = q.front();
     q.pop_front();
     src_node->accept(visitor);
-    for (const auto& src_output : src_node->getOutputs()) {
-      for (const auto* consumer : src_output.getConsumers()) {
+    for (auto& src_output : src_node->getOutputs()) {
+      for (auto* consumer : src_output.getConsumers()) {
         Operation* dst_node = consumer->getNode();
         if (known_ops.count(dst_node) == 0) {
           bool allInputsResolved = true;
-          for (const auto& dst_input : dst_node->getInputs()) {
+          for (auto& dst_input : dst_node->getInputs()) {
             if (known_ops.count(dst_input.getProducer()->getNode()) == 0) {
               allInputsResolved = false;
             }
diff --git a/contrib/nnc/core/modelIR/GraphPatternMatcher.cpp b/contrib/nnc/core/modelIR/GraphPatternMatcher.cpp
index 61d1c65..9d29879 100644
--- a/contrib/nnc/core/modelIR/GraphPatternMatcher.cpp
+++ b/contrib/nnc/core/modelIR/GraphPatternMatcher.cpp
@@ -29,8 +29,8 @@ std::vector<std::pair<Operation*, Operation*>> GraphPatternMatcher::matchEdge(
   std::vector<std::pair<Operation*, Operation*>> matches;

   for (auto* start: _g->getNodes()) {
     if (p1(start)) {
-      for (const auto& out : start->getOutputs()) {
-        for (const auto* consumer : out.getConsumers()) {
+      for (auto& out : start->getOutputs()) {
+        for (auto* consumer : out.getConsumers()) {
           Operation* end = consumer->getNode();
           if (p2(end)) {
             matches.emplace_back(std::make_pair(start, end));
diff --git a/contrib/nnc/core/modelIR/Operation.cpp b/contrib/nnc/core/modelIR/Operation.cpp
index fcbb420..7b0deda 100644
--- a/contrib/nnc/core/modelIR/Operation.cpp
+++ b/contrib/nnc/core/modelIR/Operation.cpp
@@ -50,7 +50,7 @@
 namespace nnc {
 namespace mir {

-Operation::Operation(Type type, const std::vector<IODescriptor>& inputs, std::size_t num_outputs)
+Operation::Operation(Type type, const std::vector<Output*>& inputs, std::size_t num_outputs)
     : _type(type) {
   for (std::size_t i = 0; i < inputs.size(); ++i) {
     _inputs.emplace_back(this, i, inputs[i]);
diff --git a/contrib/nnc/core/modelIR/ir_dot_builder.cpp b/contrib/nnc/core/modelIR/ir_dot_builder.cpp
index 7d44607..88b03a4 100644
--- a/contrib/nnc/core/modelIR/ir_dot_builder.cpp
+++ b/contrib/nnc/core/modelIR/ir_dot_builder.cpp
@@ -16,32 +16,25 @@

 #include "core/modelIR/ir_dot_builder.h"

-namespace nnc
-{
-namespace mir
-{
+namespace nnc {
+namespace mir {

-void IrDotBuilder::updateWithOp(Operation* op, const DotIrNodeInfo& irNodeInfo)
-{
+void IrDotBuilder::updateWithOp(const Operation* op, const DotIrNodeInfo& irNodeInfo) {
   addNode(op, irNodeInfo);
-  for (auto &prev : op->getInputs())
-  {
+  for (auto& prev : op->getInputs()) {
     addEdge(prev.getProducer()->getNode(), op);
   }
 }

-void IrDotBuilder::writeDot(std::ostream &os)
-{
+void IrDotBuilder::writeDot(std::ostream& os) {
   os << "digraph D {" << std::endl << dot.str() << std::endl << "}" << std::endl;
 }

-void IrDotBuilder::addNode(Operation* op, const DotIrNodeInfo& irNode)
-{
+void IrDotBuilder::addNode(const Operation* op, const DotIrNodeInfo& irNode) {
   dot << op->getId() << " [shape=record label=\"" << irNode.getLabel() << "\"];" << std::endl;
 }

-void IrDotBuilder::addEdge(Operation* op1, Operation* op2)
-{
+void IrDotBuilder::addEdge(const Operation* op1, const Operation* op2) {
   dot << op1->getId() << " -> " << op2->getId() << ";" << std::endl;
 }

diff --git a/contrib/nnc/core/modelIR/operations/TransposeOp.cpp b/contrib/nnc/core/modelIR/operations/TransposeOp.cpp
index ff1420d..a8f5bef 100644
--- a/contrib/nnc/core/modelIR/operations/TransposeOp.cpp
+++ b/contrib/nnc/core/modelIR/operations/TransposeOp.cpp
@@ -20,7 +20,7 @@ namespace nnc {
 namespace mir {
 namespace ops {

-TransposeOp::TransposeOp(const IODescriptor& arg, const std::vector<std::size_t>& axis_order)
+TransposeOp::TransposeOp(Output* arg, const std::vector<std::size_t>& axis_order)
     : Operation(Type::transpose, {arg}), _axisOrder(axis_order) {
   assert(_axisOrder.size() == static_cast<std::size_t>(getInputShape(0).rank()));
   inferOutputShapes();
diff --git a/contrib/nnc/include/core/modelIR/Operation.h b/contrib/nnc/include/core/modelIR/Operation.h
index d0f5ec8..643deb1 100644
--- a/contrib/nnc/include/core/modelIR/Operation.h
+++ b/contrib/nnc/include/core/modelIR/Operation.h
@@ -49,7 +49,8 @@ public:
   Output& operator=(Output&&) = delete;

   /// @brief Returns the node this is an output of.
-  Operation* getNode() const { return _node; }
+  Operation* getNode() { return _node; }
+  const Operation* getNode() const { return _node; }

   /// @brief Returns the index of this output among all the outputs of the node.
   std::size_t getIndex() const { return _index; }
@@ -89,7 +90,8 @@ public:
   Input& operator=(Input&&) = delete;

   /// @brief Returns the node this is the input of.
-  Operation* getNode() const { return _node; }
+  Operation* getNode() { return _node; }
+  const Operation* getNode() const { return _node; }

   /// @brief Returns the index of this input among all the inputs of the node.
   std::size_t getIndex() const { return _index; }
@@ -175,9 +177,6 @@ private:
   std::deque<Output> _outputs;
 };

-// Convenient type alias for the duration of the transition process.
-using IODescriptor = Operation::Output*; - } // namespace mir } // namespace nnc diff --git a/contrib/nnc/include/core/modelIR/ir_dot_builder.h b/contrib/nnc/include/core/modelIR/ir_dot_builder.h index 2bbf9ff..4ab524d 100644 --- a/contrib/nnc/include/core/modelIR/ir_dot_builder.h +++ b/contrib/nnc/include/core/modelIR/ir_dot_builder.h @@ -35,12 +35,12 @@ class IrDotBuilder public: explicit IrDotBuilder() = default; - void updateWithOp(Operation* op, const DotIrNodeInfo& irNodeInfo); + void updateWithOp(const Operation* op, const DotIrNodeInfo& irNodeInfo); void writeDot(std::ostream &os); private: - void addNode(Operation* op, const DotIrNodeInfo& irNode); - void addEdge(Operation* op1, Operation* op2); + void addNode(const Operation* op, const DotIrNodeInfo& irNode); + void addEdge(const Operation* op1, const Operation* op2); std::stringstream dot; }; diff --git a/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h b/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h index c15a9a2..6ea6dde 100644 --- a/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h +++ b/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h @@ -25,7 +25,7 @@ namespace ops { class BatchNormOp : public Operation { public: - BatchNormOp(const IODescriptor& arg, float movingAvgFraction, float eps, bool spatial) + BatchNormOp(Output* arg, float movingAvgFraction, float eps, bool spatial) : Operation(Type::batchNorm, {arg}), _movingAvgFraction(movingAvgFraction), _eps(eps), _spatial(spatial) { // Infer output shape. diff --git a/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h b/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h index 3cbfaab..1698b70 100644 --- a/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h +++ b/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h @@ -26,8 +26,7 @@ namespace ops { class BiasAddOp : public Operation { public: - BiasAddOp(const IODescriptor& arg1, const IODescriptor& arg2) - : Operation(Type::biasAdd, {arg1, arg2}) { + BiasAddOp(Output* arg1, Output* arg2) : Operation(Type::biasAdd, {arg1, arg2}) { // Infer output shape. setOutputShape(0, getInputShape(0)); } diff --git a/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h b/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h index df05ac0..28f46e5 100644 --- a/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h +++ b/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h @@ -25,8 +25,7 @@ namespace ops { class CappedReluOp : public Operation { public: - CappedReluOp(const IODescriptor& arg, float cap) - : Operation(Type::cappedReLU, {arg}), _cap(cap) { + CappedReluOp(Output* arg, float cap) : Operation(Type::cappedReLU, {arg}), _cap(cap) { // Infer output shape. 
setOutputShape(0, getInputShape(0)); } diff --git a/contrib/nnc/include/core/modelIR/operations/ConcatOp.h b/contrib/nnc/include/core/modelIR/operations/ConcatOp.h index 90c7798..fb9a76f 100644 --- a/contrib/nnc/include/core/modelIR/operations/ConcatOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ConcatOp.h @@ -28,7 +28,7 @@ namespace ops { */ class ConcatOp : public Operation { public: - ConcatOp(const std::vector& args, int32_t axis) + ConcatOp(const std::vector& args, int32_t axis) : Operation(Type::concat, args), _axis(axis) { inferOutputShapes(); } diff --git a/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h b/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h index bd9a9da..dd7b80c 100644 --- a/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h +++ b/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h @@ -26,8 +26,8 @@ namespace ops { class Conv2DOp : public Operation { public: - Conv2DOp(const IODescriptor& input, - const IODescriptor& kernel, + Conv2DOp(Output* input, + Output* kernel, const Shape& strides, const std::vector& padding_before, const std::vector& padding_after) diff --git a/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h b/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h index 9a403b2..654e676 100644 --- a/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h +++ b/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h @@ -27,8 +27,8 @@ namespace ops { class DeConv2DOp : public Operation { public: - DeConv2DOp(const IODescriptor& input, - const IODescriptor& kernel, + DeConv2DOp(Output* input, + Output* kernel, const Shape& strides, const std::vector& paddings) : Operation(Type::deConv2D, {input, kernel}), @@ -39,8 +39,8 @@ public: inferOutputShapes(); } - DeConv2DOp(const IODescriptor& input, - const IODescriptor& kernel, + DeConv2DOp(Output* input, + Output* kernel, const Shape& strides, PaddingType padding_type) : Operation(Type::deConv2D, {input, kernel}), @@ -52,8 +52,8 @@ public: inferOutputShapes(); } - DeConv2DOp(const IODescriptor& input, - const IODescriptor& kernel, + DeConv2DOp(Output* input, + Output* kernel, const Shape& strides, PaddingType padding_type, const Shape& output_shape) diff --git a/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h b/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h index ca71211..1863997 100644 --- a/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h +++ b/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h @@ -26,8 +26,8 @@ namespace ops { class DepthwiseConv2DOp : public Operation { public: - DepthwiseConv2DOp(const IODescriptor& input, - const IODescriptor& kernel, + DepthwiseConv2DOp(Output* input, + Output* kernel, const Shape& strides, const std::vector& padding_before, const std::vector& padding_after) diff --git a/contrib/nnc/include/core/modelIR/operations/DropoutOp.h b/contrib/nnc/include/core/modelIR/operations/DropoutOp.h index 2a0c870..28c9424 100644 --- a/contrib/nnc/include/core/modelIR/operations/DropoutOp.h +++ b/contrib/nnc/include/core/modelIR/operations/DropoutOp.h @@ -25,7 +25,7 @@ namespace ops { class DropoutOp : public Operation { public: - DropoutOp(const IODescriptor& arg, float rate) : Operation(Type::dropout, {arg}), _rate(rate) { + DropoutOp(Output* arg, float rate) : Operation(Type::dropout, {arg}), _rate(rate) { // Infer output shape. 
setOutputShape(0, getInputShape(0)); } diff --git a/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h b/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h index 3d0c6a6..6067442 100644 --- a/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h @@ -39,8 +39,8 @@ public: * @param op_type Type of operation to perform * @param num_inputs Number of inputs */ - ElementwiseOp(const std::vector& args, OpType op_type) - : Operation(Type::elementwise, args), _opType(op_type), _needsBroadcast(false) { + ElementwiseOp(const std::vector& args, OpType op_type) + : Operation(Type::elementwise, args), _opType(op_type), _needsBroadcast(false) { inferOutputShapes(); }; diff --git a/contrib/nnc/include/core/modelIR/operations/EluOp.h b/contrib/nnc/include/core/modelIR/operations/EluOp.h index d095914..952e920 100644 --- a/contrib/nnc/include/core/modelIR/operations/EluOp.h +++ b/contrib/nnc/include/core/modelIR/operations/EluOp.h @@ -25,7 +25,7 @@ namespace ops { class EluOp : public Operation { public: - EluOp(const IODescriptor& arg, float alpha) : Operation(Type::ELU, {arg}), _alpha(alpha) { + EluOp(Output* arg, float alpha) : Operation(Type::ELU, {arg}), _alpha(alpha) { setOutputShape(0, getInputShape(0)); } diff --git a/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h b/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h index dcef052..feac36d 100644 --- a/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h +++ b/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h @@ -26,8 +26,7 @@ namespace ops { class FullyConnectedOp : public Operation { public: - FullyConnectedOp(const IODescriptor& arg1, const IODescriptor& arg2) - : Operation(Type::fullyConnected, {arg1, arg2}) { + FullyConnectedOp(Output* arg1, Output* arg2) : Operation(Type::fullyConnected, {arg1, arg2}) { inferOutputShapes(); } diff --git a/contrib/nnc/include/core/modelIR/operations/GatherOp.h b/contrib/nnc/include/core/modelIR/operations/GatherOp.h index 5e2b32c..1e63554 100644 --- a/contrib/nnc/include/core/modelIR/operations/GatherOp.h +++ b/contrib/nnc/include/core/modelIR/operations/GatherOp.h @@ -30,7 +30,7 @@ namespace ops { */ class GatherOp : public Operation { public: - GatherOp(const IODescriptor& data, const IODescriptor& indices, int32_t axis) + GatherOp(Output* data, Output* indices, int32_t axis) : Operation(Type::gather, {data, indices}), _axis(axis) { inferOutputShapes(); } diff --git a/contrib/nnc/include/core/modelIR/operations/GemmOp.h b/contrib/nnc/include/core/modelIR/operations/GemmOp.h index 8ab3ea5..d22f1bb 100644 --- a/contrib/nnc/include/core/modelIR/operations/GemmOp.h +++ b/contrib/nnc/include/core/modelIR/operations/GemmOp.h @@ -26,8 +26,7 @@ namespace ops { class GemmOp : public Operation { public: - GemmOp(IODescriptor a, IODescriptor b, IODescriptor c) : - Operation(Type::gemmOp, {a, b, c}) { + GemmOp(Output* a, Output* b, Output* c) : Operation(Type::gemmOp, {a, b, c}) { inferOutputShapes(); } diff --git a/contrib/nnc/include/core/modelIR/operations/LeakyReluOp.h b/contrib/nnc/include/core/modelIR/operations/LeakyReluOp.h index 0ff8964..ae0cf9c 100644 --- a/contrib/nnc/include/core/modelIR/operations/LeakyReluOp.h +++ b/contrib/nnc/include/core/modelIR/operations/LeakyReluOp.h @@ -25,8 +25,8 @@ namespace ops { class LeakyReluOp : public Operation { public: - explicit LeakyReluOp(const IODescriptor& arg, float alpha) - : Operation(Type::leakyReLU, {arg}), _alpha(alpha) { + 
explicit LeakyReluOp(Output* arg, float alpha) + : Operation(Type::leakyReLU, {arg}), _alpha(alpha) { // Infer output shape. setOutputShape(0, getInputShape(0)); } diff --git a/contrib/nnc/include/core/modelIR/operations/OutputOp.h b/contrib/nnc/include/core/modelIR/operations/OutputOp.h index c497835..49f0728 100644 --- a/contrib/nnc/include/core/modelIR/operations/OutputOp.h +++ b/contrib/nnc/include/core/modelIR/operations/OutputOp.h @@ -25,7 +25,7 @@ namespace ops { class OutputOp : public Operation { public: - explicit OutputOp(IODescriptor input) : Operation(Type::output, {input}) {} + explicit OutputOp(Output* input) : Operation(Type::output, {input}) {} }; } // namespace ops diff --git a/contrib/nnc/include/core/modelIR/operations/PadOp.h b/contrib/nnc/include/core/modelIR/operations/PadOp.h index d4211f8..79368df 100644 --- a/contrib/nnc/include/core/modelIR/operations/PadOp.h +++ b/contrib/nnc/include/core/modelIR/operations/PadOp.h @@ -32,12 +32,12 @@ class PadOp : public Operation { public: /** * @brief Class for Pad operation in modelIR - * @param arg IODescriptor + * @param arg The input * @param numDims Number of dimensions * @param paddings Vector with pairs of paddings (left, right) * @param scalar_value Constant value filling padded region */ - PadOp(const IODescriptor& arg, int32_t numDims, + PadOp(Output* arg, int32_t numDims, const std::vector>& paddings, const Scalar& scalar_value) : Operation(Type::pad, {arg}), _numDims(numDims), diff --git a/contrib/nnc/include/core/modelIR/operations/PoolOp.h b/contrib/nnc/include/core/modelIR/operations/PoolOp.h index 37ff694..6f25c5a 100644 --- a/contrib/nnc/include/core/modelIR/operations/PoolOp.h +++ b/contrib/nnc/include/core/modelIR/operations/PoolOp.h @@ -39,7 +39,7 @@ public: EMPTY // Consider that there are no elements outside of input shape }; - PoolOp(const IODescriptor& arg, + PoolOp(Output* arg, PoolingType pooling_type, const Shape& window_shape, const Shape& strides, diff --git a/contrib/nnc/include/core/modelIR/operations/ReduceFOp.h b/contrib/nnc/include/core/modelIR/operations/ReduceFOp.h index 1bdce27..8ccb359 100644 --- a/contrib/nnc/include/core/modelIR/operations/ReduceFOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ReduceFOp.h @@ -36,7 +36,7 @@ public: * @param keep_dims whether to keep the original rank * @param func_type function to reduce the tensor with (should be associative) */ - ReduceFOp(const IODescriptor& arg, + ReduceFOp(Output* arg, const std::vector& reduce_dims, bool keep_dims, FuncType func_type) diff --git a/contrib/nnc/include/core/modelIR/operations/ReluOp.h b/contrib/nnc/include/core/modelIR/operations/ReluOp.h index 58208a6..92f52ad 100644 --- a/contrib/nnc/include/core/modelIR/operations/ReluOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ReluOp.h @@ -25,7 +25,7 @@ namespace ops { class ReluOp : public Operation { public: - explicit ReluOp(const IODescriptor& arg) : Operation(Type::ReLU, {arg}) { + explicit ReluOp(Output* arg) : Operation(Type::ReLU, {arg}) { // Infer output shape. 
setOutputShape(0, getInputShape(0)); } diff --git a/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h b/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h index 6164498..5559862 100644 --- a/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h @@ -22,7 +22,7 @@ namespace ops { class ReshapeOp : public Operation { public: - ReshapeOp(const IODescriptor& arg, const Shape& shape) : Operation(Type::reshape, {arg}) { + ReshapeOp(Output* arg, const Shape& shape) : Operation(Type::reshape, {arg}) { const Shape& input_shape = getInputShape(0); auto output_shape = shape; diff --git a/contrib/nnc/include/core/modelIR/operations/ResizeOp.h b/contrib/nnc/include/core/modelIR/operations/ResizeOp.h index 3d04a86..1f9ed45 100644 --- a/contrib/nnc/include/core/modelIR/operations/ResizeOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ResizeOp.h @@ -37,7 +37,7 @@ public: nearestNeighbor, // TODO: BICUBIC and BILINEAR }; - ResizeOp(const IODescriptor& arg, ResizeMethod mode, const std::vector& scales) + ResizeOp(Output* arg, ResizeMethod mode, const std::vector& scales) : Operation(Type::resizeIm, {arg}), _mode(mode), _scales(scales) { // Infer output shape based on given scales. auto& input_shape = getInputShape(0); @@ -51,7 +51,7 @@ public: setOutputShape(0, output_shape); } - ResizeOp(const IODescriptor& arg, ResizeMethod mode, const Shape& output_shape) + ResizeOp(Output* arg, ResizeMethod mode, const Shape& output_shape) : Operation(Type::resizeIm, {arg}), _mode(mode) { // Calculate scales based on given shape. auto& input_shape = getInputShape(0); diff --git a/contrib/nnc/include/core/modelIR/operations/ScaleOp.h b/contrib/nnc/include/core/modelIR/operations/ScaleOp.h index c8a9743..cd2d0e8 100644 --- a/contrib/nnc/include/core/modelIR/operations/ScaleOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ScaleOp.h @@ -25,8 +25,7 @@ namespace ops { class ScaleOp : public Operation { public: - ScaleOp(const IODescriptor& arg1, const IODescriptor& arg2) - : Operation(Type::scale, {arg1, arg2}) { + ScaleOp(Output* arg1, Output* arg2) : Operation(Type::scale, {arg1, arg2}) { // Infer output shape. setOutputShape(0, getInputShape(0)); } diff --git a/contrib/nnc/include/core/modelIR/operations/SigmoidOp.h b/contrib/nnc/include/core/modelIR/operations/SigmoidOp.h index 13494c0..11a76ca 100644 --- a/contrib/nnc/include/core/modelIR/operations/SigmoidOp.h +++ b/contrib/nnc/include/core/modelIR/operations/SigmoidOp.h @@ -25,7 +25,7 @@ namespace ops { class SigmoidOp : public Operation { public: - explicit SigmoidOp(const IODescriptor& arg) : Operation(Type::sigmoid, {arg}) { + explicit SigmoidOp(Output* arg) : Operation(Type::sigmoid, {arg}) { // Infer output shape. 
setOutputShape(0, getInputShape(0)); } diff --git a/contrib/nnc/include/core/modelIR/operations/SliceOp.h b/contrib/nnc/include/core/modelIR/operations/SliceOp.h index 4787517..9c9cd6d 100644 --- a/contrib/nnc/include/core/modelIR/operations/SliceOp.h +++ b/contrib/nnc/include/core/modelIR/operations/SliceOp.h @@ -25,10 +25,8 @@ namespace ops { class SliceOp : public Operation { public: - SliceOp(const IODescriptor& arg, const Shape& starts, const Shape& sizes) : - Operation(Type::slice, {arg}), - _starts(starts), - _sizes(sizes) { + SliceOp(Output* arg, const Shape& starts, const Shape& sizes) + : Operation(Type::slice, {arg}), _starts(starts), _sizes(sizes) { inferOutputShapes(); } diff --git a/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h b/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h index edafcef..055f20e 100644 --- a/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h +++ b/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h @@ -28,7 +28,7 @@ namespace ops { */ class SoftmaxOp : public Operation { public: - SoftmaxOp(const IODescriptor& arg, int32_t axis) : Operation(Type::softmax, {arg}), _axis(axis) { + SoftmaxOp(Output* arg, int32_t axis) : Operation(Type::softmax, {arg}), _axis(axis) { setOutputShape(0, getInputShape(0)); } diff --git a/contrib/nnc/include/core/modelIR/operations/SqrtOp.h b/contrib/nnc/include/core/modelIR/operations/SqrtOp.h index 6382695..1704269 100644 --- a/contrib/nnc/include/core/modelIR/operations/SqrtOp.h +++ b/contrib/nnc/include/core/modelIR/operations/SqrtOp.h @@ -26,7 +26,7 @@ namespace ops { class SqrtOp : public Operation { public: - SqrtOp(const IODescriptor& arg) : Operation(Type::sqrt, {arg}) { + SqrtOp(Output* arg) : Operation(Type::sqrt, {arg}) { setOutputShape(0, getInputShape(0)); }; }; diff --git a/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h b/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h index b17db47..243ab3e 100644 --- a/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h +++ b/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h @@ -26,8 +26,8 @@ namespace ops { class SqueezeOp : public Operation { public: - SqueezeOp(const IODescriptor& arg, const std::vector& dims_to_squeeze) - : Operation(Type::squeeze, {arg}), _dims_to_squeeze(dims_to_squeeze) { + SqueezeOp(Output* arg, const std::vector& dims_to_squeeze) + : Operation(Type::squeeze, {arg}), _dims_to_squeeze(dims_to_squeeze) { // Infer output shape. inferOutputShapes(); } diff --git a/contrib/nnc/include/core/modelIR/operations/TanhOp.h b/contrib/nnc/include/core/modelIR/operations/TanhOp.h index 0e00a06..5db818f 100644 --- a/contrib/nnc/include/core/modelIR/operations/TanhOp.h +++ b/contrib/nnc/include/core/modelIR/operations/TanhOp.h @@ -25,7 +25,7 @@ namespace ops { class TanhOp : public Operation { public: - explicit TanhOp(const IODescriptor& arg) : Operation(Type::tanh, {arg}) { + explicit TanhOp(Output* arg) : Operation(Type::tanh, {arg}) { // Infer output shape. 
setOutputShape(0, getInputShape(0)); } diff --git a/contrib/nnc/include/core/modelIR/operations/TransposeOp.h b/contrib/nnc/include/core/modelIR/operations/TransposeOp.h index 7f11298..806eab5 100644 --- a/contrib/nnc/include/core/modelIR/operations/TransposeOp.h +++ b/contrib/nnc/include/core/modelIR/operations/TransposeOp.h @@ -31,7 +31,7 @@ namespace ops { */ class TransposeOp : public Operation { public: - TransposeOp(const IODescriptor& arg, const std::vector& axis_order); + TransposeOp(Output* arg, const std::vector& axis_order); const std::vector& getAxisOrder() const { return _axisOrder; } diff --git a/contrib/nnc/include/passes/interpreter/Interpreter.h b/contrib/nnc/include/passes/interpreter/Interpreter.h index 6c93678..4c12ce8 100644 --- a/contrib/nnc/include/passes/interpreter/Interpreter.h +++ b/contrib/nnc/include/passes/interpreter/Interpreter.h @@ -35,7 +35,7 @@ public: void setInput(const std::string& name, const TensorVariant& data); - TensorVariant getResult(IODescriptor tensor); + TensorVariant getResult(const Operation::Output* tensor); void visit(ops::BatchNormOp& op) override; void visit(ops::BiasAddOp& op) override; @@ -69,8 +69,6 @@ public: void visit(ops::TanhOp& op) override; void visit(ops::TransposeOp& op) override; - void dump(Operation& op, bool all = false); - private: /// @brief Gets the computed inputs for the operation. std::vector> getInputTensors(const Operation& op); diff --git a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp index 61de4bb..7d6e286 100644 --- a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp +++ b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp @@ -106,7 +106,7 @@ const ArtifactModule& AclCppOpGenerator::generate(mir::Graph* g) { void AclCppOpGenerator::visit(ops::ConcatOp& op) { const auto& ir_inputs = op.getInputs(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_output = op.getOutput(0); static const char* axis_names[] = {"arm_compute::DataLayoutDimension::BATCHES", "arm_compute::DataLayoutDimension::CHANNEL", @@ -143,8 +143,8 @@ void AclCppOpGenerator::visit(ops::DepthwiseConv2DOp& op) { void AclCppOpGenerator::visit(ops::SoftmaxOp& op) { assert(op.getNumInputs() == 1); - IODescriptor ir_input = op.getInput(0)->getProducer(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_input = op.getInput(0)->getProducer(); + const auto* ir_output = op.getOutput(0); auto in = AF::id(tensorName(ir_input)); @@ -262,8 +262,8 @@ AclCppOpGenerator::genTransposeACLtoMIR(const string& name, void AclCppOpGenerator::visit(ops::PoolOp& op) { assert(op.getNumInputs() == 1); - IODescriptor ir_input = op.getInput(0)->getProducer(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_input = op.getInput(0)->getProducer(); + const auto* ir_output = op.getOutput(0); const char* pooling_type = nullptr; @@ -329,11 +329,11 @@ void AclCppOpGenerator::visit(ops::PoolOp& op) { void AclCppOpGenerator::visit(ops::FullyConnectedOp& op) { assert(op.getNumInputs() == 2); - IODescriptor ir_input = op.getInput(0)->getProducer(); - IODescriptor ir_weights = op.getInput(1)->getProducer(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_input = op.getInput(0)->getProducer(); + const auto* ir_weights = op.getInput(1)->getProducer(); + const auto* ir_output = op.getOutput(0); - auto ir_weights_op = dynamic_cast(ir_weights->getNode()); + auto ir_weights_op = dynamic_cast(ir_weights->getNode()); if (ir_weights_op == nullptr) throw 
AclCppException("Unsupported operation type"); @@ -373,11 +373,11 @@ void AclCppOpGenerator::visit(ops::CappedReluOp& op) { void AclCppOpGenerator::visit(ops::BiasAddOp& op) { assert(op.getNumInputs() == 2); - IODescriptor ir_input = op.getInput(0)->getProducer(); - IODescriptor ir_weights = op.getInput(1)->getProducer(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_input = op.getInput(0)->getProducer(); + const auto* ir_weights = op.getInput(1)->getProducer(); + const auto* ir_output = op.getOutput(0); - auto ir_weights_op = dynamic_cast(ir_weights->getNode()); + auto ir_weights_op = dynamic_cast(ir_weights->getNode()); if (ir_weights_op == nullptr) throw AclCppException("Unsupported operation type"); @@ -459,8 +459,8 @@ void AclCppOpGenerator::visit(ops::InputOp& op) { addToPersistentTensors(tensor); } -// FIXME: temporary decision -static bool shouldSerializeConstant(ops::ConstantOp& op) { +// FIXME: temporary solution +static bool shouldSerializeConstant(const ops::ConstantOp& op) { // Operations from 'self_serializing_ops_to_inputs' serializing tensors with appropriate index themselves, // so we don't serialize them here, also we don't serialize tensors from dangling ConstantOp static std::map self_serializing_ops_to_inputs{ @@ -499,8 +499,8 @@ void AclCppOpGenerator::visit(ops::ReluOp& op) { void AclCppOpGenerator::visit(ops::ReshapeOp& op) { assert(op.getNumInputs() == 1); - IODescriptor ir_input = op.getInput(0)->getProducer(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_input = op.getInput(0)->getProducer(); + const auto* ir_output = op.getOutput(0); // Get the id of the input tensor in the generated artifact. auto in = AF::id(tensorName(ir_input)); @@ -532,11 +532,11 @@ void AclCppOpGenerator::visit(ops::ScaleOp& op) { // May be not a perfect implementation, using the CLPixelWiseMultiplication ACL function taking // two input tensors with the same shapes. assert(op.getNumInputs() == 2); - IODescriptor ir_input = op.getInput(0)->getProducer(); - IODescriptor ir_weights = op.getInput(1)->getProducer(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_input = op.getInput(0)->getProducer(); + const auto* ir_weights = op.getInput(1)->getProducer(); + const auto* ir_output = op.getOutput(0); - auto ir_weights_op = dynamic_cast(ir_weights->getNode()); + auto ir_weights_op = dynamic_cast(ir_weights->getNode()); if (ir_weights_op == nullptr) throw AclCppException("Unsupported operation type"); @@ -634,8 +634,8 @@ void AclCppOpGenerator::visit(ops::BatchNormOp&) { void AclCppOpGenerator::visit(ops::DropoutOp& op) { assert(op.getNumInputs() == 1); - IODescriptor ir_input = op.getInput(0)->getProducer(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_input = op.getInput(0)->getProducer(); + const auto* ir_output = op.getOutput(0); // Just copy input tensor to the output one. @@ -655,7 +655,7 @@ void AclCppOpGenerator::visit(ops::TanhOp& op) { void AclCppOpGenerator::visit(ops::ElementwiseOp& op) { assert(op.getNumInputs() >= 2); const auto& ir_inputs = op.getInputs(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_output = op.getOutput(0); // Create the output tensor in the DOM and obtain its identifier. 
auto out = genTensor(ir_output); @@ -665,7 +665,7 @@ void AclCppOpGenerator::visit(ops::ElementwiseOp& op) { auto in1 = AF::id(tensorName(ir_inputs[0].getProducer())); for (size_t i = 1; i < ir_inputs.size(); ++i) { - IODescriptor ir_input = ir_inputs[i].getProducer(); + const auto* ir_input = ir_inputs[i].getProducer(); // Get the identifier of the second input tensor in the DOM. auto in2 = AF::id(tensorName(ir_input)); @@ -698,8 +698,8 @@ void AclCppOpGenerator::visit(ops::EluOp&) { void AclCppOpGenerator::visit(ops::PadOp& op) { assert(op.getNumInputs() == 1); - IODescriptor ir_input = op.getInput(0)->getProducer(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_input = op.getInput(0)->getProducer(); + const auto* ir_output = op.getOutput(0); // Get the id of the input tensor. auto input = AF::id(tensorName(ir_input)); @@ -729,11 +729,11 @@ void AclCppOpGenerator::visit(ops::PadOp& op) { template void AclCppOpGenerator::genConvolution(Op& op, const string& acl_func_name, const string& suffix) { - IODescriptor ir_input = op.getInput(0)->getProducer(); - IODescriptor ir_weights = op.getInput(1)->getProducer(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_input = op.getInput(0)->getProducer(); + const auto* ir_weights = op.getInput(1)->getProducer(); + const auto* ir_output = op.getOutput(0); - auto ir_weights_op = dynamic_cast(ir_weights->getNode()); + auto ir_weights_op = dynamic_cast(ir_weights->getNode()); if (ir_weights_op == nullptr) throw AclCppException("Unsupported operation type"); @@ -803,11 +803,11 @@ void AclCppOpGenerator::genConvolution(Op& op, const string& acl_func_name, cons genTensorDeallocation(_infBlock, transposed_output); } -void AclCppOpGenerator::genActivation(mir::Operation& op, const std::string& activation_name, +void AclCppOpGenerator::genActivation(const Operation& op, const std::string& activation_name, float a, float b) { assert(op.getNumInputs() == 1); - IODescriptor ir_input = op.getInput(0)->getProducer(); - IODescriptor ir_output = op.getOutput(0); + const auto* ir_input = op.getInput(0)->getProducer(); + const auto* ir_output = op.getOutput(0); // Get the id of the input tensor. auto in = AF::id(tensorName(ir_input)); @@ -919,7 +919,7 @@ shared_ptr AclCppOpGenerator::genMultiplication(const string& prefix return out; } -string AclCppOpGenerator::tensorName(IODescriptor ir_tensor) const { +string AclCppOpGenerator::tensorName(const Operation::Output* ir_tensor) const { string tensor_name; // TODO Use the tensor name instead of the operation name. 
@@ -980,7 +980,7 @@ shared_ptr<ArtifactId> AclCppOpGenerator::genTensor(const string& name,
   return id;
 }

-shared_ptr<ArtifactId> AclCppOpGenerator::genTensor(IODescriptor ir_tensor) {
+shared_ptr<ArtifactId> AclCppOpGenerator::genTensor(const Operation::Output* ir_tensor) {
   return genTensor(tensorName(ir_tensor), ir_tensor->getShape(),
                    !ir_tensor->getNode()->getName().empty());
 }
@@ -988,7 +988,7 @@ shared_ptr<ArtifactId> AclCppOpGenerator::genTensor(IODescriptor ir_tensor) {
 void AclCppOpGenerator::genNamed(Graph* graph) {
   const auto& inputs = graph->getInputs();
   if (inputs.size() == 1) {
-    auto* input_op = inputs[0];
+    const auto* input_op = inputs[0];
     auto f = _artifactClass->func(true, "arm_compute::CLTensor&", "getInput");
     auto b = f->getBlock();
     auto id = AF::id(tensorName(input_op->getOutput(0)));
@@ -997,7 +997,7 @@ void AclCppOpGenerator::genNamed(Graph* graph) {
   const auto& outputs = graph->getOutputs();
   if (outputs.size() == 1) {
-    auto* output_op = outputs[0];
+    const auto* output_op = outputs[0];
     auto f = _artifactClass->func(true, "arm_compute::CLTensor&", "getOutput");
     auto b = f->getBlock();
     auto id = AF::id(tensorName(output_op->getInput(0)->getProducer()));
@@ -1132,8 +1132,8 @@ void AclCppOpGenerator::genTranspose(const std::shared_ptr<ArtifactId>& inp

 void AclCppOpGenerator::visit(mir::ops::TransposeOp& op) {
   assert(op.getNumInputs() == 1);
-  IODescriptor ir_input = op.getInput(0)->getProducer();
-  IODescriptor ir_output = op.getOutput(0);
+  const auto* ir_input = op.getInput(0)->getProducer();
+  const auto* ir_output = op.getOutput(0);

   // Get the input node tensor id in the DOM.
   shared_ptr<ArtifactId> input = AF::id(tensorName(ir_input));
diff --git a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.h b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.h
index 45f824b..b32b720 100644
--- a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.h
+++ b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.h
@@ -130,7 +130,8 @@ private:
    * @param b - beta parameter used by some activation functions: LINEAR, LU_BOUNDED_RELU, TANH.
    */
   void
-  genActivation(mir::Operation& op, const std::string& activation_name, float a = 0, float b = 0);
+  genActivation(const mir::Operation& op, const std::string& activation_name, float a = 0,
+                float b = 0);

   /**
    * @brief Used to generate a binary addition operation in handling of the elementwise.
@@ -184,7 +185,7 @@ private:
   /**
    * @brief Generates a unique name for the tensor.
    */
-  std::string tensorName(mir::IODescriptor ir_tensor) const;
+  std::string tensorName(const mir::Operation::Output* ir_tensor) const;

   /**
    * @brief Generates variables tensor shape in DOM.
@@ -215,7 +216,7 @@ private:
    * @param ir_tensor - the ModelIR tensor.
    * @return - a DOM identifier for the created tensor.
    */
-  std::shared_ptr<ArtifactId> genTensor(mir::IODescriptor ir_tensor);
+  std::shared_ptr<ArtifactId> genTensor(const mir::Operation::Output* ir_tensor);

   /**
    * @brief generate transposing operation, @p mir_perm contains dimensions in MIR order (batch has index 0)
diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp b/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp
index a063967..c58f7e2 100644
--- a/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp
+++ b/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp
@@ -150,13 +150,13 @@ void Caffe2Importer::preloadAllTensors() {
 }

 void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) {
-  std::vector<IODescriptor> outputs;
+  std::vector<mir::Operation::Output*> outputs;

   // If an op input has not been seen yet, consider it a model input.
   if (op.input_size() > 0
-      && _blobNameToIODescriptor.find(op.input(0)) == _blobNameToIODescriptor.end()) {
+      && _blobNameToOutput.find(op.input(0)) == _blobNameToOutput.end()) {
     outputs = _opCreator->createInput(op.input(0), _inputShapes.front());
-    _blobNameToIODescriptor[op.input(0)] = outputs.at(0);
+    _blobNameToOutput[op.input(0)] = outputs.at(0);
     _inputShapes.erase(_inputShapes.begin(), _inputShapes.begin() + 1);
   }
@@ -223,8 +223,8 @@ void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) {

   for (int i = 0; i < outputs.size(); ++i) {
     // A caffe2 input blob name can be the same as an output blob name, and the next line will overwrite
-    // '_blobNameToIODescriptor' element, but in all networks that I saw it was not a problem
-    _blobNameToIODescriptor[op.output(i)] = outputs.at(i);
+    // the '_blobNameToOutput' element, but in all networks seen so far this was not a problem
+    _blobNameToOutput[op.output(i)] = outputs.at(i);
   }

   _lastMIROp = outputs.at(0)->getNode();
@@ -259,18 +259,18 @@ mir::TensorVariant Caffe2Importer::createTensor(const OperatorDef& op) {
   return mir::TensorVariant(element_type, tensor_shape, src_data);
 }

-std::vector<IODescriptor> Caffe2Importer::getInputMIROps(const OperatorDef& op) {
+std::vector<mir::Operation::Output*> Caffe2Importer::getInputMIROps(const OperatorDef& op) {
   // caffe2 operation inputs are not the same as MIR inputs (e.g. in caffe2 the conv kernel and
   // bias are also inputs), so choose only the caffe2 inputs which are 'real' inputs
-  std::vector<IODescriptor> inputs;
+  std::vector<mir::Operation::Output*> inputs;

   SupportedCaffe2OpType opType = _operatorTypes.at(op.type());

   if (opType != SupportedCaffe2OpType::givenTensorFill
       && opType != SupportedCaffe2OpType::constantFill
       && opType != SupportedCaffe2OpType::givenTensorInt64Fill) {
     for (auto& i : op.input())
-      if (_blobNameToIODescriptor.find(i) != _blobNameToIODescriptor.end())
-        inputs.push_back(_blobNameToIODescriptor[i]);
+      if (_blobNameToOutput.find(i) != _blobNameToOutput.end())
+        inputs.push_back(_blobNameToOutput[i]);
   }

   return inputs;
diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_importer.h b/contrib/nnc/passes/caffe2_frontend/caffe2_importer.h
index ca9fad9..4849671 100644
--- a/contrib/nnc/passes/caffe2_frontend/caffe2_importer.h
+++ b/contrib/nnc/passes/caffe2_frontend/caffe2_importer.h
@@ -65,9 +65,8 @@ private:
   // set of strings describing incorrect parts of network and parts of network unsupported by NNC
   std::set<std::string> _problemsOpSet;

-  // This map maps caffe2 operators names to MIR operators
-  // that correspond to previous caffe2 operators
-  std::unordered_map<std::string, mir::IODescriptor> _blobNameToIODescriptor;
+  // Maps Caffe2 operator input names to corresponding MIR operation outputs.
+ std::unordered_map _blobNameToOutput; mir::Operation* _lastMIROp = nullptr; std::map _MIRTensors; @@ -99,9 +98,9 @@ private: mir::TensorVariant createTensor(const ::caffe2::OperatorDef& op); /** - * @brief Returns MIR ops, under given caffe2 op + * @brief Returns MIR operation outputs corresponding to the inputs of the given operator. */ - std::vector getInputMIROps(const ::caffe2::OperatorDef&); + std::vector getInputMIROps(const ::caffe2::OperatorDef&); /** * @brief Mark output MIR nodes diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp index d119a93..413552e 100644 --- a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp +++ b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp @@ -124,7 +124,7 @@ getStrides(const ::caffe2::OperatorDef& op) { } static Shape getWindowShape(const ::caffe2::OperatorDef& op, - const std::vector& inputs) { + const std::vector& inputs) { int is_global_pooling = getSingleArgument(op, "global_pooling", 0); bool has_custom_kernel_size = hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w"); @@ -158,13 +158,13 @@ static Shape getWindowShape(const ::caffe2::OperatorDef& op, return Shape{kernel_h, kernel_w}; } -mir::IODescriptor Caffe2OpCreator::convertCaffeToMIR(const mir::IODescriptor& arg) { +mir::Operation::Output* Caffe2OpCreator::convertCaffeToMIR(mir::Operation::Output* arg) { // NCHW -> NHWC auto transpose = createOp("CaffeToMIR", arg, std::vector{0, 2, 3, 1}); return transpose->getOutput(0); } -mir::IODescriptor Caffe2OpCreator::convertMIRToCaffe(const mir::IODescriptor& arg) { +mir::Operation::Output* Caffe2OpCreator::convertMIRToCaffe(mir::Operation::Output* arg) { // NHWC -> NCHW auto transpose = createOp("MIRToCaffe", arg, std::vector{0, 3, 1, 2}); return transpose->getOutput(0); @@ -230,12 +230,12 @@ void Caffe2OpCreator::commonCheck(const ::caffe2::OperatorDef& op, // Convert functions // -std::vector -Caffe2OpCreator::convertAdd(const std::vector& inputs, +std::vector +Caffe2OpCreator::convertAdd(const std::vector& inputs, const ::caffe2::OperatorDef& op, const MIRTensors& mir_tensors) { - std::vector add_input; + std::vector add_input; for (const auto& i : inputs) add_input.push_back(convertCaffeToMIR(i)); @@ -250,8 +250,8 @@ Caffe2OpCreator::convertAdd(const std::vector& inputs, return {convertMIRToCaffe(add->getOutput(0))}; } -std::vector -Caffe2OpCreator::convertAveragePool(const std::vector& inputs, +std::vector +Caffe2OpCreator::convertAveragePool(const std::vector& inputs, const OperatorDef& op) { Shape window_shape = getWindowShape(op, inputs); @@ -269,9 +269,10 @@ Caffe2OpCreator::convertAveragePool(const std::vector& inputs, return {convertMIRToCaffe(pooling->getOutput(0))}; } -std::vector Caffe2OpCreator::convertConv(const std::vector& inputs, - const ::caffe2::OperatorDef& op, - const MIRTensors& mir_tensors) { +std::vector +Caffe2OpCreator::convertConv(const std::vector& inputs, + const ::caffe2::OperatorDef& op, + const MIRTensors& mir_tensors) { // dilation order: h w (not used) Shape stride_shape(getStrides(op)); @@ -309,15 +310,17 @@ std::vector Caffe2OpCreator::convertConv(const std::vectorgetOutput(0))}; } -std::vector Caffe2OpCreator::convertConcat(const std::vector& inputs, - const ::caffe2::OperatorDef& op) { +std::vector +Caffe2OpCreator::convertConcat(const std::vector& inputs, + const ::caffe2::OperatorDef& op) { int axis = getSingleArgument(op, "axis", 1); auto result = createOp("Concat", inputs, axis); return 
{result->getOutput(0)}; } -std::vector Caffe2OpCreator::convertDropout(const std::vector& inputs, - const ::caffe2::OperatorDef& op) { +std::vector +Caffe2OpCreator::convertDropout(const std::vector& inputs, + const ::caffe2::OperatorDef& op) { int is_test = getSingleArgument(op, "is_test", 0); if (is_test) return {inputs[0]}; @@ -327,8 +330,8 @@ std::vector Caffe2OpCreator::convertDropout(const std::vectorgetOutput(0)}; } -std::vector -Caffe2OpCreator::convertFullyConnected(const std::vector& inputs, +std::vector +Caffe2OpCreator::convertFullyConnected(const std::vector& inputs, const ::caffe2::OperatorDef& op, const MIRTensors& mir_tensors) { auto weights_tensor = transposeTensor<1, 0>(mir_tensors.at(op.input(1))); @@ -347,8 +350,9 @@ Caffe2OpCreator::convertFullyConnected(const std::vector& inputs, } -std::vector Caffe2OpCreator::convertMaxPool(const std::vector& inputs, - const OperatorDef& op) { +std::vector +Caffe2OpCreator::convertMaxPool(const std::vector& inputs, + const OperatorDef& op) { Shape window_shape = getWindowShape(op, inputs); Shape strides(getStrides(op)); @@ -364,12 +368,12 @@ std::vector Caffe2OpCreator::convertMaxPool(const std::vectorgetOutput(0))}; } -std::vector -Caffe2OpCreator::convertMul(const std::vector& inputs, +std::vector +Caffe2OpCreator::convertMul(const std::vector& inputs, const ::caffe2::OperatorDef& op, const MIRTensors& mir_tensors) { - std::vector input_descriptors; + std::vector input_descriptors; for (const auto& i: inputs) input_descriptors.push_back(convertCaffeToMIR(i)); @@ -384,14 +388,14 @@ Caffe2OpCreator::convertMul(const std::vector& inputs, return {convertMIRToCaffe(mul->getOutput(0))}; } -std::vector -Caffe2OpCreator::convertRelu(const std::vector& inputs) { +std::vector +Caffe2OpCreator::convertRelu(const std::vector& inputs) { auto relu = createOp("Relu", inputs[0]); return {relu->getOutput(0)}; } -std::vector -Caffe2OpCreator::convertResizeNearest(const std::vector& inputs, +std::vector +Caffe2OpCreator::convertResizeNearest(const std::vector& inputs, const ::caffe2::OperatorDef& op) { // assume NCHW and convert to MIR (NHWC) std::vector scales(4); @@ -406,21 +410,22 @@ Caffe2OpCreator::convertResizeNearest(const std::vector& inputs, return {convertMIRToCaffe(resize->getOutput(0))}; } -std::vector -Caffe2OpCreator::convertSigmoid(const std::vector& inputs) { +std::vector +Caffe2OpCreator::convertSigmoid(const std::vector& inputs) { auto result = createOp("Sigmoid", inputs[0]); return {result->getOutput(0)}; } -std::vector Caffe2OpCreator::convertSoftmax(const std::vector& inputs, - const ::caffe2::OperatorDef& op) { +std::vector +Caffe2OpCreator::convertSoftmax(const std::vector& inputs, + const ::caffe2::OperatorDef& op) { int axis = getSingleArgument(op, "axis", 1); auto softmax = createOp("Softmax", inputs[0], axis); return {softmax->getOutput(0)}; } -std::vector -Caffe2OpCreator::convertSpatialBN(const std::vector& inputs, +std::vector +Caffe2OpCreator::convertSpatialBN(const std::vector& inputs, const ::caffe2::OperatorDef& op, const MIRTensors& mir_tensors) { // overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias @@ -453,7 +458,8 @@ Caffe2OpCreator::convertSpatialBN(const std::vector& inputs, return {convertMIRToCaffe(result->getOutput(0))}; } -std::vector Caffe2OpCreator::convertSum(const std::vector& inputs) { +std::vector +Caffe2OpCreator::convertSum(const std::vector& inputs) { const auto& input_shape = inputs[0]->getShape(); for (auto& in : inputs) assert(input_shape == in->getShape() && "All Sum inputs must 
have same shape"); @@ -462,8 +468,8 @@ std::vector Caffe2OpCreator::convertSum(const std::vectorgetOutput(0)}; } -std::vector -Caffe2OpCreator::convertClip(const std::vector& inputs, +std::vector +Caffe2OpCreator::convertClip(const std::vector& inputs, const ::caffe2::OperatorDef& op) { float max = getSingleArgument(op, "max", float(0)); @@ -476,8 +482,8 @@ Caffe2OpCreator::convertClip(const std::vector& inputs, } -std::vector -Caffe2OpCreator::convertReshape(const std::vector& inputs, +std::vector +Caffe2OpCreator::convertReshape(const std::vector& inputs, const ::caffe2::OperatorDef& op, const MIRTensors& mir_tensors) { // Check new shape input @@ -498,7 +504,7 @@ Caffe2OpCreator::convertReshape(const std::vector& inputs, return {reshape->getOutput(0)}; } -std::vector +std::vector Caffe2OpCreator::createInput(const std::string& name, const mir::Shape& shape) { auto variable = _graph->create(name, shape); return {variable->getOutput(0)}; diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h index 726925c..559ad45 100644 --- a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h +++ b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h @@ -41,73 +41,91 @@ class Caffe2OpCreator { public: explicit Caffe2OpCreator(Graph* g) : _graph(g) {}; - void checkAdd(const ::caffe2::OperatorDef&, std::set&); - void checkConvLikeOp(const ::caffe2::OperatorDef&, std::set&); void checkFC(const ::caffe2::OperatorDef&, std::set&); - void checkMul(const ::caffe2::OperatorDef&, std::set&); - void checkSpatialBN(const ::caffe2::OperatorDef&, std::set&); void commonCheck(const ::caffe2::OperatorDef&, std::set&); - std::vector convertAdd(const std::vector&, - const ::caffe2::OperatorDef&, const MIRTensors&); + std::vector + createInput(const std::string& name, const mir::Shape& shape); - std::vector convertAveragePool(const std::vector&, - const ::caffe2::OperatorDef&); + std::vector + convertAdd(const std::vector&, + const ::caffe2::OperatorDef&, + const MIRTensors&); - std::vector convertConv(const std::vector&, - const ::caffe2::OperatorDef&, const MIRTensors&); + std::vector + convertAveragePool(const std::vector&, + const ::caffe2::OperatorDef&); - std::vector convertConcat(const std::vector&, - const ::caffe2::OperatorDef&); + std::vector + convertConv(const std::vector&, + const ::caffe2::OperatorDef&, + const MIRTensors&); - std::vector convertDropout(const std::vector&, - const ::caffe2::OperatorDef&); + std::vector + convertConcat(const std::vector&, + const ::caffe2::OperatorDef&); - std::vector convertFullyConnected(const std::vector&, - const ::caffe2::OperatorDef&, - const MIRTensors&); + std::vector + convertDropout(const std::vector&, + const ::caffe2::OperatorDef&); - std::vector createInput(const std::string& name, const mir::Shape& shape); + std::vector + convertFullyConnected(const std::vector&, + const ::caffe2::OperatorDef&, + const MIRTensors&); - std::vector convertMaxPool(const std::vector&, - const ::caffe2::OperatorDef&); + std::vector + convertMaxPool(const std::vector&, + const ::caffe2::OperatorDef&); - std::vector convertMul(const std::vector&, - const ::caffe2::OperatorDef&, const MIRTensors&); + std::vector + convertMul(const std::vector&, + const ::caffe2::OperatorDef&, + const MIRTensors&); - std::vector convertRelu(const std::vector&); + std::vector + convertRelu(const std::vector&); - std::vector convertResizeNearest(const std::vector&, - const ::caffe2::OperatorDef&); + std::vector + 
convertResizeNearest(const std::vector&, + const ::caffe2::OperatorDef&); - std::vector convertSigmoid(const std::vector&); + std::vector + convertSigmoid(const std::vector&); - std::vector convertSoftmax(const std::vector&, - const ::caffe2::OperatorDef&); + std::vector + convertSoftmax(const std::vector&, + const ::caffe2::OperatorDef&); - std::vector convertSpatialBN(const std::vector&, - const ::caffe2::OperatorDef&, const MIRTensors&); + std::vector + convertSpatialBN(const std::vector&, + const ::caffe2::OperatorDef&, + const MIRTensors&); - std::vector convertSum(const std::vector&); + std::vector + convertSum(const std::vector&); - std::vector convertClip(const std::vector&, - const ::caffe2::OperatorDef&); + std::vector + convertClip(const std::vector&, + const ::caffe2::OperatorDef&); - std::vector convertReshape(const std::vector&, - const ::caffe2::OperatorDef&, const MIRTensors&); + std::vector + convertReshape(const std::vector&, + const ::caffe2::OperatorDef&, + const MIRTensors&); private: Graph* _graph = nullptr; - mir::IODescriptor convertCaffeToMIR(const mir::IODescriptor& arg); + mir::Operation::Output* convertCaffeToMIR(mir::Operation::Output* arg); - mir::IODescriptor convertMIRToCaffe(const mir::IODescriptor& arg); + mir::Operation::Output* convertMIRToCaffe(mir::Operation::Output* arg); template mir::Operation* createOp(const std::string& name, Types&& ... args); diff --git a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp index a715139..bf4a72c 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp @@ -82,12 +82,10 @@ void CaffeImporter::collectUnsupportedLayers() { } void CaffeImporter::createMIRNodesFromLayer(const LayerParameter& layer) { - auto inputs = getMIRInputsForLayer(layer); + std::vector inputs = getMIRInputsForLayer(layer); + std::vector outputs; - std::vector outputs; - CaffeOpType op_type = _operatorTypes.at(layer.type()); - - switch (op_type) { + switch (_operatorTypes.at(layer.type())) { case CaffeOpType::input: outputs = _opCreator->convertInput(layer); break; @@ -151,7 +149,7 @@ void CaffeImporter::createMIRNodesFromLayer(const LayerParameter& layer) { assert(layer.top_size() == static_cast(outputs.size()) && "Number of outputs differs."); for (int i = 0; i < layer.top_size(); ++i) - _blobNameToIODescriptor[layer.top(i)] = outputs.at(i); + _blobNameToOpOutput[layer.top(i)] = outputs.at(i); } void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) { @@ -207,11 +205,12 @@ void CaffeImporter::processDeprecatedInput() { throw PassException("Deprecated Caffe input types are not supported"); } -std::vector CaffeImporter::getMIRInputsForLayer(const LayerParameter& layer) { - std::vector inputs; +std::vector +CaffeImporter::getMIRInputsForLayer(const LayerParameter& layer) { + std::vector inputs; for (const auto& input_name : layer.bottom()) - inputs.push_back(_blobNameToIODescriptor.at(input_name)); + inputs.push_back(_blobNameToOpOutput.at(input_name)); return inputs; } @@ -221,7 +220,7 @@ void CaffeImporter::setGraphOutputs() { // For now, we assume that: // - there is exactly one output; // - the output is from the last layer. 
-  auto output = _blobNameToIODescriptor[last_layer.top(0)];
+  auto output = _blobNameToOpOutput[last_layer.top(0)];
   _graph->create(output->getNode()->getName(), output);
   output->getNode()->setName("");
 }
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_importer.h b/contrib/nnc/passes/caffe_frontend/caffe_importer.h
index 731ee3c..7887c56 100644
--- a/contrib/nnc/passes/caffe_frontend/caffe_importer.h
+++ b/contrib/nnc/passes/caffe_frontend/caffe_importer.h
@@ -46,8 +46,8 @@ private:
   mir::Graph* _graph;
   std::unique_ptr _opCreator;

-  // Maps Caffe blob name to MIR IODescriptor.
-  std::map _blobNameToIODescriptor;
+  // Maps Caffe blob names to corresponding MIR operation outputs.
+  std::map _blobNameToOpOutput;

   static const std::map _operatorTypes;
   // set of strings describing incorrect parts of network and parts of network unsupported by NNC
@@ -75,9 +75,9 @@ private:
   void collectUnsupportedOp(const ::caffe::LayerParameter& lp);

   /**
-   * @brief Return MIR IODescriptors for the inputs of the given layer.
+   * @brief Returns MIR operation outputs corresponding to the inputs of the given layer.
    */
-  std::vector getMIRInputsForLayer(const ::caffe::LayerParameter& layer);
+  std::vector getMIRInputsForLayer(const ::caffe::LayerParameter& layer);

   void processDeprecatedInput();
 };
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
index 3c77fe4..8aefe8c 100644
--- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
+++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
@@ -58,33 +58,35 @@ namespace nnc {
 using namespace mir;
 using namespace ::caffe;

-mir::IODescriptor CaffeOpCreator::convertCaffeToMIR(const mir::IODescriptor& arg) {
+mir::Operation::Output* CaffeOpCreator::convertCaffeToMIR(mir::Operation::Output* arg) {
   // NCHW -> NHWC
   auto transpose = createOp("", arg, std::vector{0, 2, 3, 1});
   return transpose->getOutput(0);
 }

-mir::IODescriptor CaffeOpCreator::convertMIRToCaffe(const mir::IODescriptor& arg) {
+mir::Operation::Output* CaffeOpCreator::convertMIRToCaffe(mir::Operation::Output* arg) {
   // NHWC -> NCHW
   auto transpose = createOp("", arg, std::vector{0, 3, 1, 2});
   return transpose->getOutput(0);
 }

-mir::IODescriptor CaffeOpCreator::createAdd(mir::IODescriptor arg1, mir::IODescriptor arg2) {
-  std::vector inputs{arg1, arg2};
+mir::Operation::Output*
+CaffeOpCreator::createAdd(mir::Operation::Output* arg1, mir::Operation::Output* arg2) {
+  std::vector inputs{arg1, arg2};
   auto op = createOp("", inputs, ops::ElementwiseOp::OpType::add);
   return op->getOutput(0);
 }

-mir::IODescriptor CaffeOpCreator::createMul(mir::IODescriptor arg1, mir::IODescriptor arg2) {
-  std::vector inputs{arg1, arg2};
+mir::Operation::Output*
+CaffeOpCreator::createMul(mir::Operation::Output* arg1, mir::Operation::Output* arg2) {
+  std::vector inputs{arg1, arg2};
   auto op = createOp("", inputs, ops::ElementwiseOp::OpType::mul);
   return op->getOutput(0);
 }

 /// @brief Split arg into @p num_parts equal parts along @p axis axis.
-std::vector -CaffeOpCreator::createSplit(mir::IODescriptor arg, int32_t num_parts, int32_t axis) { +std::vector +CaffeOpCreator::createSplit(mir::Operation::Output* arg, int32_t num_parts, int32_t axis) { const auto& arg_shape = arg->getShape(); assert(axis >= 0 && axis < arg_shape.rank()); @@ -95,7 +97,7 @@ CaffeOpCreator::createSplit(mir::IODescriptor arg, int32_t num_parts, int32_t ax Shape sizes(arg_shape); sizes.dim(axis) = part_size; - std::vector outputs(num_parts); + std::vector outputs(num_parts); for (int32_t i = 0; i < num_parts; ++i) { outputs[i] = createOp("", arg, starts, sizes)->getOutput(0); starts.dim(axis) += part_size; @@ -105,9 +107,9 @@ CaffeOpCreator::createSplit(mir::IODescriptor arg, int32_t num_parts, int32_t ax } /// @brief Helper function for creating FullyConnected operation with non-square input. -IODescriptor -CaffeOpCreator::createFullyConnected(const mir::IODescriptor& input, - const mir::IODescriptor& weights, +mir::Operation::Output* +CaffeOpCreator::createFullyConnected(mir::Operation::Output* input, + mir::Operation::Output* weights, int32_t axis) { const auto& input_shape = input->getShape(); const auto& weights_shape = weights->getShape(); @@ -153,12 +155,12 @@ TensorVariant CaffeOpCreator::convertBlob(const BlobProto& blob) { return TensorVariant(dtype, shape, src_data); } -std::vector +std::vector CaffeOpCreator::convertInput(const LayerParameter& layer) { const auto& params = layer.input_param(); const auto num_inputs = layer.top_size(); const auto num_shapes = params.shape_size(); - std::vector descriptors; + std::vector outputs; assert((num_shapes == 1 || num_shapes == num_inputs) && "Unsupported number of shapes."); @@ -167,10 +169,10 @@ CaffeOpCreator::convertInput(const LayerParameter& layer) { const auto& blob_shape = params.shape(num_shapes == 1 ? 
0 : i); Shape shape = ShapeHelper::createShape(blob_shape.dim(), blob_shape.dim_size()); auto variable = createOp(blob_name, shape); - descriptors.push_back(variable->getOutput(0)); + outputs.push_back(variable->getOutput(0)); } - return descriptors; + return outputs; } static void convertConvolutionParam(const ConvolutionParameter& conv_param, Shape& strides, @@ -228,9 +230,9 @@ void CaffeOpCreator::checkConvolution(const ConvolutionParameter& opts, problems_op_set.insert("Conv2D: Unsupported number of pads"); } -std::vector +std::vector CaffeOpCreator::convertConvolution(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { const auto& params = layer.convolution_param(); Shape strides; std::vector padding; @@ -273,9 +275,9 @@ CaffeOpCreator::convertConvolution(const caffe::LayerParameter& layer, return {convertMIRToCaffe(result->getOutput(0))}; } -std::vector +std::vector CaffeOpCreator::convertDeconvolution(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { auto& opts = layer.convolution_param(); Shape strides; std::vector padding; @@ -302,9 +304,9 @@ CaffeOpCreator::convertDeconvolution(const caffe::LayerParameter& layer, return {convertMIRToCaffe(result->getOutput(0))}; } -std::vector +std::vector CaffeOpCreator::convertInnerProduct(const LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { const auto& params = layer.inner_product_param(); auto weights_tensor = convertBlob(layer.blobs(0)); @@ -323,9 +325,9 @@ CaffeOpCreator::convertInnerProduct(const LayerParameter& layer, return {result}; } -std::vector +std::vector CaffeOpCreator::convertConcat(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { const auto& params = layer.concat_param(); auto concat = createOp(layer.name(), inputs, params.axis()); return {concat->getOutput(0)}; @@ -397,9 +399,9 @@ void CaffeOpCreator::checkPooling(const PoolingParameter& opts, problemsOpSet.insert("Pooling: conflicting padding properties in pooling"); } -std::vector +std::vector CaffeOpCreator::convertPooling(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { auto& opts = layer.pooling_param(); Shape window_shape; Shape strides; @@ -427,9 +429,9 @@ CaffeOpCreator::convertPooling(const caffe::LayerParameter& layer, return {convertMIRToCaffe(pooling->getOutput(0))}; } -std::vector +std::vector CaffeOpCreator::convertSoftmax(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { const auto& params = layer.softmax_param(); // CPP and ACL backends are able to perform Softmax only along the last axis. @@ -472,18 +474,18 @@ void CaffeOpCreator::checkReshape(const ReshapeParameter& opts, * @todo Decide how to react to the absence of "shape" parameter. * @todo Support zero values in "shape" parameter. 
*/ -std::vector +std::vector CaffeOpCreator::convertReshape(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { auto& opts = layer.reshape_param(); Shape new_shape = ShapeHelper::createShape(opts.shape().dim(), opts.shape().dim_size()); auto reshape = createOp(layer.name(), inputs[0], new_shape); return {reshape->getOutput(0)}; } -std::vector +std::vector CaffeOpCreator::convertReLU(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { mir::Operation* relu; if (layer.relu_param().has_negative_slope()) { float alpha = layer.relu_param().negative_slope(); @@ -495,9 +497,9 @@ CaffeOpCreator::convertReLU(const caffe::LayerParameter& layer, return {relu->getOutput(0)}; } -std::vector +std::vector CaffeOpCreator::convertScale(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { const auto& params = layer.scale_param(); auto scale = createOp("", convertBlob(layer.blobs(0)))->getOutput(0); auto result = createOp(layer.name(), convertCaffeToMIR(inputs[0]), scale); @@ -520,9 +522,9 @@ void CaffeOpCreator::checkBatchNorm(const caffe::LayerParameter& layer, "Unexpected shape of scale parameter in batch norm"); } -std::vector +std::vector CaffeOpCreator::convertBatchNorm(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { auto& opts = layer.batch_norm_param(); float eps = opts.eps(); auto scale_weight = convertBlob(layer.blobs(2)); @@ -555,25 +557,25 @@ CaffeOpCreator::convertBatchNorm(const caffe::LayerParameter& layer, return {convertMIRToCaffe(result->getOutput(0))}; } -std::vector +std::vector CaffeOpCreator::convertDropout(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { auto& opts = layer.dropout_param(); auto dropout = createOp(layer.name(), inputs[0], opts.dropout_ratio()); return {dropout->getOutput(0)}; } -std::vector +std::vector CaffeOpCreator::convertELU(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { auto& opts = layer.elu_param(); auto elu = createOp(layer.name(), inputs[0], opts.alpha()); return {elu->getOutput(0)}; } -std::vector +std::vector CaffeOpCreator::convertEmbed(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { const auto& params = layer.embed_param(); auto data = createOp(layer.name() + ".weights", convertBlob(layer.blobs(0))); auto result = createOp(layer.name(), data->getOutput(0), inputs[0], 0); @@ -587,26 +589,26 @@ CaffeOpCreator::convertEmbed(const caffe::LayerParameter& layer, return {result->getOutput(0)}; } -std::vector +std::vector CaffeOpCreator::convertSigmoid(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { auto result = createOp(layer.name(), inputs[0]); return {result->getOutput(0)}; } -std::vector +std::vector CaffeOpCreator::convertTanH(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { auto tanh = createOp(layer.name(), inputs[0]); return {tanh->getOutput(0)}; } -std::vector +std::vector CaffeOpCreator::convertEltwise(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { auto& opts = layer.eltwise_param(); ops::ElementwiseOp::OpType optype; - std::vector input_tensors; + std::vector input_tensors; switch (opts.operation()){ case EltwiseParameter_EltwiseOp_PROD: optype = 
ops::ElementwiseOp::OpType::mul; @@ -621,7 +623,7 @@ CaffeOpCreator::convertEltwise(const caffe::LayerParameter& layer, if (opts.coeff().Get(i) != 1.0f) { TensorVariant coeff_tensor(DTYPE::FLOAT32, Shape{1}, &opts.coeff().Get(i)); auto coeff_const = createOp(layer.name() + "_const", coeff_tensor); - std::vector mul_inputs; + std::vector mul_inputs; mul_inputs.push_back(coeff_const->getOutput(0)); mul_inputs.push_back(inputs[i]); auto mul = createOp(layer.name() + "_mul", @@ -646,10 +648,10 @@ CaffeOpCreator::convertEltwise(const caffe::LayerParameter& layer, return {elementwise->getOutput(0)}; } -std::vector +std::vector CaffeOpCreator::convertSplit(const caffe::LayerParameter& layer, - const std::vector& inputs) { - std::vector outputs(layer.top_size(), inputs.at(0)); + const std::vector& inputs) { + std::vector outputs(layer.top_size(), inputs.at(0)); return outputs; } @@ -719,9 +721,9 @@ static TensorVariant createZeroedTensor(const mir::Shape& shape) { * In this implementation the inner products for all gates are performed as single inner product for * efficiency. */ -std::vector +std::vector CaffeOpCreator::convertLSTM(const caffe::LayerParameter& layer, - const std::vector& inputs) { + const std::vector& inputs) { const auto& params = layer.recurrent_param(); // Inputs to the layer. @@ -754,9 +756,9 @@ CaffeOpCreator::convertLSTM(const caffe::LayerParameter& layer, auto x_xw_b = createOp("", x_xw, xb)->getOutput(0); // Split input and continuation tensors into seq_length slices. - std::vector x_xw_b_slices = createSplit(x_xw_b, seq_length, 0); - std::vector cont_slices = createSplit(cont, seq_length, 0); - std::vector h_slices(seq_length); + std::vector x_xw_b_slices = createSplit(x_xw_b, seq_length, 0); + std::vector cont_slices = createSplit(cont, seq_length, 0); + std::vector h_slices(seq_length); for (int32_t t = 0; t < seq_length; t++) { auto c_cont_t = createMul(c_t, cont_slices[t]); @@ -765,7 +767,7 @@ CaffeOpCreator::convertLSTM(const caffe::LayerParameter& layer, auto x_xw_b_t = x_xw_b_slices[t]; auto h_hw_t = createFullyConnected(h_cont_t, hw, 2); auto activation_inputs_concat = createAdd(x_xw_b_t, h_hw_t); - std::vector activation_inputs = createSplit(activation_inputs_concat, 4, 2); + auto activation_inputs = createSplit(activation_inputs_concat, 4, 2); auto i_t = createOp("", activation_inputs[0])->getOutput(0); auto f_t = createOp("", activation_inputs[1])->getOutput(0); diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h index e6e171b..30515a5 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h +++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h @@ -37,80 +37,80 @@ class CaffeOpCreator { public: explicit CaffeOpCreator(mir::Graph* g) : _graph(g) {}; - std::vector + std::vector convertInput(const caffe::LayerParameter& layer); - std::vector + std::vector convertConvolution(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertInnerProduct(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertConcat(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertPooling(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertSoftmax(const caffe::LayerParameter& layer, - const std::vector& 
inputs); + const std::vector& inputs); - std::vector + std::vector convertReshape(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertReLU(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertScale(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertBatchNorm(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertDropout(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertDeconvolution(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertELU(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertEmbed(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertSigmoid(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertTanH(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertEltwise(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertSplit(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertLSTM(const caffe::LayerParameter& layer, - const std::vector& inputs); + const std::vector& inputs); void checkConvolution(const caffe::ConvolutionParameter& layer, std::set&); @@ -125,20 +125,20 @@ public: private: mir::Graph* _graph = nullptr; - mir::IODescriptor convertCaffeToMIR(const mir::IODescriptor& arg); + mir::Operation::Output* convertCaffeToMIR(mir::Operation::Output* arg); - mir::IODescriptor convertMIRToCaffe(const mir::IODescriptor& arg); + mir::Operation::Output* convertMIRToCaffe(mir::Operation::Output* arg); - mir::IODescriptor createAdd(mir::IODescriptor arg1, mir::IODescriptor arg2); + mir::Operation::Output* createAdd(mir::Operation::Output* arg1, mir::Operation::Output* arg2); - mir::IODescriptor createMul(mir::IODescriptor arg1, mir::IODescriptor arg2); + mir::Operation::Output* createMul(mir::Operation::Output* arg1, mir::Operation::Output* arg2); - std::vector - createSplit(mir::IODescriptor arg, int32_t num_parts, int32_t axis); + std::vector + createSplit(mir::Operation::Output* arg, int32_t num_parts, int32_t axis); - mir::IODescriptor - createFullyConnected(const mir::IODescriptor& input, - const mir::IODescriptor& weights, + mir::Operation::Output* + createFullyConnected(mir::Operation::Output* input, + mir::Operation::Output* weights, int32_t axis); TensorVariant convertBlob(const caffe::BlobProto& blob); diff --git a/contrib/nnc/passes/interpreter/Interpreter.cpp b/contrib/nnc/passes/interpreter/Interpreter.cpp index 6fb78ff..c9ccb4a 100644 --- a/contrib/nnc/passes/interpreter/Interpreter.cpp +++ b/contrib/nnc/passes/interpreter/Interpreter.cpp @@ -96,7 +96,7 @@ void NNInterpreter::setInput(const std::string &name, const TensorVariant& t) { _inputTensors.emplace(name, t); } -TensorVariant NNInterpreter::getResult(IODescriptor tensor) { +TensorVariant 
NNInterpreter::getResult(const Operation::Output* tensor) {
   return _opResults.at(tensor->getNode()).at(tensor->getIndex());
 }
diff --git a/contrib/nnc/passes/interpreter/interpreter_pass.cpp b/contrib/nnc/passes/interpreter/interpreter_pass.cpp
index 5d770cc..7a4fbcf 100644
--- a/contrib/nnc/passes/interpreter/interpreter_pass.cpp
+++ b/contrib/nnc/passes/interpreter/interpreter_pass.cpp
@@ -132,13 +132,13 @@ PassData InterpreterPass::run(PassData data) {

   g->accept(&interpreter);

-  for (auto out_node : g->getOutputs()) {
-    const auto& tensor = interpreter.getResult(out_node->getInput(0)->getProducer());
+  for (const auto* output_op : g->getOutputs()) {
+    const auto& tensor = interpreter.getResult(output_op->getInput(0)->getProducer());
 #ifdef NNC_HDF5_SUPPORTED
-    writeTensorToHDF5File(tensor, out_node->getName(), cli::artifactDir);
+    writeTensorToHDF5File(tensor, output_op->getName(), cli::artifactDir);
 #else
-    std::cout << "Result <" << out_node->getName()
+    std::cout << "Result <" << output_op->getName()
               << "> wasn't saved, due to lack of HDF5" << std::endl;
 #endif // NNC_HDF5_SUPPORTED
   }
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
index cd44765..bcc4927 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
@@ -151,7 +151,7 @@ void ONNXImporterImpl::createGraphInputs() {
       const onnx::TensorProto* onnx_tensor = onnx_tensors[name];
       _constantTensors.insert(std::make_pair(name, createTensor(onnx_tensor)));
       auto constant = _graph->create(name, _constantTensors.at(name));
-      _tensorNameToIODescriptor[name] = constant->getOutput(0);
+      _tensorNameToOutput[name] = constant->getOutput(0);
     } else {
       const auto& onnx_input_shape = input.type().tensor_type().shape();
       mir::Shape shape(onnx_input_shape.dim_size());
@@ -161,7 +161,7 @@ void ONNXImporterImpl::createGraphInputs() {
     }
     // TODO: Temporary solution!
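Editorial note: a minimal sketch of how a caller consumes interpreter results under the new API, following the InterpreterPass loop above; the graph pointer 'g', the input name "x" and the tensor 't' are hypothetical stand-ins:

    // Sketch only, not part of the patch.
    NNInterpreter interpreter;
    interpreter.setInput("x", t);  // 't' is a prepared mir::TensorVariant
    g->accept(&interpreter);

    for (const auto* output_op : g->getOutputs()) {
      // The producer of the output op's single input (an Operation::Output*,
      // no longer an IODescriptor) keys the interpreter's result table.
      const mir::TensorVariant& result =
          interpreter.getResult(output_op->getInput(0)->getProducer());
      (void) result;  // e.g. write to HDF5 or compare against a reference
    }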
auto node = _graph->create(name, shape); - _tensorNameToIODescriptor[name] = node->getOutput(0); + _tensorNameToOutput[name] = node->getOutput(0); } } } @@ -186,16 +186,17 @@ mir::Graph *ONNXImporterImpl::createIR() { assert(onnx_node.has_op_type()); auto op_type = onnx_node.op_type().c_str(); // Fill inputs of the given node - std::vector inputs(onnx_node.input_size()); + std::vector inputs(onnx_node.input_size()); + std::vector outputs; + for (int i = 0; i < onnx_node.input_size(); i++) { auto& name = onnx_node.input(i); if (name.size() != 0) { - assert(_tensorNameToIODescriptor.find(name) != _tensorNameToIODescriptor.end()); - inputs[i] = _tensorNameToIODescriptor[name]; + assert(_tensorNameToOutput.find(name) != _tensorNameToOutput.end()); + inputs[i] = _tensorNameToOutput[name]; } } - std::vector outputs; auto* onnx_op_type = ONNXPerfectHash::getONNXOpType(op_type, onnx_node.op_type().size()); switch (onnx_op_type->opCode) { @@ -273,7 +274,7 @@ mir::Graph *ONNXImporterImpl::createIR() { // Set outputs' names for (int i = 0; i < outputs.size(); i++) { outputs[i]->getNode()->setName(onnx_node.output(i)); - auto result = _tensorNameToIODescriptor.emplace(outputs[i]->getNode()->getName(), outputs[i]); + auto result = _tensorNameToOutput.emplace(outputs[i]->getNode()->getName(), outputs[i]); if(!result.second) throw PassException("Name duplication: " + outputs[i]->getNode()->getName()); } diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h index 0028ee5..6d5d65a 100644 --- a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h +++ b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h @@ -44,12 +44,12 @@ public: private: void createGraphInputs(); - // This map maps onnx tensor names to MIR operations/nodes - std::map _tensorNameToIODescriptor; + // Maps ONNX tensor names to corresponding MIR operation outputs. + std::map _tensorNameToOutput; // This map keeps named tensors used as graph input initializers. 
// In addition here could be tensors from opGivenTensorFill and opConstant std::map _constantTensors; - std::vector _graphOutputs; + std::vector _graphOutputs; std::string _modelFilename; std::unique_ptr _model; mir::Graph* _graph; diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp index 83735f9..05b2440 100644 --- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp +++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp @@ -129,8 +129,8 @@ static void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStri } }; -std::vector -ONNXOpCreator::convertConv2D(const std::vector& inputs, +std::vector +ONNXOpCreator::convertConv2D(const std::vector& inputs, const onnx::NodeProto& onnx_node) { assert(inputs.size() >= 2); @@ -175,8 +175,8 @@ ONNXOpCreator::convertConv2D(const std::vector& inputs, return {convertMIRToONNX(result->getOutput(0))}; } -std::vector -ONNXOpCreator::convertConcat(const std::vector& inputs, +std::vector +ONNXOpCreator::convertConcat(const std::vector& inputs, const onnx::NodeProto& onnx_node) { bool found; int axis; @@ -187,8 +187,8 @@ ONNXOpCreator::convertConcat(const std::vector& inputs, return {result->getOutput(0)}; } -std::vector -ONNXOpCreator::convertGather(const std::vector& inputs, +std::vector +ONNXOpCreator::convertGather(const std::vector& inputs, const onnx::NodeProto& onnx_node) { bool found; int value; @@ -198,8 +198,8 @@ ONNXOpCreator::convertGather(const std::vector& inputs, return {result->getOutput(0)}; } -std::vector -ONNXOpCreator::convertPad(const std::vector& inputs, +std::vector +ONNXOpCreator::convertPad(const std::vector& inputs, const onnx::NodeProto& onnx_node) { bool found; float value; @@ -226,8 +226,8 @@ ONNXOpCreator::convertPad(const std::vector& inputs, return {result->getOutput(0)}; } -std::vector -ONNXOpCreator::convertPool(const std::vector& inputs, +std::vector +ONNXOpCreator::convertPool(const std::vector& inputs, ONNXOpCode op_code, const onnx::NodeProto& onnx_node) { ops::PoolOp::BorderType border_type; @@ -265,8 +265,8 @@ ONNXOpCreator::convertPool(const std::vector& inputs, return {convertMIRToONNX(result->getOutput(0))}; } -std::vector -ONNXOpCreator::convertSoftmax(const std::vector& inputs, +std::vector +ONNXOpCreator::convertSoftmax(const std::vector& inputs, const onnx::NodeProto& onnx_node) { int axis; bool found; @@ -276,8 +276,8 @@ ONNXOpCreator::convertSoftmax(const std::vector& inputs, return {result->getOutput(0)}; } -std::vector -ONNXOpCreator::convertReshape(const std::vector& inputs) { +std::vector +ONNXOpCreator::convertReshape(const std::vector& inputs) { // The original shape const auto& in_shape = inputs[0]->getShape(); @@ -310,8 +310,8 @@ ONNXOpCreator::convertReshape(const std::vector& inputs) { return {result->getOutput(0)}; } -std::vector -ONNXOpCreator::convertUnsqueeze(const std::vector& inputs, +std::vector +ONNXOpCreator::convertUnsqueeze(const std::vector& inputs, const onnx::NodeProto& onnx_node) { auto* axes = findAttribute(onnx_node, "axes"); assert(axes && axes->ints_size()); @@ -333,29 +333,29 @@ ONNXOpCreator::convertUnsqueeze(const std::vector& inputs, return {result->getOutput(0)}; } -std::vector -ONNXOpCreator::convertRelu(const std::vector& inputs) { +std::vector +ONNXOpCreator::convertRelu(const std::vector& inputs) { assert(inputs.size() == 1); auto result = createOp(inputs[0]); return {result->getOutput(0)}; } -std::vector -ONNXOpCreator::convertSigmoid(const std::vector& inputs) { +std::vector 
+ONNXOpCreator::convertSigmoid(const std::vector& inputs) { assert(inputs.size() == 1); auto result = createOp(inputs[0]); return {result->getOutput(0)}; } -std::vector -ONNXOpCreator::convertElementwise(const std::vector& inputs, +std::vector +ONNXOpCreator::convertElementwise(const std::vector& inputs, mir::ops::ElementwiseOp::OpType op_type) { auto result = createOp(inputs, op_type); return {result->getOutput(0)}; } -std::vector -ONNXOpCreator::convertUpsample(const std::vector& inputs, +std::vector +ONNXOpCreator::convertUpsample(const std::vector& inputs, const onnx::NodeProto& node) { bool success; std::string mode; @@ -383,8 +383,8 @@ ONNXOpCreator::convertUpsample(const std::vector& inputs, scales_vector)->getOutput(0))}; } -std::vector -ONNXOpCreator::convertBatchNorm(const std::vector& inputs, +std::vector +ONNXOpCreator::convertBatchNorm(const std::vector& inputs, const onnx::NodeProto& onnx_node, InputTensors& input_tensors) { // overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias @@ -423,8 +423,8 @@ ONNXOpCreator::convertBatchNorm(const std::vector& inputs, return {convertMIRToONNX(result->getOutput(0))}; } -std::vector -ONNXOpCreator::convertDropout(const std::vector& inputs, +std::vector +ONNXOpCreator::convertDropout(const std::vector& inputs, const onnx::NodeProto& onnx_node) { bool found; float value; @@ -434,8 +434,8 @@ ONNXOpCreator::convertDropout(const std::vector& inputs, return {result->getOutput(0)}; } -std::vector -ONNXOpCreator::convertScale(const std::vector& inputs, +std::vector +ONNXOpCreator::convertScale(const std::vector& inputs, const onnx::NodeProto& onnx_node) { bool found; float value; @@ -448,8 +448,8 @@ ONNXOpCreator::convertScale(const std::vector& inputs, return {result->getOutput(0)}; } -std::vector -ONNXOpCreator::convertShape(const std::vector& inputs) { +std::vector +ONNXOpCreator::convertShape(const std::vector& inputs) { const auto& input_shape = inputs[0]->getShape(); int size = input_shape.rank(); Shape output_shape{size}; @@ -462,7 +462,7 @@ ONNXOpCreator::convertShape(const std::vector& inputs) { return {result->getOutput(0)}; } -std::vector +std::vector ONNXOpCreator::convertGivenTensorFill(const onnx::NodeProto& onnx_node, InputTensors& input_tensors) { auto values_att = findAttribute(onnx_node, "values"); @@ -478,7 +478,7 @@ ONNXOpCreator::convertGivenTensorFill(const onnx::NodeProto& onnx_node, return {result->getOutput(0)}; } -std::vector +std::vector ONNXOpCreator::convertConstant(const onnx::NodeProto& onnx_node, InputTensors& input_tensors) { assert((onnx_node.attribute_size() == 1) && @@ -493,8 +493,8 @@ ONNXOpCreator::convertConstant(const onnx::NodeProto& onnx_node, return {op}; } -std::vector -ONNXOpCreator::convertGemm(const std::vector& inputs, +std::vector +ONNXOpCreator::convertGemm(const std::vector& inputs, const onnx::NodeProto& onnx_node) { bool found; int ivalue; @@ -546,20 +546,20 @@ ONNXOpCreator::convertGemm(const std::vector& inputs, beta_tensor = TensorVariant(beta_tensor, mult_a_b); } auto beta = createOp(beta_tensor)->getOutput(0); - std::vector descriptors = {beta, input_c}; - auto c_mult = createOp(descriptors, + std::vector mul_inputs = {beta, input_c}; + auto c_mult = createOp(mul_inputs, ops::ElementwiseOp::OpType::mul)->getOutput(0); assert(c_mult->getShape() == mult_a_b); auto result = createOp(input_a, input_b, c_mult); return {result->getOutput(0)}; } -mir::IODescriptor ONNXOpCreator::convertONNXToMIR(mir::IODescriptor arg) { +mir::Operation::Output* 
ONNXOpCreator::convertONNXToMIR(mir::Operation::Output* arg) { // NCHW -> NHWC return createOp(arg, std::vector{0, 2, 3, 1})->getOutput(0); } -mir::IODescriptor ONNXOpCreator::convertMIRToONNX(mir::IODescriptor arg) { +mir::Operation::Output* ONNXOpCreator::convertMIRToONNX(mir::Operation::Output* arg) { // NHWC -> NCHW return createOp(arg, std::vector{0, 3, 1, 2})->getOutput(0); } diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h index 7c68e38..6a579bf 100644 --- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h +++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h @@ -40,82 +40,82 @@ public: void setMirGraph(mir::Graph* g) { _graph = g; }; - std::vector - convertConv2D(const std::vector& inputs, + std::vector + convertConv2D(const std::vector& inputs, const onnx::NodeProto& onnx_node); - std::vector - convertConcat(const std::vector& inputs, + std::vector + convertConcat(const std::vector& inputs, const onnx::NodeProto& onnx_node); - std::vector + std::vector convertGivenTensorFill(const onnx::NodeProto& onnx_node, InputTensors& input_tensors); - std::vector + std::vector convertConstant(const onnx::NodeProto& onnx_node, - InputTensors& input_tensors); + InputTensors& input_tensors); - std::vector - convertPool(const std::vector& inputs, + std::vector + convertPool(const std::vector& inputs, ONNXOpCode op_code, const onnx::NodeProto& onnx_node); - std::vector - convertPad(const std::vector& inputs, + std::vector + convertPad(const std::vector& inputs, const onnx::NodeProto& onnx_node); - std::vector - convertSoftmax(const std::vector& inputs, + std::vector + convertSoftmax(const std::vector& inputs, const onnx::NodeProto& onnx_node); - std::vector - convertReshape(const std::vector& inputs); + std::vector + convertReshape(const std::vector& inputs); - std::vector - convertRelu(const std::vector& inputs); + std::vector + convertRelu(const std::vector& inputs); - std::vector - convertSigmoid(const std::vector& inputs); + std::vector + convertSigmoid(const std::vector& inputs); - std::vector - convertUnsqueeze(const std::vector& inputs, + std::vector + convertUnsqueeze(const std::vector& inputs, const onnx::NodeProto& onnx_node); - std::vector - convertUpsample(const std::vector& inputs, + std::vector + convertUpsample(const std::vector& inputs, const onnx::NodeProto& onnx_node); - std::vector - convertElementwise(const std::vector& inputs, + std::vector + convertElementwise(const std::vector& inputs, mir::ops::ElementwiseOp::OpType op_type); - std::vector - convertScale(const std::vector& inputs, + std::vector + convertScale(const std::vector& inputs, const onnx::NodeProto& onnx_node); - std::vector - convertShape(const std::vector& inputs); + std::vector + convertShape(const std::vector& inputs); - std::vector - convertBatchNorm(const std::vector& inputs, + std::vector + convertBatchNorm(const std::vector& inputs, const onnx::NodeProto& onnx_node, InputTensors& input_tensors); - std::vector - convertDropout(const std::vector& inputs, + std::vector + convertDropout(const std::vector& inputs, const onnx::NodeProto& onnx_node); - std::vector - convertGather(const std::vector& inputs, + std::vector + convertGather(const std::vector& inputs, const onnx::NodeProto& onnx_node); - std::vector - convertGemm(const std::vector& inputs, + std::vector + convertGemm(const std::vector& inputs, const onnx::NodeProto& onnx_node); - mir::IODescriptor convertONNXToMIR(mir::IODescriptor arg); - mir::IODescriptor 
convertMIRToONNX(mir::IODescriptor arg); + mir::Operation::Output* convertONNXToMIR(mir::Operation::Output* arg); + mir::Operation::Output* convertMIRToONNX(mir::Operation::Output* arg); private: template diff --git a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp index 5d46bad..26ba9c4 100644 --- a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp +++ b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp @@ -279,7 +279,7 @@ void ModelAnalyzer::analyze(const mir::Graph* g) { for (const auto& out : node->getOutputs()) { const auto& consumers = out.getConsumers(); std::transform(consumers.begin(), consumers.end(), std::back_inserter(next_nodes), - [](const Operation::Input* input) { return input->getNode(); }); + [](Operation::Input* input) { return input->getNode(); }); } if (edge == next_nodes.size()) { // this node is fully analyzed, push it into RPO and pop from stack @@ -367,7 +367,8 @@ void ModelAnalyzer::visit(ops::ConstantOp& op) { // FIXME This is to work around deserializeTensors not being able to deserialize tensors of type // other than float32. - if (op.getOutput(0)->getConsumers().empty()) + const auto* output = op.getOutput(0); + if (output->getConsumers().empty()) return; appendOperationToInference(&op, "constant"); diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp index b2fa2cc..72ab0cf 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp @@ -165,8 +165,8 @@ void TfliteImporter::walkSubGraph(const SubGraph* s) { } void TfliteImporter::walkOperator(const Operator* op) { - std::vector inputs = getMIRInputsForOperator(op); - std::vector outputs; + std::vector inputs = getMIRInputsForOperator(op); + std::vector outputs; BuiltinOperator opcode = (*_opcodes)[op->opcode_index()]->builtin_code(); switch (opcode) { @@ -282,8 +282,8 @@ void TfliteImporter::walkOperator(const Operator* op) { } } -std::vector TfliteImporter::getMIRInputsForOperator(const Operator* op) { - std::vector inputs; +std::vector TfliteImporter::getMIRInputsForOperator(const Operator* op) { + std::vector inputs; try { for (auto i : *(op->inputs())) { diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.h b/contrib/nnc/passes/tflite_frontend/tflite_importer.h index cbfcf7d..cac6616 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_importer.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_importer.h @@ -72,9 +72,9 @@ private: std::vector _graphInputs; std::vector _graphOutputs; - // This map maps indices of TFLite tensors to MIR operations/nodes - // that correspond to operations having these tensors as output. - std::map _tensorMap; + // Maps TFLite tensors indices to corresponding MIR operation outputs. + std::map _tensorMap; + // set of strings describing incorrect parts of network and parts of network unsupported by NNC std::set _problemsOpSet; @@ -108,9 +108,9 @@ private: void setIrNodeNames(); /** - * @brief Return MIR ops, preceding given tflite operator + * @brief Returns MIR operation outputs corresponding to the inputs of the given operator. 
*/ - std::vector getMIRInputsForOperator(const ::tflite::Operator* op); + std::vector getMIRInputsForOperator(const ::tflite::Operator* op); mir::TensorVariant createTensor(const ::tflite::Tensor* t, const ::tflite::Buffer* b); diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp index 5b48b3e..b09f8d5 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp @@ -89,8 +89,8 @@ static std::vector convertIntTensorToVector(const mir::Tensor& return v; } -static const mir::TensorVariant& extractTensor(mir::IODescriptor descr) { - auto constant_op = dynamic_cast(descr->getNode()); +static const mir::TensorVariant& extractTensor(const mir::Operation::Output* output) { + auto constant_op = dynamic_cast(output->getNode()); if (constant_op == nullptr) throw PassException("Non-constant input is not supported."); return constant_op->getValue(); @@ -101,9 +101,9 @@ void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts, checkActivationType(opts->fused_activation_function(), problems_op_set); } -std::vector +std::vector TFLiteOpCreator::convertConv2D(const Conv2DOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); auto kernel = inputs.at(1); auto bias = inputs.at(2); @@ -130,9 +130,9 @@ void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts, checkActivationType(opts->fused_activation_function(), problems_op_set); } -std::vector +std::vector TFLiteOpCreator::convertDepthwiseConv2D(const DepthwiseConv2DOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); auto kernel = inputs.at(1); auto bias = inputs.at(2); @@ -163,9 +163,9 @@ void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts, checkActivationType(opts->fused_activation_function(), problems_op_set); } -std::vector +std::vector TFLiteOpCreator::convertConcatenation(const ::tflite::ConcatenationOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto result = createOp(inputs, opts->axis()); return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())}; } @@ -175,9 +175,9 @@ void TFLiteOpCreator::checkPool2D(const Pool2DOptions* opts, checkActivationType(opts->fused_activation_function(), problems_op_set); } -std::vector +std::vector TFLiteOpCreator::convertMaxPool2D(const ::tflite::Pool2DOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); const auto& input_shape = input->getShape(); @@ -195,9 +195,9 @@ TFLiteOpCreator::convertMaxPool2D(const ::tflite::Pool2DOptions* opts, return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())}; } -std::vector +std::vector TFLiteOpCreator::convertAveragePool2D(const ::tflite::Pool2DOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); const auto& input_shape = input->getShape(); @@ -215,9 +215,9 @@ TFLiteOpCreator::convertAveragePool2D(const ::tflite::Pool2DOptions* opts, return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())}; } -std::vector +std::vector TFLiteOpCreator::convertSoftmax(const ::tflite::SoftmaxOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); // Softmax in TFLite is always 2-D. 
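Editorial note: the converters below (Slice, TransposeConv, Mean, Pad, Transpose, StridedSlice) all read compile-time operands through extractTensor(). A minimal sketch of that idiom; readConstantVector() is a hypothetical helper, and the spelled-out template arguments assume convertIntTensorToVector() accepts a mir::Tensor<int32_t>, as its uses in this file suggest:

    // Sketch only, not part of the patch.
    static std::vector<int32_t>
    readConstantVector(const std::vector<mir::Operation::Output*>& inputs,
                       std::size_t index) {
      // extractTensor() throws PassException unless the operand's producing
      // node is a ConstantOp, i.e. the value is known at import time.
      mir::Tensor<int32_t> tensor(extractTensor(inputs.at(index)));
      return convertIntTensorToVector(tensor);
    }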
@@ -227,9 +227,9 @@ TFLiteOpCreator::convertSoftmax(const ::tflite::SoftmaxOptions* opts, return {result->getOutput(0)}; } -std::vector +std::vector TFLiteOpCreator::convertSlice(const ::tflite::SliceOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); mir::Tensor begin_tensor(extractTensor(inputs.at(1))); mir::Tensor size_tensor(extractTensor(inputs.at(2))); @@ -240,9 +240,9 @@ TFLiteOpCreator::convertSlice(const ::tflite::SliceOptions* opts, return {result->getOutput(0)}; } -std::vector +std::vector TFLiteOpCreator::convertReshape(const ::tflite::ReshapeOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); // TODO: we should also support "-1" values in new_shape, which means that correct @@ -252,9 +252,9 @@ TFLiteOpCreator::convertReshape(const ::tflite::ReshapeOptions* opts, return {result->getOutput(0)}; } -std::vector +std::vector TFLiteOpCreator::convertTransposeConv(const ::tflite::TransposeConvOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { mir::Tensor output_shape_tensor(extractTensor(inputs.at(0))); auto kernel = inputs.at(1); auto input = inputs.at(2); @@ -278,9 +278,9 @@ void TFLiteOpCreator::checkResizeNearestNeighbor(const ::tflite::ResizeNearestNe problems_op_set.insert("'align_corners' is not currently supported"); } -std::vector +std::vector TFLiteOpCreator::convertResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); mir::Tensor size_tensor(extractTensor(inputs.at(1))); @@ -294,27 +294,27 @@ TFLiteOpCreator::convertResizeNearestNeighbor(const ::tflite::ResizeNearestNeigh return {result->getOutput(0)}; } -std::vector +std::vector TFLiteOpCreator::createElementwise(ops::ElementwiseOp::OpType op_type, ::tflite::ActivationFunctionType activation, - const std::vector& inputs) { + const std::vector& inputs) { auto result = createOp(inputs, op_type); return {addFusedActivation(result->getOutput(0), activation)}; } -std::vector -TFLiteOpCreator::convertSquaredDifference(const std::vector& inputs) { +std::vector +TFLiteOpCreator::convertSquaredDifference(const std::vector& inputs) { auto result = createOp(inputs, ops::ElementwiseOp::OpType::sub); - result = createOp(std::vector{ + result = createOp(std::vector{ result->getOutput(0), result->getOutput(0)}, ops::ElementwiseOp::OpType::mul); return {result->getOutput(0)}; } -std::vector +std::vector TFLiteOpCreator::convertMean(const ::tflite::ReducerOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); mir::Tensor axes_tensor(extractTensor(inputs.at(1))); @@ -329,9 +329,9 @@ void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions* opts, checkActivationType(opts->fused_activation_function(), problems_op_set); } -std::vector +std::vector TFLiteOpCreator::convertFullyConnected(const ::tflite::FullyConnectedOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); auto weights = inputs.at(1); auto bias = inputs.at(2); @@ -361,8 +361,9 @@ void TFLiteOpCreator::checkActivationType(ActivationFunctionType activation_type + EnumNameActivationFunctionType(activation_type)); } -mir::IODescriptor TFLiteOpCreator::addFusedActivation(mir::IODescriptor input, - ActivationFunctionType activation_type) { +mir::Operation::Output* +TFLiteOpCreator::addFusedActivation(mir::Operation::Output* input, + 
ActivationFunctionType activation_type) { // TODO Support other activation function types. switch (activation_type) { case ActivationFunctionType_NONE: @@ -378,9 +379,9 @@ mir::IODescriptor TFLiteOpCreator::addFusedActivation(mir::IODescriptor input, } } -std::vector +std::vector TFLiteOpCreator::convertSqueeze(const ::tflite::SqueezeOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); std::vector squeeze_dims(opts->squeeze_dims()->begin(), @@ -389,9 +390,9 @@ TFLiteOpCreator::convertSqueeze(const ::tflite::SqueezeOptions* opts, return {result->getOutput(0)}; } -std::vector +std::vector TFLiteOpCreator::convertPad(const ::tflite::PadOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); mir::Tensor paddings_tensor(extractTensor(inputs.at(1))); @@ -412,49 +413,49 @@ TFLiteOpCreator::convertPad(const ::tflite::PadOptions* opts, return {result->getOutput(0)}; } -std::vector -TFLiteOpCreator::convertTanh(const std::vector& inputs) { +std::vector +TFLiteOpCreator::convertTanh(const std::vector& inputs) { auto input = inputs.at(0); auto result = createOp(input); return {result->getOutput(0)}; } -std::vector -TFLiteOpCreator::convertReLU(const std::vector& inputs) { +std::vector +TFLiteOpCreator::convertReLU(const std::vector& inputs) { auto input = inputs.at(0); auto result = createOp(input); return {result->getOutput(0)}; } -std::vector -TFLiteOpCreator::convertReLU6(const std::vector& inputs) { +std::vector +TFLiteOpCreator::convertReLU6(const std::vector& inputs) { auto input = inputs.at(0); auto result = createOp(input, 6); return {result->getOutput(0)}; } -std::vector -TFLiteOpCreator::convertSqrt(const std::vector& inputs) { +std::vector +TFLiteOpCreator::convertSqrt(const std::vector& inputs) { auto input = inputs.at(0); auto result = createOp(input); return {result->getOutput(0)}; } -std::vector -TFLiteOpCreator::convertLogistic(const std::vector& inputs) { +std::vector +TFLiteOpCreator::convertLogistic(const std::vector& inputs) { auto input = inputs.at(0); auto result = createOp(input); return {result->getOutput(0)}; } -std::vector +std::vector TFLiteOpCreator::convertTranspose(const ::tflite::TransposeOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); mir::Tensor perm_tensor(extractTensor(inputs.at(1))); @@ -472,9 +473,9 @@ void TFLiteOpCreator::checkStridedSlice(const ::tflite::StridedSliceOptions* opt problems_op_set.insert("StridedSlice: parameter 'new_axis_mask' is not supported."); } -std::vector +std::vector TFLiteOpCreator::convertStridedSlice(const ::tflite::StridedSliceOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); mir::Tensor begin_tensor(extractTensor(inputs.at(1))); mir::Tensor end_tensor(extractTensor(inputs.at(2))); @@ -519,9 +520,9 @@ TFLiteOpCreator::convertStridedSlice(const ::tflite::StridedSliceOptions* opts, return {result->getOutput(0)}; } -std::vector +std::vector TFLiteOpCreator::convertLeakyReLU(const ::tflite::LeakyReluOptions* opts, - const std::vector& inputs) { + const std::vector& inputs) { auto input = inputs.at(0); auto result = createOp(input, opts->alpha()); diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h index ef40c9c..cfb954a 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h @@ -45,96 +45,96 
@@ class TFLiteOpCreator { public: explicit TFLiteOpCreator(Graph* g) : _graph(g) {} - std::vector + std::vector convertConv2D(const ::tflite::Conv2DOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertDepthwiseConv2D(const ::tflite::DepthwiseConv2DOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertConcatenation(const ::tflite::ConcatenationOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertMaxPool2D(const ::tflite::Pool2DOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertAveragePool2D(const ::tflite::Pool2DOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertMean(const ::tflite::ReducerOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertSoftmax(const ::tflite::SoftmaxOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertSlice(const ::tflite::SliceOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertReshape(const ::tflite::ReshapeOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertFullyConnected(const ::tflite::FullyConnectedOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector - convertLogistic(const std::vector& inputs); + std::vector + convertLogistic(const std::vector& inputs); - std::vector - convertSqrt(const std::vector& inputs); + std::vector + convertSqrt(const std::vector& inputs); - std::vector + std::vector convertSqueeze(const ::tflite::SqueezeOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector createElementwise(ops::ElementwiseOp::OpType op_type, ::tflite::ActivationFunctionType activation, - const std::vector& inputs); + const std::vector& inputs); - std::vector - convertSquaredDifference(const std::vector& inputs); + std::vector + convertSquaredDifference(const std::vector& inputs); - std::vector - convertTanh(const std::vector& inputs); + std::vector + convertTanh(const std::vector& inputs); - std::vector - convertReLU(const std::vector& inputs); + std::vector + convertReLU(const std::vector& inputs); - std::vector - convertReLU6(const std::vector& inputs); + std::vector + convertReLU6(const std::vector& inputs); - std::vector + std::vector convertTransposeConv(const ::tflite::TransposeConvOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertPad(const ::tflite::PadOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertTranspose(const ::tflite::TransposeOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertStridedSlice(const ::tflite::StridedSliceOptions* opts, - const std::vector& inputs); + const std::vector& inputs); - std::vector + std::vector convertLeakyReLU(const ::tflite::LeakyReluOptions* opts, - const std::vector& inputs); + const std::vector& inputs); void checkPool2D(const ::tflite::Pool2DOptions* opts, std::set& problem_ops_set); @@ 
-165,8 +165,8 @@ private: void checkActivationType(::tflite::ActivationFunctionType, std::set&); - mir::IODescriptor addFusedActivation(mir::IODescriptor input, - ::tflite::ActivationFunctionType activation_type); + mir::Operation::Output* addFusedActivation(mir::Operation::Output* input, + ::tflite::ActivationFunctionType activation_type); template mir::Operation* createOp(Types&&... args); diff --git a/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp b/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp index c2360d6..1c97a3d 100644 --- a/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp +++ b/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp @@ -60,7 +60,7 @@ using namespace mir; namespace { -using OpConstructor = function& inputs)>; +using OpConstructor = function& inputs)>; const char* artifactName = "nnmodel"; @@ -72,7 +72,7 @@ const char* artifactName = "nnmodel"; * */ void fillGraph(Graph& g, const OpConstructor& op_constr, const vector& input_shapes) { // Create graph inputs. - vector inputs; + vector inputs; for (std::size_t i = 0; i < input_shapes.size(); ++i) { auto input_op = g.create("x" + to_string(i), input_shapes[i]); inputs.push_back(input_op->getOutput(0)); @@ -214,7 +214,8 @@ TEST(acl_backend_mir_to_dom, constant) { TensorVariant constant_data = createTensorVariant(shape); Graph g; - OpConstructor op_generator = [&constant_data](Graph& g, const vector& inputs) { + OpConstructor op_generator = [&constant_data](Graph& g, + const vector& inputs) { return g.create("data", constant_data); }; @@ -233,7 +234,7 @@ TEST(acl_backend_mir_to_dom, bias) { TensorVariant w = createTensorVariant({channels}); Graph g; - OpConstructor op_generator = [&w](Graph& g, const vector& inputs) { + OpConstructor op_generator = [&w](Graph& g, const vector& inputs) { auto bias = g.create("", w)->getOutput(0); return g.create("bias", inputs[0], bias); }; @@ -254,7 +255,7 @@ TEST(acl_backend_mir_to_dom, scale) { TensorVariant w = createTensorVariant({channels}); Graph g; - OpConstructor op_generator = [&w](Graph& g, const vector& inputs) { + OpConstructor op_generator = [&w](Graph& g, const vector& inputs) { auto scale = g.create("", w)->getOutput(0); return g.create("scale", inputs[0], scale); }; @@ -275,7 +276,7 @@ TEST(acl_backend_mir_to_dom, scale) { TEST(acl_backend_mir_to_dom, concat) { Graph g; - OpConstructor op_generator = [](Graph& g, const vector& inputs) { + OpConstructor op_generator = [](Graph& g, const vector& inputs) { return g.create("concat", inputs, 3); }; vector input_shapes{{2, 3, 5, 1}, {2, 3, 5, 3}}; @@ -318,7 +319,7 @@ TEST(acl_backend_mir_to_dom, conv2d) { Graph g; OpConstructor op_generator = [kernel_tensor, strides](mir::Graph& g, - const std::vector& inputs) { + const std::vector& inputs) { std::vector padding{0, 0}; auto kernel = g.create("", kernel_tensor)->getOutput(0); return g.create("conv2d", inputs[0], kernel, strides, padding, padding); @@ -345,7 +346,7 @@ TEST(acl_backend_mir_to_dom, depthwise_conv) { Graph g; OpConstructor op_generator = [kernel_tensor, strides](mir::Graph& g, - const std::vector& inputs) { + const std::vector& inputs) { std::vector padding{0, 0}; auto kernel = g.create("", kernel_tensor)->getOutput(0); return g.create("depthwiseConv2d", inputs[0], kernel, @@ -375,7 +376,7 @@ TEST(acl_backend_mir_to_dom, fully_connected) { TensorVariant weights_tensor = createTensorVariant(weights_shape); Graph g; - OpConstructor opGenerator = [weights_tensor](Graph& g, const vector& inputs) { + OpConstructor opGenerator = [weights_tensor](Graph& g, const vector& inputs) { 
     auto weights = g.create<ops::ConstantOp>("", weights_tensor)->getOutput(0);
     return g.create<ops::FullyConnectedOp>("fc", inputs[0], weights);
   };
@@ -398,12 +399,12 @@ TEST(acl_backend_mir_to_dom, maxpool) {
   mir::Shape strides{1, 1};
 
   Graph g;
-  OpConstructor op_generator = [window_shape, strides](mir::Graph& g,
-                                                       const std::vector<mir::IODescriptor>& inputs) {
-    std::vector<int32_t> padding{0, 0};
-    return g.create<ops::PoolOp>("maxPool", inputs[0], ops::PoolOp::PoolingType::MAX,
-                                 window_shape, strides, padding, padding,
-                                 mir::ops::PoolOp::BorderType::EMPTY);
+  OpConstructor op_generator = [window_shape, strides](
+      mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
+    std::vector<int32_t> padding{0, 0};
+    return g.create<ops::PoolOp>("maxPool", inputs[0], ops::PoolOp::PoolingType::MAX,
+                                 window_shape, strides, padding, padding,
+                                 mir::ops::PoolOp::BorderType::EMPTY);
   };
 
   vector<Shape> input_shapes{{1, 10, 10, 3}};
@@ -447,7 +448,7 @@ static void testActivationOp(const OpConstructor& op_generator) {
 }
 
 TEST(acl_backend_mir_to_dom, relu) {
-  OpConstructor op_generator = [](Graph& g, const std::vector<IODescriptor>& inputs) {
+  OpConstructor op_generator = [](Graph& g, const std::vector<Operation::Output*>& inputs) {
     return g.create<ops::ReluOp>("relu", inputs[0]);
   };
 
@@ -456,7 +457,7 @@ TEST(acl_backend_mir_to_dom, relu) {
 
 TEST(acl_backend_mir_to_dom, capped_relu) {
   float cap = 6;
-  OpConstructor op_generator = [cap](Graph& g, const std::vector<IODescriptor>& inputs) {
+  OpConstructor op_generator = [cap](Graph& g, const std::vector<Operation::Output*>& inputs) {
     return g.create<ops::CappedReluOp>("capped_relu", inputs[0], cap);
   };
 
@@ -464,7 +465,7 @@ TEST(acl_backend_mir_to_dom, capped_relu) {
 }
 
 TEST(acl_backend_mir_to_dom, sigmoid) {
-  OpConstructor op_generator = [](Graph& g, const std::vector<IODescriptor>& inputs) {
+  OpConstructor op_generator = [](Graph& g, const std::vector<Operation::Output*>& inputs) {
     return g.create<ops::SigmoidOp>("sigmoid", inputs[0]);
   };
 
@@ -476,7 +477,7 @@ TEST(acl_backend_mir_to_dom, DISABLED_elu) {
 }
 
 TEST(acl_backend_mir_to_dom, tanh) {
-  OpConstructor op_generator = [](Graph& g, const std::vector<IODescriptor>& inputs) {
+  OpConstructor op_generator = [](Graph& g, const std::vector<Operation::Output*>& inputs) {
     return g.create<ops::TanhOp>("tanh", inputs[0]);
   };
 
@@ -489,7 +490,7 @@ TEST(acl_backend_mir_to_dom, DISABLED_reduce_mean) {
 
 TEST(acl_backend_mir_to_dom, softmax) {
   Graph g;
-  OpConstructor op_generator = [](Graph& g, const vector<IODescriptor>& inputs) {
+  OpConstructor op_generator = [](Graph& g, const vector<Operation::Output*>& inputs) {
     return g.create<ops::SoftmaxOp>("softmax", inputs[0], 3);
   };
   vector<Shape> input_shapes{{1, 1, 1, 3}};
@@ -517,7 +518,7 @@ TEST(acl_backend_mir_to_dom, reshape) {
   Shape input_shape{1, h, w, c};
   Shape output_shape{1, h * w * c};
 
-  OpConstructor op_generator = [output_shape](Graph& g, const vector<IODescriptor>& inputs) {
+  OpConstructor op_generator = [output_shape](Graph& g, const vector<Operation::Output*>& inputs) {
     return g.create<ops::ReshapeOp>("reshape", inputs[0], output_shape);
   };
 
@@ -545,7 +546,7 @@ TEST(acl_backend_mir_to_dom, transpose) {
   vector<std::size_t> perm{0, 3, 1, 2};
 
   Graph g;
-  OpConstructor op_generator = [&perm](Graph& g, const vector<IODescriptor>& inputs) {
+  OpConstructor op_generator = [&perm](Graph& g, const vector<Operation::Output*>& inputs) {
     return g.create<ops::TransposeOp>("transpose", inputs[0], perm);
   };
   vector<Shape> input_shapes{{1, 10, 10, channels}};
diff --git a/contrib/nnc/unittests/core/Graph.cpp b/contrib/nnc/unittests/core/Graph.cpp
index 0cd8801..0ef81c3 100644
--- a/contrib/nnc/unittests/core/Graph.cpp
+++ b/contrib/nnc/unittests/core/Graph.cpp
@@ -37,9 +37,8 @@ TEST(Graph, ReplaceInputs) {
   auto n2 = g->create<ops::ReluOp>("op2", n1->getOutput(0));
   auto n3 = g->create<ops::ReluOp>("op3", n2->getOutput(0));
   auto n4 = g->create<ops::ReluOp>("op4", n2->getOutput(0));
-  auto n5 = g->create<ops::ConcatOp>("op5",
-                                     std::vector<IODescriptor>{n3->getOutput(0), n4->getOutput(0)},
-                                     0);
+  std::vector<Operation::Output*> concat_inputs{n3->getOutput(0), n4->getOutput(0)};
+  auto n5 = g->create<ops::ConcatOp>("op5", concat_inputs, 0);
 
   g->replaceInputNodes({"op1", "op4"});
 
diff --git a/contrib/nnc/unittests/core/ShapeInference.cpp b/contrib/nnc/unittests/core/ShapeInference.cpp
index 31c20e4..dac2738 100644
--- a/contrib/nnc/unittests/core/ShapeInference.cpp
+++ b/contrib/nnc/unittests/core/ShapeInference.cpp
@@ -140,10 +140,8 @@ TEST(ShapeInferenceTest, ElementwiseBC) {
   auto input = g.create<ops::InputOp>("input1", input_shape);
   auto input2 = g.create<ops::InputOp>("input2", input2_shape);
-  auto add = g.create<ops::ElementwiseOp>("add_1",
-                                          std::vector<IODescriptor>{input->getOutput(0),
-                                                                    input2->getOutput(0)},
-                                          ops::ElementwiseOp::OpType::add);
+  std::vector<Operation::Output*> add_inputs{input->getOutput(0), input2->getOutput(0)};
+  auto add = g.create<ops::ElementwiseOp>("add_1", add_inputs, ops::ElementwiseOp::OpType::add);
 
   ASSERT_EQ(add->getOutputShape(0), Shape({1, 10, 10, 10}));
 }
diff --git a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
index daf1b0e..ac81595 100644
--- a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
+++ b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
@@ -119,10 +119,11 @@
  */
 mir::Operation* fillGraph(mir::Graph& g,
-                          const function<mir::Operation* (mir::Graph& g, const std::vector<mir::IODescriptor>& inputs)>& op_gen,
+                          const function<mir::Operation* (mir::Graph& g,
+                              const std::vector<mir::Operation::Output*>& inputs)>& op_gen,
                           const vector<unique_ptr<mir::TensorVariant>>& input_ntensors) {
   // Create graph inputs.
-  std::vector<mir::IODescriptor> inputs;
+  std::vector<mir::Operation::Output*> inputs;
   for (std::size_t i = 0; i < input_ntensors.size(); ++i) {
     auto input_op = g.create<mir::ops::InputOp>("x" + std::to_string(i),
                                                 input_ntensors[i]->getShape());
@@ -301,7 +302,7 @@ void compareResults(const mir::TensorVariant &ref_nnc_tensor, const Tensor &test
 template <typename TestFunc, typename... Args>
 void createAndRunTestGraph(
     function<mir::Operation* (mir::Graph& g,
-                              const std::vector<mir::IODescriptor>& inputs)> op_generator,
+                              const std::vector<mir::Operation::Output*>& inputs)> op_generator,
     TestFunc artifactOperation,
     const vector<unique_ptr<mir::TensorVariant>>& input_ntensors,
     Args& ...input_atensors) {
@@ -336,7 +337,7 @@ TEST(cpp_operations_test, bias) {
   fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
   fillTensors(input_ntensors[1], input_atensor1, weights_shape_data, 1.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::BiasAddOp>("y", inputs[0], inputs[1]);
   };
 
@@ -352,7 +353,7 @@ TEST(cpp_operations_test, scale) {
   fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
   fillTensors(input_ntensors[1], input_atensor1, weights_shape_data, 1.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ScaleOp>("y", inputs[0], inputs[1]);
   };
 
@@ -367,7 +368,7 @@ TEST(cpp_operations_test, capped_relu) {
   Tensor input_atensor;
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
-  auto op_generator = [cap](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [cap](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::CappedReluOp>("y", inputs[0], cap);
   };
 
@@ -389,7 +390,8 @@ TEST(cpp_operations_test, concat) {
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
   fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
   fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
-  auto op_generator = [axis](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [axis](mir::Graph& g,
+                             const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ConcatOp>("y", inputs, axis);
   };
 
@@ -412,7 +414,7 @@ TEST(cpp_operations_test, add2bc) {
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
   fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
   fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::add);
   };
 
@@ -436,7 +438,7 @@ TEST(cpp_operations_test, mul3bc) {
   fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
   fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
   fillTensors(input_ntensors[2], input_atensors[2], shape_data3, 3.0f);
-  auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto opGenerator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::mul);
   };
 
@@ -460,7 +462,7 @@ TEST(cpp_operations_test, div3bc) {
   fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 5.0f);
   fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
   fillTensors(input_ntensors[2], input_atensors[2], shape_data3, 3.0f);
-  auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto opGenerator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::div);
   };
 
@@ -483,7 +485,7 @@ TEST(cpp_operations_test, add2) {
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
   fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
   fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::add);
   };
 
@@ -505,7 +507,7 @@ TEST(cpp_operations_test, sub3) {
   fillTensors(input_n_tensors[0], input_atensors[0], shape_data, 1.0f);
   fillTensors(input_n_tensors[1], input_atensors[1], shape_data, 2.0f);
   fillTensors(input_n_tensors[2], input_atensors[2], shape_data, 3.0f);
-  auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto opGenerator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::sub);
   };
 
@@ -527,7 +529,7 @@ TEST(cpp_operations_test, mul3) {
   fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
   fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
   fillTensors(input_ntensors[2], input_atensors[2], shape_data, 3.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::mul);
   };
 
@@ -551,7 +553,7 @@ TEST(cpp_operations_test, max4) {
   fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
   fillTensors(input_ntensors[2], input_atensors[2], shape_data, 3.0f);
   fillTensors(input_ntensors[3], input_atensors[3], shape_data, 3.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::max);
   };
 
@@ -588,7 +590,7 @@ TEST(cpp_operations_test, convTransposed2d) {
   fillTensors(input_ntensors[1], input_atensor1, kernel_shape_data, 1.0f);
   auto pad_t = mir::ops::PaddingType::Same;
   auto op_generator = [&strides, pad_t](
-      mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+      mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::DeConv2DOp>("y", inputs[0], inputs[1], strides, pad_t);
   };
 
@@ -619,7 +621,7 @@ TEST(cpp_operations_test, conv2d) {
   fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
   fillTensors(input_ntensors[1], input_atensor1, kernel_shape_data, 1.0f);
   auto op_generator = [&strides](mir::Graph& g,
-                                 const std::vector<mir::IODescriptor>& inputs) {
+                                 const std::vector<mir::Operation::Output*>& inputs) {
     std::vector<int32_t> padding{0, 0};
     return g.create<mir::ops::Conv2DOp>("y", inputs[0], inputs[1], strides,
                                         padding, padding);
@@ -652,7 +654,7 @@ TEST(cpp_operations_test, depthwise_conv) {
   fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
   fillTensors(input_ntensors[1], input_atensor1, kernel_shape_data, 1.0f);
   auto op_generator = [&strides](mir::Graph& g,
-                                 const std::vector<mir::IODescriptor>& inputs) {
+                                 const std::vector<mir::Operation::Output*>& inputs) {
     std::vector<int32_t> padding{0, 0};
     return g.create<mir::ops::DepthwiseConv2DOp>("y", inputs[0], inputs[1], strides,
                                                   padding, padding);
@@ -671,7 +673,7 @@ TEST(cpp_operations_test, fully_connected) {
   Tensor input_atensor1;
   fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
   fillTensors(input_ntensors[1], input_atensor1, weights_shape_data, 1.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::FullyConnectedOp>("y", inputs[0], inputs[1]);
   };
 
@@ -693,7 +695,8 @@ TEST(cpp_operations_test, resize_NN_test) {
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   Tensor input_atensor;
   fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
-  auto op_generator = [&res_shape](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [&res_shape](mir::Graph& g,
+                                   const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ResizeOp>(
         "y", inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
 
@@ -717,7 +720,8 @@ TEST(cpp_operations_test, resize_NN_test_scales) {
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   Tensor input_atensor;
   fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
-  auto op_generator = [&scales](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [&scales](mir::Graph& g,
+                                const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ResizeOp>(
         "y", inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales);
 
@@ -728,7 +732,7 @@ TEST(cpp_operations_test, resize_NN_test_scales) {
 template <irOps::PoolOp::PoolingType poolingType>
 static mir::Operation* createPool(mir::Graph& g,
-                                  const std::vector<mir::IODescriptor>& inputs,
+                                  const std::vector<mir::Operation::Output*>& inputs,
                                   mir::Shape& window_shape,
                                   mir::Shape& strides,
                                   irOps::PoolOp::BorderType border) {
@@ -788,7 +792,7 @@ TEST(cpp_operations_test, relu) {
   Tensor input_atensor;
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ReluOp>("y", inputs[0]);
   };
 
@@ -801,7 +805,7 @@ TEST(cpp_operations_test, leaky_relu) {
   Tensor input_atensor;
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::LeakyReluOp>("y", inputs[0], 0.1);
   };
 
@@ -814,7 +818,7 @@ TEST(cpp_operations_test, sigmoid) {
   Tensor input_atensor;
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
-  auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto opGenerator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::SigmoidOp>("y", inputs[0]);
   };
 
@@ -827,7 +831,7 @@ TEST(cpp_operations_test, elu) {
   Tensor input_atensor;
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::EluOp>("y", inputs[0], 1);
   };
 
@@ -840,7 +844,7 @@ TEST(cpp_operations_test, tanh) {
   Tensor input_atensor;
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::TanhOp>("y", inputs[0]);
   };
 
@@ -865,8 +869,8 @@ TEST(cpp_operations_test, reduceMeanTst) {
   Tensor input_atensor;
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
-  auto op_generator = [&axis_list, keep_dims](mir::Graph& g,
-                                              const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [&axis_list, keep_dims](
+      mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     auto op = g.create<mir::ops::ReduceFOp>(
         "y", inputs[0], axis_list, keep_dims, mir::ops::ReduceFOp::FuncType::mean);
 
@@ -888,7 +892,7 @@ TEST(cpp_operations_test, softmax) {
   Tensor input_atensor;
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
-  auto op_generator = [axis](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [axis](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::SoftmaxOp>("y", inputs[0], axis);
   };
 
@@ -911,7 +915,7 @@ TEST(cpp_operations_test, slice4d) {
   Tensor input_atensor;
   vector<unique_ptr<mir::TensorVariant>> input_n_tensor(1);
   fillTensors(input_n_tensor[0], input_atensor, shape_data, 1.0f);
-  auto op_gen = [st, sz](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_gen = [st, sz](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::SliceOp>("y", inputs[0], mir::Shape(st), mir::Shape(sz));
   };
 
@@ -929,7 +933,8 @@ TEST(cpp_operations_test, reshape) {
   Tensor input_atensor;
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
-  auto op_generator = [&output_nshape](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [&output_nshape](mir::Graph& g,
+                                       const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::ReshapeOp>("y", inputs[0], output_nshape);
   };
 
@@ -942,7 +947,7 @@ TEST(cpp_operations_test, sqrtTest) {
   Tensor input_atensor;
   vector<unique_ptr<mir::TensorVariant>> input_ntensor(1);
   fillTensors(input_ntensor[0], input_atensor, shape_data, 1.0f);
-  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::SqrtOp>("y", inputs[0]);
   };
   createAndRunTestGraph(op_generator, sqrtFN, input_ntensor, input_atensor);
@@ -967,7 +972,7 @@ TEST(cpp_operations_test, pad) {
                             mir::DTYPE::FLOAT32, sizeof(float));
 
   auto op_generator = [num_dims, &paddings, &constant_value]
-                      (mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+                      (mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
     return g.create<mir::ops::PadOp>("y", inputs[0], num_dims, paddings, constant_value);
   };
 
@@ -987,7 +992,8 @@ TEST(cpp_operations_test, transpose) {
       {3, 2, 1, 0}
   };
   for (const auto& permute: test_cases_pack_4d) {
-    auto op_generator = [&permute](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    auto op_generator = [&permute](mir::Graph& g,
+                                   const std::vector<mir::Operation::Output*>& inputs) {
       return g.create<mir::ops::TransposeOp>("transpose", inputs[0], permute);
     };
     createAndRunTestGraph(op_generator, transpose, input_ntensor_4d, input_atensor_4d);
@@ -1004,7 +1010,8 @@ TEST(cpp_operations_test, transpose) {
       {2, 1, 0}
   };
   for (const auto& permute: test_cases_pack_3d) {
-    auto op_generator = [&permute](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    auto op_generator = [&permute](mir::Graph& g,
+                                   const std::vector<mir::Operation::Output*>& inputs) {
      return g.create<mir::ops::TransposeOp>("transpose", inputs[0], permute);
     };
     createAndRunTestGraph(op_generator, transpose, input_ntensor_3d, input_atensor_3d);
diff --git a/contrib/nnc/unittests/soft_backend/ModelAnalyzer.cpp b/contrib/nnc/unittests/soft_backend/ModelAnalyzer.cpp
index 1b3c36c..53d24d4 100644
--- a/contrib/nnc/unittests/soft_backend/ModelAnalyzer.cpp
+++ b/contrib/nnc/unittests/soft_backend/ModelAnalyzer.cpp
@@ -55,9 +55,8 @@ TEST(ModelAnalyzer, linearization) {
   Operation* head2 = g.create<ops::ReluOp>("head2", input->getOutput(0));
   Operation* tail1 = g.create<ops::ReluOp>("tail1", head1->getOutput(0));
   Operation* tail2 = g.create<ops::ReluOp>("tail2", head2->getOutput(0));
-  Operation* join = g.create<ops::ConcatOp>("join", std::vector<IODescriptor>{tail1->getOutput(0),
-                                                                              tail2->getOutput(0)},
-                                            0);
+  std::vector<Operation::Output*> concat_inputs{tail1->getOutput(0), tail2->getOutput(0)};
+  Operation* join = g.create<ops::ConcatOp>("join", concat_inputs, 0);
 
   // Check that layout is desired
   ModelAnalyzer ma;
-- 
2.7.4
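
Editor's note (not part of the patch): after this change, operation inputs are passed around as plain mir::Operation::Output* pointers rather than the removed IODescriptor alias. A minimal construction sketch against the new interface, assembled from calls that appear in the unit tests above; the buildExample wrapper, the concrete shape, and the exact include paths are illustrative assumptions, not code from this patch.

// Sketch of graph construction with the redesigned Model IR (assumed headers).
#include "core/modelIR/Graph.h"
#include "core/modelIR/operations/ConcatOp.h"
#include "core/modelIR/operations/InputOp.h"
#include "core/modelIR/operations/ReluOp.h"

#include <vector>

using namespace nnc::mir;

void buildExample() {
  Graph g;
  // An operation's result is now addressed as Operation::Output*.
  Operation* x = g.create<ops::InputOp>("x", Shape{1, 10, 10, 3});
  // Single-input ops consume a producer's output directly.
  Operation* relu = g.create<ops::ReluOp>("relu", x->getOutput(0));
  // Multi-input ops take a vector of Operation::Output* (concat axis 3 here).
  std::vector<Operation::Output*> concat_inputs{x->getOutput(0), relu->getOutput(0)};
  g.create<ops::ConcatOp>("concat", concat_inputs, 3);
}

The same pattern shows up throughout the converted frontends and tests: helper lambdas and factory functions now uniformly accept const std::vector<mir::Operation::Output*>&, so multi-input constructor calls that previously built a temporary vector inline are hoisted into named locals, as in the Graph.cpp and ModelAnalyzer.cpp hunks above.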