From a3276bb0665ec3536b9b6668ade63bc019549bc0 Mon Sep 17 00:00:00 2001
From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Mon, 19 Nov 2018 18:32:09 +0300
Subject: [PATCH] [nnc] Make constructors of `Operations` accept other `Operation`s as arguments (#2292)

* Make constructors of `Operations` accept `Operation`s they depend on.
* Remove `connectInput` method, which became redundant.

Signed-off-by: Sergei Barannikov
---
 contrib/nnc/core/modelIR/Operation.cpp             |  15 ++-
 contrib/nnc/include/core/modelIR/Operation.h       |  12 +-
 .../include/core/modelIR/operations/BatchNormOp.h  |  29 ++---
 .../include/core/modelIR/operations/BiasAddOp.h    |  20 ++--
 .../include/core/modelIR/operations/CappedReluOp.h |  21 ++--
 .../nnc/include/core/modelIR/operations/ConcatOp.h |  24 ++--
 .../nnc/include/core/modelIR/operations/Conv2DOp.h |  25 ++---
 .../include/core/modelIR/operations/Deconv2DOp.h   |  15 +--
 .../core/modelIR/operations/DepthwiseConv2DOp.h    |  24 ++--
 .../include/core/modelIR/operations/DropoutOp.h    |  11 +-
 .../core/modelIR/operations/ElementwiseOp.h        |   6 +-
 .../nnc/include/core/modelIR/operations/EluOp.h    |   7 +-
 .../core/modelIR/operations/FullyConnectedOp.h     |  20 ++--
 .../nnc/include/core/modelIR/operations/PadOp.h    |  19 ++--
 .../nnc/include/core/modelIR/operations/PoolOp.h   |  36 +++---
 .../nnc/include/core/modelIR/operations/ReluOp.h   |  14 +--
 .../include/core/modelIR/operations/ReshapeOp.h    |  16 +--
 .../nnc/include/core/modelIR/operations/ScaleOp.h  |  20 ++--
 .../include/core/modelIR/operations/SoftmaxOp.h    |  21 ++--
 .../include/core/modelIR/operations/SqueezeOp.h    |  12 +-
 .../nnc/include/core/modelIR/operations/TanhOp.h   |   3 +-
 .../include/core/modelIR/operations/VariableOp.h   |  14 +--
 .../nnc/include/core/modelIR/operations/common.h   |   9 +-
 .../nnc/passes/caffe_frontend/caffe_op_creator.cpp |  56 ++++------
 .../nnc/passes/caffe_frontend/caffe_op_creator.h   |  10 +-
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp |  21 ++--
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h   |  11 +-
 .../passes/tflite_frontend/tflite_op_creator.cpp   | 122 ++++++++++----------
 .../nnc/passes/tflite_frontend/tflite_op_creator.h |  27 ++---
 contrib/nnc/tests/interpreter/graph_creator.cpp    | 123 +++++++++++----------
 contrib/nnc/tests/soft_backend/CompileCPP.cpp      |   6 +-
 contrib/nnc/unittests/core/Graph.cpp               |  38 +++----
 contrib/nnc/unittests/core/NodeReplacer.cpp        |  12 +-
 contrib/nnc/unittests/core/ShapeInference.cpp      |  15 +--
 contrib/nnc/unittests/core/operation.cpp           |  30 ++---
 .../nnc/unittests/soft_backend/CPPOperations.cpp   | 108 +++++++++++------
 contrib/nnc/unittests/soft_backend/Generator.cpp   |   3 +-
 .../nnc/unittests/soft_backend/ModelAnalyzer.cpp   |  20 ++--
 38 files changed, 432 insertions(+), 563 deletions(-)

diff --git a/contrib/nnc/core/modelIR/Operation.cpp b/contrib/nnc/core/modelIR/Operation.cpp
index 3bc8e28..c6e3e81 100644
--- a/contrib/nnc/core/modelIR/Operation.cpp
+++ b/contrib/nnc/core/modelIR/Operation.cpp
@@ -41,14 +41,13 @@ namespace nnc {
 namespace mir {

-Operation::Operation(Type type, std::size_t max_inputs, std::size_t max_outputs)
-  : _type(type), _max_inputs(max_inputs), _max_outputs(max_outputs) {
-  _inputs.resize(max_inputs);
-}
-
-void Operation::connectInputTo(int inputIndex, const IODescriptor& descriptor) {
-  descriptor.op->_outputs.emplace_back(this);
-  _inputs[inputIndex] = descriptor;
+Operation::Operation(Type type, const std::vector<IODescriptor>& args)
+  : _type(type), _num_inputs(args.size()), _num_outputs(1) {
+  _inputs.resize(_num_inputs);
+  for (std::size_t i = 0; i < _num_inputs; ++i) {
+    args[i].op->_outputs.push_back(this);
+    _inputs[i] = args[i];
+  }
 }

 const IODescriptor Operation::getOutput(std::size_t index) {
diff --git a/contrib/nnc/include/core/modelIR/Operation.h b/contrib/nnc/include/core/modelIR/Operation.h
index e6a9e6a..ac8213e 100644
--- a/contrib/nnc/include/core/modelIR/Operation.h
+++ b/contrib/nnc/include/core/modelIR/Operation.h
@@ -52,10 +52,8 @@ public:
   const std::string& getName() const { return _name; }
   void setName(const std::string& name) { _name = name; }

-  std::size_t getNumInputs() const { return _max_inputs; }
-  std::size_t getNumOutputs() const { return _max_outputs; }
-
-  void connectInputTo(int inputIndex, const IODescriptor& descriptor);
+  std::size_t getNumInputs() const { return _num_inputs; }
+  std::size_t getNumOutputs() const { return _num_outputs; }

   const IODescriptor getOutput(std::size_t index);

@@ -73,14 +71,14 @@ public:
   void accept(IVisitor* v);

 protected:
-  Operation(Type type, std::size_t max_inputs, std::size_t max_outputs);
+  Operation(Type type, const std::vector<IODescriptor>& args);

 private:
   Type _type;
   std::size_t _id;
   std::string _name;
-  std::size_t _max_inputs;
-  std::size_t _max_outputs;
+  std::size_t _num_inputs;
+  std::size_t _num_outputs;
   std::vector<IODescriptor> _inputs;
   std::vector<Operation*> _outputs;
   std::map<std::size_t, Shape> _inputShapes;
diff --git a/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h b/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h
index 9bb0e57..66572dc 100644
--- a/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h
+++ b/contrib/nnc/include/core/modelIR/operations/BatchNormOp.h
@@ -19,40 +19,31 @@

 #include "core/modelIR/Operation.h"

-namespace nnc
-{
-namespace mir
-{
-namespace ops
-{
+namespace nnc {
+namespace mir {
+namespace ops {

-class BatchNormOp : public Operation
-{
+class BatchNormOp : public Operation {
 public:
-  explicit BatchNormOp(float movingAvgFraction, float eps, bool spatial) :
-    Operation(Type::batchNorm, 1, 1),
-    _movingAvgFraction(movingAvgFraction),
-    _eps(eps),
-    _spatial(spatial)
-  {
-    // EMPTY
-  }
+  BatchNormOp(const IODescriptor& arg, float movingAvgFraction, float eps, bool spatial)
+    : Operation(Type::batchNorm, {arg}), _movingAvgFraction(movingAvgFraction), _eps(eps),
+      _spatial(spatial) {}

   /**
    * @return The epsilon value to use to avoid division by zero.
    */
-  float getEps() { return _eps; }
+  float getEps() const { return _eps; }

   /**
    * @return Factor used in computing the running mean and variance.
    * e.g., running_mean = running_mean * movingAvgFraction + mean * (1 - movingAvgFraction).
   */
-  float getMovingAvgFraction() { return _movingAvgFraction; }
+  float getMovingAvgFraction() const { return _movingAvgFraction; }

   /**
   * @return If true, compute the mean and variance across all spatial elements. If false,
   *         compute the mean and variance per feature.
*/ - bool getSpatial() { return _spatial; } + bool getSpatial() const { return _spatial; } private: float _movingAvgFraction; diff --git a/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h b/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h index 35b661a..dc42c9e 100644 --- a/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h +++ b/contrib/nnc/include/core/modelIR/operations/BiasAddOp.h @@ -20,20 +20,16 @@ #include "core/modelIR/Operation.h" #include "core/modelIR/TensorVariant.h" -namespace nnc -{ -namespace mir -{ -namespace ops -{ - -class BiasAddOp : public Operation -{ +namespace nnc { +namespace mir { +namespace ops { + +class BiasAddOp : public Operation { public: - explicit BiasAddOp(const TensorVariant& weights) : Operation(Type::biasAdd, 1, 1), - _weights(weights) {} + BiasAddOp(const IODescriptor& arg, const TensorVariant& weights) + : Operation(Type::biasAdd, {arg}), _weights(weights) {} - const TensorVariant &getWeights() const { return _weights; } + const TensorVariant& getWeights() const { return _weights; } private: TensorVariant _weights; diff --git a/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h b/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h index 5609b5d..bbb5e34 100644 --- a/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h +++ b/contrib/nnc/include/core/modelIR/operations/CappedReluOp.h @@ -19,23 +19,18 @@ #include "core/modelIR/Operation.h" -namespace nnc -{ -namespace mir -{ -namespace ops -{ +namespace nnc { +namespace mir { +namespace ops { class CappedReluOp : public Operation { - public: - explicit CappedReluOp(float cap) : Operation(Type::cappedReLU, 1, 1), _cap(cap) { - } +public: + CappedReluOp(const IODescriptor& arg, float cap) + : Operation(Type::cappedReLU, {arg}), _cap(cap) {} - float getCap() const { - return _cap; - } + float getCap() const { return _cap; } - private: +private: float _cap; }; diff --git a/contrib/nnc/include/core/modelIR/operations/ConcatOp.h b/contrib/nnc/include/core/modelIR/operations/ConcatOp.h index 928d55d..19f2eda 100644 --- a/contrib/nnc/include/core/modelIR/operations/ConcatOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ConcatOp.h @@ -17,30 +17,22 @@ #ifndef _NNC_CORE_IR_MODEL_CONCAT_OP_H_ #define _NNC_CORE_IR_MODEL_CONCAT_OP_H_ -#include - #include "core/modelIR/Operation.h" -#include "core/modelIR/Shape.h" -namespace nnc -{ -namespace mir -{ -namespace ops -{ +namespace nnc { +namespace mir { +namespace ops { /** * @brief Description of tensor concatenation operation. 
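 * Inputs are joined along the given axis; all other dimensions of the inputs must match.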
*/ -class ConcatOp : public Operation -{ +class ConcatOp : public Operation { public: - ConcatOp(int num_inputs, int32_t axis) : Operation(Type::concat, num_inputs, 1), _axis(axis) {} + ConcatOp(const std::vector& args, int32_t axis) + : Operation(Type::concat, args), _axis(axis) {} - int32_t getAxis() const - { - if (_axis < 0) - { + int32_t getAxis() const { + if (_axis < 0) { // Negative axis is used to index starting from the last element of the shape // -1 means last element, -2 means second from end, like in python int32_t res = _axis + getInputShape(0).rank(); diff --git a/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h b/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h index 2958b16..5d10d31 100644 --- a/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h +++ b/contrib/nnc/include/core/modelIR/operations/Conv2DOp.h @@ -23,28 +23,21 @@ #include "core/modelIR/operations/common.h" #include "core/modelIR/TensorVariant.h" -#include "core/modelIR/Shape.h" +namespace nnc { +namespace mir { +namespace ops { -namespace nnc -{ -namespace mir -{ -namespace ops -{ - -class Conv2DOp : public Operation -{ +class Conv2DOp : public Operation { public: - Conv2DOp(const TensorVariant &kernel, const Shape &strides, PaddingType padding) - : Operation(Type::conv2D, 1, 1), _kernel(kernel), _strides(strides), - _padding(padding) - { + Conv2DOp(const IODescriptor& arg, const TensorVariant& kernel, const Shape& strides, + PaddingType padding) + : Operation(Type::conv2D, {arg}), _kernel(kernel), _strides(strides), _padding(padding) { _pads.resize(3); } - const TensorVariant &getKernel() const { return _kernel; } + const TensorVariant& getKernel() const { return _kernel; } - const Shape &getStrides() const { return _strides; } + const Shape& getStrides() const { return _strides; } PaddingType getPaddingType() const { return _padding; } diff --git a/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h b/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h index 24894f4..0ce5e27 100644 --- a/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h +++ b/contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h @@ -21,24 +21,21 @@ #include "core/modelIR/operations/common.h" #include "core/modelIR/TensorVariant.h" -#include "core/modelIR/Shape.h" - - namespace nnc { namespace mir { namespace ops { class DeConv2DOp : public Operation { public: - DeConv2DOp(const TensorVariant &kernel, const Shape &strides, PaddingType padding) - : Operation(Type::deConv2D, 1, 1), _kernel(kernel), _strides(strides), - _padding(padding) { + DeConv2DOp(const IODescriptor& arg, const TensorVariant& kernel, const Shape& strides, + PaddingType padding) + : Operation(Type::deConv2D, {arg}), _kernel(kernel), _strides(strides), _padding(padding) { _pads.resize(3); } - const TensorVariant &getKernel() const { return _kernel; } + const TensorVariant& getKernel() const { return _kernel; } - const Shape &getStrides() const { return _strides; } + const Shape& getStrides() const { return _strides; } PaddingType getPaddingType() const { return _padding; } @@ -52,7 +49,7 @@ private: PaddingType _padding; std::vector _pads; }; - + } // namespace ops } // namespace mir } // namespace nnc diff --git a/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h b/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h index 57e963e..e25a057 100644 --- a/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h +++ b/contrib/nnc/include/core/modelIR/operations/DepthwiseConv2DOp.h @@ -23,27 +23,21 @@ #include 
"core/modelIR/TensorVariant.h" #include "core/modelIR/operations/common.h" -#include "core/modelIR/Shape.h" +namespace nnc { +namespace mir { +namespace ops { -namespace nnc -{ -namespace mir -{ -namespace ops -{ - -class DepthwiseConv2DOp : public Operation -{ +class DepthwiseConv2DOp : public Operation { public: - explicit DepthwiseConv2DOp(const TensorVariant &kernel, const Shape &strides, PaddingType padding) - : Operation(Type::depthwiseConv, 1, 1), _kernel(kernel), _strides(strides), _padding(padding) - { + DepthwiseConv2DOp(const IODescriptor& arg, const TensorVariant& kernel, const Shape& strides, + PaddingType padding) + : Operation(Type::depthwiseConv, {arg}), _kernel(kernel), _strides(strides), _padding(padding) { _pads.resize(_kernel.getShape().rank()); } - const TensorVariant &getKernel() const { return _kernel; } + const TensorVariant& getKernel() const { return _kernel; } - const Shape &getStrides() const { return _strides; } + const Shape& getStrides() const { return _strides; } PaddingType getPaddingType() const { return _padding; } diff --git a/contrib/nnc/include/core/modelIR/operations/DropoutOp.h b/contrib/nnc/include/core/modelIR/operations/DropoutOp.h index 2ce8fbd..5b48200 100644 --- a/contrib/nnc/include/core/modelIR/operations/DropoutOp.h +++ b/contrib/nnc/include/core/modelIR/operations/DropoutOp.h @@ -19,16 +19,13 @@ #include "core/modelIR/Operation.h" -namespace nnc -{ -namespace mir -{ -namespace ops -{ +namespace nnc { +namespace mir { +namespace ops { class DropoutOp : public Operation { public: - explicit DropoutOp(float rate) : Operation(Type::dropout, 1, 1), _rate(rate) {} + DropoutOp(const IODescriptor& arg, float rate) : Operation(Type::dropout, {arg}), _rate(rate) {} /** * @return The ratio of random dropout diff --git a/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h b/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h index bb24720..3f3e638 100644 --- a/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h @@ -35,14 +35,14 @@ public: * @param op_type Type of operation to perform * @param num_inputs Number of inputs */ - explicit ElementwiseOp(OpType op_type, size_t num_inputs) : - Operation(Type::elementwise, num_inputs, 1), _opType(op_type) {}; + ElementwiseOp(const std::vector& args, OpType op_type) + : Operation(Type::elementwise, args), _opType(op_type) {}; private: OpType _opType; + public: OpType getOpType() const { return _opType; } - }; } // namespace ops diff --git a/contrib/nnc/include/core/modelIR/operations/EluOp.h b/contrib/nnc/include/core/modelIR/operations/EluOp.h index 8a03840..9341eb2 100644 --- a/contrib/nnc/include/core/modelIR/operations/EluOp.h +++ b/contrib/nnc/include/core/modelIR/operations/EluOp.h @@ -25,11 +25,10 @@ namespace ops { class EluOp : public Operation { public: - explicit EluOp(float alpha) : Operation(Type::ELU, 1, 1), _alpha(alpha) {} + EluOp(const IODescriptor& arg, float alpha) : Operation(Type::ELU, {arg}), _alpha(alpha) {} + + float getAlpha() const { return _alpha; } - float getAlpha() const { - return _alpha; - } private: float _alpha = 1.0; }; diff --git a/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h b/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h index 00555b8..65a4b78 100644 --- a/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h +++ b/contrib/nnc/include/core/modelIR/operations/FullyConnectedOp.h @@ -20,20 +20,16 @@ #include "core/modelIR/Operation.h" #include 
"core/modelIR/TensorVariant.h" -namespace nnc -{ -namespace mir -{ -namespace ops -{ - -class FullyConnectedOp : public Operation -{ +namespace nnc { +namespace mir { +namespace ops { + +class FullyConnectedOp : public Operation { public: - explicit FullyConnectedOp(const TensorVariant& weights) : Operation(Type::fullyConnected, 1, 1), - _weights(weights) {} + FullyConnectedOp(const IODescriptor& arg, const TensorVariant& weights) + : Operation(Type::fullyConnected, {arg}), _weights(weights) {} - const TensorVariant &getWeights() const { return _weights; } + const TensorVariant& getWeights() const { return _weights; } private: TensorVariant _weights; diff --git a/contrib/nnc/include/core/modelIR/operations/PadOp.h b/contrib/nnc/include/core/modelIR/operations/PadOp.h index 8afe1d2..2040dba 100644 --- a/contrib/nnc/include/core/modelIR/operations/PadOp.h +++ b/contrib/nnc/include/core/modelIR/operations/PadOp.h @@ -17,11 +17,9 @@ #ifndef _NCC_CORE_IR_MODEL_PAD_H_ #define _NCC_CORE_IR_MODEL_PAD_H_ -#include - #include "core/modelIR/Operation.h" - #include "core/modelIR/TensorVariant.h" +#include namespace nnc { namespace mir { @@ -35,12 +33,11 @@ public: SYMMETRIC }; - explicit PadOp(PaddingMode paddingMode, int numDims, const TensorVariant& constant_value) - : Operation(Type::pad, 1, 1), _paddingMode(paddingMode), - _numDims(numDims), _constant_value(constant_value) { - + PadOp(const IODescriptor& arg, PaddingMode paddingMode, int numDims, + const TensorVariant& constant_value) + : Operation(Type::pad, {arg}), _paddingMode(paddingMode), _numDims(numDims), + _constant_value(constant_value) { _paddings.resize(_numDims); - } PaddingMode getPaddingMode() const { return _paddingMode; } @@ -51,11 +48,9 @@ public: _paddings[dim].second = back_pad; } - std::pair getPaddingForDim(int dim) { - return _paddings[dim]; - } + std::pair getPaddingForDim(int dim) { return _paddings[dim]; } - int getNumDim() { return _numDims; } + int getNumDim() const { return _numDims; } private: PaddingMode _paddingMode; diff --git a/contrib/nnc/include/core/modelIR/operations/PoolOp.h b/contrib/nnc/include/core/modelIR/operations/PoolOp.h index 7f14f2f..578f210 100644 --- a/contrib/nnc/include/core/modelIR/operations/PoolOp.h +++ b/contrib/nnc/include/core/modelIR/operations/PoolOp.h @@ -17,41 +17,31 @@ #ifndef _NNC_CORE_IR_MODEL_POOL_H_ #define _NNC_CORE_IR_MODEL_POOL_H_ -#include - #include "core/modelIR/Operation.h" #include "core/modelIR/operations/common.h" +#include -#include "core/modelIR/Shape.h" - -namespace nnc -{ -namespace mir -{ -namespace ops -{ +namespace nnc { +namespace mir { +namespace ops { -class PoolOp : public Operation -{ +class PoolOp : public Operation { public: - enum class PoolingType - { + enum class PoolingType { MAX, AVG, MIN }; - enum class BorderType - { + enum class BorderType { ZEROFILLED, // elements outside of input considered zero EMPTY // Consider that there are no elements outside of input shape }; - explicit PoolOp(const Shape &windowShape, const Shape &strides, PoolingType poolType, - PaddingType padding, BorderType borderType) - : Operation(Type::pool, 1, 1), _padding(padding), _poolingType(poolType), - _borderType(borderType), _windowShape(windowShape), _strides(strides) - { + PoolOp(const IODescriptor& arg, const Shape& windowShape, const Shape& strides, + PoolingType poolType, PaddingType padding, BorderType borderType) + : Operation(Type::pool, {arg}), _padding(padding), _poolingType(poolType), + _borderType(borderType), _windowShape(windowShape), _strides(strides) { 
_pads.resize(_windowShape.rank()); } @@ -61,9 +51,9 @@ public: PoolingType getPoolingType() const { return _poolingType; } - const Shape &getWindowShape() const { return _windowShape; } + const Shape& getWindowShape() const { return _windowShape; } - const Shape &getStrides() const { return _strides; } + const Shape& getStrides() const { return _strides; } int32_t getPadding(int32_t dim) const { return _pads[dim]; } diff --git a/contrib/nnc/include/core/modelIR/operations/ReluOp.h b/contrib/nnc/include/core/modelIR/operations/ReluOp.h index 42a86a0..2ddfcb4 100644 --- a/contrib/nnc/include/core/modelIR/operations/ReluOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ReluOp.h @@ -19,17 +19,13 @@ #include "core/modelIR/Operation.h" -namespace nnc -{ -namespace mir -{ -namespace ops -{ +namespace nnc { +namespace mir { +namespace ops { -class ReluOp : public Operation -{ +class ReluOp : public Operation { public: - explicit ReluOp() : Operation(Type::ReLU, 1, 1) {} + explicit ReluOp(const IODescriptor& arg) : Operation(Type::ReLU, {arg}) {} }; } // namespace ops diff --git a/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h b/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h index 04c25bd..31010c3 100644 --- a/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ReshapeOp.h @@ -14,21 +14,15 @@ * limitations under the License. */ -#pragma once - #include "core/modelIR/Operation.h" -namespace nnc -{ -namespace mir -{ -namespace ops -{ +namespace nnc { +namespace mir { +namespace ops { -class ReshapeOp : public Operation -{ +class ReshapeOp : public Operation { public: - explicit ReshapeOp() : Operation(Type::reshape, 1, 1) {} + explicit ReshapeOp(const IODescriptor& arg) : Operation(Type::reshape, {arg}) {} }; } // namespace ops diff --git a/contrib/nnc/include/core/modelIR/operations/ScaleOp.h b/contrib/nnc/include/core/modelIR/operations/ScaleOp.h index 3f207e0..891358e 100644 --- a/contrib/nnc/include/core/modelIR/operations/ScaleOp.h +++ b/contrib/nnc/include/core/modelIR/operations/ScaleOp.h @@ -19,23 +19,19 @@ #include "core/modelIR/Operation.h" -namespace nnc -{ -namespace mir -{ -namespace ops -{ - -class ScaleOp : public Operation -{ +namespace nnc { +namespace mir { +namespace ops { + +class ScaleOp : public Operation { public: - explicit ScaleOp(const TensorVariant& weights) : Operation(Type::scale, 1, 1), - _weights(weights) {} + ScaleOp(const IODescriptor& arg, const TensorVariant& weights) + : Operation(Type::scale, {arg}), _weights(weights) {} /** * @return The input 1-dimensional scale tensor. */ - const TensorVariant &getWeights() const { return _weights; } + const TensorVariant& getWeights() const { return _weights; } private: TensorVariant _weights; diff --git a/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h b/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h index 059dc0d..8d1e6a8 100644 --- a/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h +++ b/contrib/nnc/include/core/modelIR/operations/SoftmaxOp.h @@ -18,27 +18,20 @@ #define _NNC_CORE_IR_MODEL_SOFTMAX_H_ #include "core/modelIR/Operation.h" -#include "core/modelIR/Shape.h" -namespace nnc -{ -namespace mir -{ -namespace ops -{ +namespace nnc { +namespace mir { +namespace ops { /** * @brief description of softmax operation. 
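 * Exponentiates each element and normalizes along the given axis so that the outputs sum to one.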
*/ -class SoftmaxOp : public Operation -{ +class SoftmaxOp : public Operation { public: - explicit SoftmaxOp(int32_t axis) : Operation(Type::softmax, 1, 1), _axis(axis) {} + SoftmaxOp(const IODescriptor& arg, int32_t axis) : Operation(Type::softmax, {arg}), _axis(axis) {} - int32_t getAxis() const - { - if (_axis < 0) - { + int32_t getAxis() const { + if (_axis < 0) { // Negative axis is used to index starting from the last element of the shape // -1 means last element, -2 means second from end, like in python int32_t res = _axis + getInputShape(0).rank(); diff --git a/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h b/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h index fb2875e..a2b28dc 100644 --- a/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h +++ b/contrib/nnc/include/core/modelIR/operations/SqueezeOp.h @@ -25,16 +25,12 @@ namespace ops { class SqueezeOp : public Operation { public: - explicit SqueezeOp(const std::vector& dims_to_squeeze) : - Operation(Type::squeeze, 1, 1), _dims_to_squeeze(dims_to_squeeze) {} + SqueezeOp(const IODescriptor& arg, const std::vector& dims_to_squeeze) + : Operation(Type::squeeze, {arg}), _dims_to_squeeze(dims_to_squeeze) {} - int32_t getNumSqueezeDims() { - return static_cast(_dims_to_squeeze.size()); - } + int32_t getNumSqueezeDims() const { return static_cast(_dims_to_squeeze.size()); } - const std::vector& getDimsToSqueeze() { - return _dims_to_squeeze; - } + const std::vector& getDimsToSqueeze() const { return _dims_to_squeeze; } private: std::vector _dims_to_squeeze; diff --git a/contrib/nnc/include/core/modelIR/operations/TanhOp.h b/contrib/nnc/include/core/modelIR/operations/TanhOp.h index a470528..d5a4aa2 100644 --- a/contrib/nnc/include/core/modelIR/operations/TanhOp.h +++ b/contrib/nnc/include/core/modelIR/operations/TanhOp.h @@ -17,7 +17,6 @@ #ifndef _NNC_CORE_IR_MODEL_TANH_H_ #define _NNC_CORE_IR_MODEL_TANH_H_ - #include "core/modelIR/Operation.h" namespace nnc { @@ -26,7 +25,7 @@ namespace ops { class TanhOp : public Operation { public: - explicit TanhOp() : Operation(Type::tanh, 1, 1) {} + explicit TanhOp(const IODescriptor& arg) : Operation(Type::tanh, {arg}) {} }; } // namespace ops diff --git a/contrib/nnc/include/core/modelIR/operations/VariableOp.h b/contrib/nnc/include/core/modelIR/operations/VariableOp.h index e71cbb8..e1bec2c 100644 --- a/contrib/nnc/include/core/modelIR/operations/VariableOp.h +++ b/contrib/nnc/include/core/modelIR/operations/VariableOp.h @@ -19,17 +19,13 @@ #include "core/modelIR/Operation.h" -namespace nnc -{ -namespace mir -{ -namespace ops -{ +namespace nnc { +namespace mir { +namespace ops { -class VariableOp : public Operation -{ +class VariableOp : public Operation { public: - explicit VariableOp() : Operation(Type::variable, 0, 1) {} + VariableOp() : Operation(Type::variable, {}) {} }; } // namespace ops diff --git a/contrib/nnc/include/core/modelIR/operations/common.h b/contrib/nnc/include/core/modelIR/operations/common.h index 81467a9..42a9992 100644 --- a/contrib/nnc/include/core/modelIR/operations/common.h +++ b/contrib/nnc/include/core/modelIR/operations/common.h @@ -17,12 +17,9 @@ #ifndef _NNC_CORE_IR_MODEL_COMMON_H_ #define _NNC_CORE_IR_MODEL_COMMON_H_ -namespace nnc -{ -namespace mir -{ -namespace ops -{ +namespace nnc { +namespace mir { +namespace ops { enum class PaddingType { Same, diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp index 6bf4711..3965ec6 100644 --- 
a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp @@ -227,7 +227,7 @@ std::vector CaffeOpCreator::convertInput(const LayerParameter& lay { const auto& blob_name = layer.top(i); const auto& blob_shape = params.shape(num_shapes == 1 ? 0 : i); - auto variable = createOp({}); + auto variable = createOp(); variable->setName(blob_name); Shape shape = ShapeHelper::createShape(blob_shape.dim(), blob_shape.dim_size()); shape = ShapeHelper::cutOffBatchDim(shape); @@ -271,15 +271,14 @@ CaffeOpCreator::convertConvolution(const std::vector& inputs, // This is depthwise convolution // TODO handle properly kernel with layer multiplier std::shared_ptr transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(params[0]); - conv2d = createOp(inputs, std::move(*transposed_tensor), - stride_shape, pad_type); + conv2d = createOp(inputs[0], *transposed_tensor, stride_shape, + pad_type); } else { if (num_groups != 1) { // first we need to convert kernel of grouped convolution to appropriate ordinary kernel unfolded_tensor = fixGroupedKernel(opts.group(), params[0]); } - conv2d = createOp(inputs, std::move(*unfolded_tensor), - stride_shape, pad_type); + conv2d = createOp(inputs[0], *unfolded_tensor, stride_shape, pad_type); } // Set pads @@ -302,7 +301,7 @@ CaffeOpCreator::convertConvolution(const std::vector& inputs, // bias_term is optional (so might not be present) and defaults to true if (!opts.has_bias_term() || opts.bias_term()) { - auto bias_add = createOp({conv2d->getOutput(0)}, std::move(*params[1])); + auto bias_add = createOp(conv2d->getOutput(0), *params[1]); return {bias_add->getOutput(0)}; } else { return {conv2d->getOutput(0)}; @@ -332,17 +331,16 @@ CaffeOpCreator::convertInnerProduct(const std::vector& inputs, const caffe::InnerProductParameter& opts) { // Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize] // It is needed because Caffe InnerProduct layer takes NCHW input and flattens the CHW part. 
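+  // e.g. an input holding C * H * W elements is reshaped to (1, C * H * W) before the weights are applied.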
- auto reshape = createOp(inputs); + auto reshape = createOp(inputs[0]); int32_t fc_input_size = static_cast( params[0]->getShape().numElements()) / opts.num_output(); reshape->setOutputShape(0, {1, fc_input_size}); - auto fully_connected = createOp({reshape->getOutput(0)}, - std::move(*params[0])); + auto fully_connected = createOp(reshape->getOutput(0), *params[0]); // bias_term is optional (so might not be present) and defaults to true if (!opts.has_bias_term() || opts.bias_term()) { - auto add_op = createOp({fully_connected->getOutput(0)}, std::move(*params[1])); + auto add_op = createOp(fully_connected->getOutput(0), *params[1]); return {add_op->getOutput(0)}; } else { return {fully_connected->getOutput(0)}; @@ -352,7 +350,7 @@ CaffeOpCreator::convertInnerProduct(const std::vector& inputs, std::vector CaffeOpCreator::convertConcat(const std::vector& inputs, const caffe::ConcatParameter& opts) { - auto result = createOp(inputs, inputs.size(), getAxisValue(opts)); + auto result = createOp(inputs, getAxisValue(opts)); return {result->getOutput(0)}; } @@ -389,8 +387,8 @@ CaffeOpCreator::convertPooling(const std::vector& inputs, assert(false); } - auto pooling = createOp(inputs, window_shape, stride, pool_type, - pad_type, border_type); + auto pooling = createOp(inputs[0], window_shape, stride, pool_type, pad_type, + border_type); // Set pads auto op = static_cast(pooling); @@ -408,7 +406,7 @@ CaffeOpCreator::convertPooling(const std::vector& inputs, std::vector CaffeOpCreator::convertSoftmax(const std::vector& inputs, const caffe::SoftmaxParameter& opts) { - auto softmax = createOp(inputs, getAxisValue(opts)); + auto softmax = createOp(inputs[0], getAxisValue(opts)); return {softmax->getOutput(0)}; } @@ -436,7 +434,7 @@ void CaffeOpCreator::checkReshape(const ReshapeParameter& opts, std::vector CaffeOpCreator::convertReshape(const std::vector& inputs, const caffe::ReshapeParameter& opts) { - auto reshape = createOp(inputs); + auto reshape = createOp(inputs[0]); Shape new_shape = ShapeHelper::createShape(opts.shape().dim(), opts.shape().dim_size()); reshape->setOutputShape(0, new_shape); return {reshape->getOutput(0)}; @@ -450,7 +448,7 @@ void CaffeOpCreator::checkReLU(const ReLUParameter& opts, std::vector CaffeOpCreator::convertReLU(const std::vector& inputs) { - auto relu = createOp(inputs); + auto relu = createOp(inputs[0]); return {relu->getOutput(0)}; } @@ -458,11 +456,11 @@ std::vector CaffeOpCreator::convertScale(const std::vector& inputs, const std::vector>& params, const caffe::ScaleParameter& opts) { - auto scale = createOp(inputs, std::move(*params[0])); + auto scale = createOp(inputs[0], std::move(*params[0])); // bias_term is optional (so might not be present) and defaults to true if (!opts.has_bias_term() || opts.bias_term()) { - auto bias_add = createOp({scale->getOutput(0)}, std::move(*params[1])); + auto bias_add = createOp(scale->getOutput(0), *params[1]); return {bias_add->getOutput(0)}; } else { return {scale->getOutput(0)}; @@ -494,7 +492,7 @@ CaffeOpCreator::convertBatchNorm(const std::vector& inputs, for (Index idx: ShapeRange(bias_data.getShape())) bias_data.at(idx) *= -scale_factor; - auto bias_add = createOp(inputs, std::move(*params[0])); + auto bias_add = createOp(inputs[0], *params[0]); // create scale argument from variance: // multiply elements of variance by scaleFactor and @@ -502,7 +500,7 @@ CaffeOpCreator::convertBatchNorm(const std::vector& inputs, Tensor scale_data(*params[1]); for (Index idx: ShapeRange(scale_data.getShape())) scale_data.at(idx) = 
1.0f / std::sqrt(scale_data.at(idx) * scale_factor + eps); - auto scale = createOp({bias_add->getOutput(0)}, std::move(*params[1])); + auto scale = createOp(bias_add->getOutput(0), *params[1]); return {scale->getOutput(0)}; } @@ -510,7 +508,7 @@ CaffeOpCreator::convertBatchNorm(const std::vector& inputs, std::vector CaffeOpCreator::convertDropout(const std::vector& inputs, const caffe::DropoutParameter& opts) { - auto dropout = createOp(inputs, opts.dropout_ratio()); + auto dropout = createOp(inputs[0], opts.dropout_ratio()); return {dropout->getOutput(0)}; } @@ -526,8 +524,7 @@ CaffeOpCreator::convertDeconvolution(const std::vector& inputs, // first we need to convert kernel of grouped convolution to appropriate ordinary kernel unfolded_tensor = fixGroupedKernel(opts.group(), params[0]); } - auto deconv2d = createOp(inputs, std::move(*unfolded_tensor), - stride_shape, pad_type); + auto deconv2d = createOp(inputs[0], *unfolded_tensor, stride_shape, pad_type); // Set pads auto op = static_cast(deconv2d); @@ -542,7 +539,7 @@ CaffeOpCreator::convertDeconvolution(const std::vector& inputs, // bias_term is optional (so might not be present) and defaults to true if (!opts.has_bias_term() || opts.bias_term()) { - auto bias_add = createOp({deconv2d->getOutput(0)}, std::move(*params[1])); + auto bias_add = createOp(deconv2d->getOutput(0), *params[1]); return {bias_add->getOutput(0)}; } else { return {deconv2d->getOutput(0)}; @@ -553,13 +550,13 @@ std::vector CaffeOpCreator::convertELU(const std::vector& inputs, const std::vector>& params, const caffe::ELUParameter& opts) { - auto elu = createOp(inputs, opts.alpha()); + auto elu = createOp(inputs[0], opts.alpha()); return {elu->getOutput(0)}; } std::vector CaffeOpCreator::convertTanH(const std::vector& inputs) { - auto tanh = createOp(inputs); + auto tanh = createOp(inputs[0]); return {tanh->getOutput(0)}; } @@ -581,7 +578,7 @@ CaffeOpCreator::convertEltwise(const std::vector& inputs, optype = ops::ElementwiseOp::OpType::max; break; } - auto elementwise = createOp(inputs, optype, inputs.size()); + auto elementwise = createOp(inputs, optype); return {elementwise->getOutput(0)}; } @@ -592,9 +589,4 @@ CaffeOpCreator::convertSplit(const std::vector& inputs, return outputs; } -void CaffeOpCreator::connectInputs(mir::Operation* op, const std::vector& inputs) { - for (int i = 0; i < static_cast(inputs.size()); ++i) - op->connectInputTo(i, inputs[i]); -} - } // namespace nnc diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h index ce0d5e7..607495b 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h +++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h @@ -110,19 +110,15 @@ public: private: Graph* _graph = nullptr; - void connectInputs(mir::Operation*, const std::vector& inputs); - template - mir::Operation* createOp(const std::vector& inputs, Types&&... args); + mir::Operation* createOp(Types&&... args); }; template mir::Operation* -CaffeOpCreator::createOp(const std::vector& inputs, Types&&... args) { +CaffeOpCreator::createOp(Types&&... 
args) {
   // TODO: set operation names
-  auto op = _graph->create<OpType>("", std::forward<Types>(args)...);
-  connectInputs(op, inputs);
-  return op;
+  return _graph->create<OpType>("", std::forward<Types>(args)...);
 }

 } // namespace nnc
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
index 0348082..441fc16 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
@@ -48,7 +48,10 @@ std::vector<Operation*> ONNXOpCreator::createConv2D(InputOps inputs, InputParams
 }

 std::vector<Operation*> ONNXOpCreator::createConcat(InputOps inputs, int axis) {
-  return createOp<ops::ConcatOp>(inputs, inputs.size(), axis);
+  std::vector<IODescriptor> descriptors;
+  for (auto input : inputs)
+    descriptors.push_back(input->getOutput(0));
+  return createOp<ops::ConcatOp>(descriptors, axis);
 }

 std::vector<Operation*> ONNXOpCreator::createPool(InputOps inputs, ONNXOpCode opCode) {
@@ -56,20 +59,18 @@ std::vector<Operation*> ONNXOpCreator::createPool(InputOps inputs, ONNXOpCode op
 }

 std::vector<Operation*> ONNXOpCreator::createSoftmax(InputOps inputs, int axis) {
-  return createOp<ops::SoftmaxOp>(inputs, axis);
+  return createOp<ops::SoftmaxOp>(inputs[0]->getOutput(0), axis);
 }

 std::vector<Operation*> ONNXOpCreator::createReshape(Operation* inputData, Shape outputShape) {
-  std::vector<Operation*> inputNodes;
-  inputNodes.push_back(inputData);
-  auto outputs = createOp<ops::ReshapeOp>(inputNodes);
+  auto outputs = createOp<ops::ReshapeOp>(inputData->getOutput(0));
   outputs[0]->setOutputShape(0, outputShape);
   return outputs;
 }

 std::vector<Operation*> ONNXOpCreator::createRelu(InputOps inputs) {
   assert(inputs.size() == 1);
-  return createOp<ops::ReluOp>(inputs);
+  return createOp<ops::ReluOp>(inputs[0]->getOutput(0));
 }

 std::vector<Operation*> ONNXOpCreator::createScale(InputOps inputs, InputParams params,
                                                    ::onnx::NodeProto node) {
@@ -81,13 +82,7 @@ std::vector<Operation*> ONNXOpCreator::createBatchNorm(InputOps inputs, InputPar
 }

 std::vector<Operation*> ONNXOpCreator::createDropout(InputOps inputs, float ratio) {
-  return createOp<ops::DropoutOp>(inputs, ratio);
+  return createOp<ops::DropoutOp>(inputs[0]->getOutput(0), ratio);
 }

-void ONNXOpCreator::connectInputs(Operation* op, InputOps inputs) {
-  // TODO: this part doesn't support the situation where an operator takes as input
-  // some tensor that is not the 0th output of some other operator
-  for (int i = 0; i < static_cast<int>(inputs.size()); ++i)
-    op->connectInputTo(i, inputs[i]->getOutput(0));
-}
 } // namespace nnc
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
index a20d723..99c69f3 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
@@ -49,21 +49,16 @@ public:
     _graph = g;
   }
 private:
-  void connectInputs(nnc::mir::Operation* op, std::vector<nnc::mir::Operation*>& inputs);
   template <typename OpType, typename... Types>
-  std::vector<mir::Operation*> createOp(std::vector<mir::Operation*>& inputs, Types&&... args);
+  std::vector<mir::Operation*> createOp(Types&&... args);

   mir::Graph* _graph = nullptr;
 };

 template <typename OpType, typename... Types>
-std::vector<mir::Operation*> ONNXOpCreator::createOp(std::vector<mir::Operation*>& inputs, Types&&... args) {
-  std::vector<mir::Operation*> outputs;
-
+std::vector<mir::Operation*> ONNXOpCreator::createOp(Types&&...
args) { // TODO: set operation names auto op = _graph->create("", std::forward(args)...); - connectInputs(op, inputs); - outputs.push_back(op); - return outputs; + return {op}; } } // namespace nnc #endif //NNCC_ONNX_OP_CREATOR_H diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp index 901ce09..cf21918 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp @@ -45,12 +45,13 @@ void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts, std::vector TFLiteOpCreator::convertConv2D(InputOps inputs, InputParams params, const Conv2DOptions* opts) { - auto outputs = createOp(inputs, ActivationFunctionType_NONE, std::move(*params[0]), + auto outputs = createOp(ActivationFunctionType_NONE, inputs[0]->getOutput(0), + *params[0], Shape{static_cast(opts->stride_h()), static_cast(opts->stride_w()), 1}, paddingMap[opts->padding()]); - return createOp(outputs, opts->fused_activation_function(), - std::move(*params[1])); + return createOp(opts->fused_activation_function(), outputs[0]->getOutput(0), + *params[1]); } void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts, @@ -61,13 +62,14 @@ void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts, std::vector TFLiteOpCreator::convertDepthwiseConv2D(InputOps inputs, InputParams params, const DepthwiseConv2DOptions* opts) { - auto outputs = createOp( - inputs, ActivationFunctionType_NONE, std::move(*params[0]), - Shape{static_cast(opts->stride_h()), - static_cast(opts->stride_w()), 1}, - paddingMap[opts->padding()]); - return createOp(outputs, opts->fused_activation_function(), - std::move(*params[1])); + auto outputs = createOp(ActivationFunctionType_NONE, + inputs[0]->getOutput(0), + *params[0], + Shape{static_cast(opts->stride_h()), + static_cast(opts->stride_w()), 1}, + paddingMap[opts->padding()]); + return createOp(opts->fused_activation_function(), outputs[0]->getOutput(0), + *params[1]); } void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts, @@ -78,9 +80,11 @@ void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts, std::vector TFLiteOpCreator::convertConcatenation(InputOps inputs, InputParams params, const ConcatenationOptions* opts) { + std::vector descriptors; + for (auto i : inputs) + descriptors.push_back(i->getOutput(0)); // Decrementing axis to account for the unnecessary batch dimension - return createOp(inputs, opts->fused_activation_function(), inputs.size(), - opts->axis() - 1); + return createOp(opts->fused_activation_function(), descriptors, opts->axis() - 1); } void TFLiteOpCreator::checkPool2D(const Pool2DOptions* opts, @@ -90,7 +94,7 @@ void TFLiteOpCreator::checkPool2D(const Pool2DOptions* opts, std::vector TFLiteOpCreator::convertMaxPool2D(InputOps inputs, InputParams params, const Pool2DOptions* opts) { - return createOp(inputs, opts->fused_activation_function(), + return createOp(opts->fused_activation_function(), inputs[0]->getOutput(0), Shape{static_cast(opts->filter_height()), static_cast(opts->filter_width()), 1}, Shape{static_cast(opts->stride_h()), @@ -102,7 +106,7 @@ std::vector TFLiteOpCreator::convertMaxPool2D(InputOps inputs, std::vector TFLiteOpCreator::convertAveragePool2D(InputOps inputs, InputParams params, const Pool2DOptions* opts) { - return createOp(inputs, opts->fused_activation_function(), + return createOp(opts->fused_activation_function(), inputs[0]->getOutput(0), 
Shape{static_cast(opts->filter_height()), static_cast(opts->filter_width()), 1}, Shape{static_cast(opts->stride_h()), @@ -114,12 +118,12 @@ std::vector TFLiteOpCreator::convertAveragePool2D(InputOps inpu std::vector TFLiteOpCreator::createSoftmax(InputOps inputs, InputParams params, const SoftmaxOptions* opts) { // -1 represents last one dimension - return createOp(inputs, ActivationFunctionType_NONE, -1); + return createOp(ActivationFunctionType_NONE, inputs[0]->getOutput(0), -1); } std::vector TFLiteOpCreator::convertReshape(InputOps inputs, InputParams params, const ReshapeOptions* opts) { - auto outputs = createOp(inputs, ActivationFunctionType_NONE); + auto outputs = createOp(ActivationFunctionType_NONE, inputs[0]->getOutput(0)); // TODO: we should also support "-1" values in new_shape, which means that correct // shape values must be calculated. Better do it in the shape inference module. @@ -129,36 +133,41 @@ std::vector TFLiteOpCreator::convertReshape(InputOps inputs, In return outputs; } -std::vector TFLiteOpCreator::createTransposeConv( - InputOps& inputs, - InputParams& params, - const ::tflite::TransposeConvOptions* opts) {// first param is output shape - return createOp(inputs, ActivationFunctionType_NONE, std::move(*params[1]), - Shape{static_cast(opts->stride_h()), - static_cast(opts->stride_w()), 1}, - paddingMap[opts->padding()]); +std::vector +TFLiteOpCreator::createTransposeConv(InputOps& inputs, InputParams& params, + const ::tflite::TransposeConvOptions* opts) { + return createOp(ActivationFunctionType_NONE, inputs[0]->getOutput(0), *params[1], + Shape{static_cast(opts->stride_h()), + static_cast(opts->stride_w()), 1}, + paddingMap[opts->padding()]); } std::vector -TFLiteOpCreator::createAdd(InputOps& inputs, - InputParams&, const ::tflite::AddOptions* opts) { - return createOp( - inputs, opts->fused_activation_function(), ops::ElementwiseOp::OpType::sum, inputs.size()); +TFLiteOpCreator::createAdd(InputOps& inputs, InputParams&, const ::tflite::AddOptions* opts) { + std::vector descriptors; + for (auto i : inputs) + descriptors.push_back(i->getOutput(0)); + return createOp(opts->fused_activation_function(), descriptors, + ops::ElementwiseOp::OpType::sum); } std::vector -TFLiteOpCreator::createMul(InputOps& inputs, - InputParams&, const ::tflite::MulOptions* opts) { - return createOp( - inputs, opts->fused_activation_function(), ops::ElementwiseOp::OpType::prod, inputs.size()); +TFLiteOpCreator::createMul(InputOps& inputs, InputParams&, const ::tflite::MulOptions* opts) { + std::vector descriptors; + for (auto i : inputs) + descriptors.push_back(i->getOutput(0)); + return createOp(opts->fused_activation_function(), descriptors, + ops::ElementwiseOp::OpType::prod); } - std::vector -TFLiteOpCreator::createMax(InputOps& inputs, - InputParams&, const ::tflite::MaximumMinimumOptions* opts) { - return createOp( - inputs, ActivationFunctionType_NONE, ops::ElementwiseOp::OpType::max, inputs.size()); +TFLiteOpCreator::createMax(InputOps& inputs, InputParams&, + const ::tflite::MaximumMinimumOptions* opts) { + std::vector descriptors; + for (auto i : inputs) + descriptors.push_back(i->getOutput(0)); + return createOp(ActivationFunctionType_NONE, descriptors, + ops::ElementwiseOp::OpType::max); } void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions* opts, @@ -171,22 +180,22 @@ TFLiteOpCreator::convertFullyConnected(InputOps& inputs, InputParams& params, const FullyConnectedOptions* opts) { // Add Reshape operation to make sure the input for FC operation has shape 
[1, fcInputSize] - auto outputs = createOp(inputs, ActivationFunctionType_NONE); + auto outputs = createOp(ActivationFunctionType_NONE, inputs[0]->getOutput(0)); int32_t fcInputSize = params[0]->getShape().dim(0); outputs[0]->setOutputShape(0, {1, fcInputSize}); - auto fc_outputs = createOp(outputs, ActivationFunctionType_NONE, - std::move(*params[0])); - return createOp(fc_outputs, opts->fused_activation_function(), - std::move(*params[1])); + auto fc_outputs = createOp(ActivationFunctionType_NONE, + outputs[0]->getOutput(0), *params[0]); + return createOp(opts->fused_activation_function(), fc_outputs[0]->getOutput(0), + *params[1]); } void TFLiteOpCreator::checkActivationType(ActivationFunctionType activation_type, std::set& problems_op_set) { - if (activation_type != ActivationFunctionType_NONE - && activation_type != ActivationFunctionType_RELU - && activation_type != ActivationFunctionType_RELU6 - && activation_type != ActivationFunctionType_TANH) + if (activation_type != ActivationFunctionType_NONE && + activation_type != ActivationFunctionType_RELU && + activation_type != ActivationFunctionType_RELU6 && + activation_type != ActivationFunctionType_TANH) problems_op_set.insert(std::string("Unsupported activation type: ") + EnumNamesActivationFunctionType()[activation_type]); } @@ -197,42 +206,33 @@ mir::Operation* TFLiteOpCreator::addFusedActivation(mir::Operation* input, if (activation_type != ActivationFunctionType_NONE) { // TODO: process other activation types + assert(input->getNumOutputs() == 1); switch (activation_type) { case ActivationFunctionType_RELU: - activation = graph->create(""); + activation = _graph->create("", input->getOutput(0)); break; case ActivationFunctionType_RELU6: - activation = graph->create("", 6); + activation = _graph->create("", input->getOutput(0), 6); break; case ActivationFunctionType_TANH: - activation = graph->create(""); + activation = _graph->create("", input->getOutput(0)); break; default: assert(false && "Unsupported activation types must be detected before this pass"); } - - assert(input->getNumOutputs() == 1); - activation->connectInputTo(0, input->getOutput(0)); return activation; } else { return input; } } -void TFLiteOpCreator::connectInputs(mir::Operation* op, std::vector& inputs) { - // TODO: this part doesn't support the situation where an operator takes as input - // some tensor that is not the 0th output of some other operator - assert(inputs.size() == op->getNumInputs()); - for (size_t i = 0; i < inputs.size(); ++i) - op->connectInputTo(i, inputs[i]->getOutput(0)); -} - std::vector TFLiteOpCreator::createSqueeze(InputOps inputs, InputParams params, const ::tflite::SqueezeOptions* opts) { std::vector squeeze_dims{opts->squeeze_dims()->begin(), opts->squeeze_dims()->end()}; - return createOp(inputs, ActivationFunctionType_NONE, squeeze_dims); + return createOp(ActivationFunctionType_NONE, inputs[0]->getOutput(0), + squeeze_dims); } } // namespace nnc diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h index bceed15..65f040b 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h @@ -44,7 +44,7 @@ public: using InputOps = std::vector&; using InputParams = std::vector>&; - explicit TFLiteOpCreator(Graph* g) : graph(g) {}; + explicit TFLiteOpCreator(Graph* g) : _graph(g) {} std::vector convertConv2D(InputOps, InputParams, const ::tflite::Conv2DOptions*); @@ -97,7 +97,7 @@ public: void 
checkFullyConnected(const ::tflite::FullyConnectedOptions*, std::set&); private: - Graph* graph = nullptr; + Graph* _graph; std::map<::tflite::Padding, ops::PaddingType> paddingMap = { {::tflite::Padding_SAME, ops::PaddingType::Same}, @@ -108,27 +108,18 @@ private: mir::Operation* addFusedActivation(mir::Operation* input, ::tflite::ActivationFunctionType activationType); - void connectInputs(mir::Operation* op, std::vector& inputs); - template - std::vector createOp(std::vector& inputs, - ::tflite::ActivationFunctionType activation, - Types&& ... args); + std::vector createOp(::tflite::ActivationFunctionType activation, + Types&&... args); }; template -std::vector TFLiteOpCreator::createOp( - std::vector& inputs, - ::tflite::ActivationFunctionType activation, Types&& ... args) { - std::vector outputs; - +std::vector +TFLiteOpCreator::createOp(::tflite::ActivationFunctionType activation, Types&& ... args) { // TODO: how to name operations? in Tensorflow tensors get names, not operations - auto op = graph->create("", std::forward(args)...); - - connectInputs(op, inputs); - outputs.push_back(addFusedActivation(op, activation)); - - return outputs; + auto op = _graph->create("", std::forward(args)...); + auto fused_op = addFusedActivation(op, activation); + return {fused_op}; } } // namespace nnc diff --git a/contrib/nnc/tests/interpreter/graph_creator.cpp b/contrib/nnc/tests/interpreter/graph_creator.cpp index d40a478..864be5d 100644 --- a/contrib/nnc/tests/interpreter/graph_creator.cpp +++ b/contrib/nnc/tests/interpreter/graph_creator.cpp @@ -39,117 +39,122 @@ using namespace nnc; using namespace nnc::mir; - static Operation* createFullyConnected(std::unique_ptr& g, - const opinfo::OperatorInfo* opInfo) -{ - return g->create( - "y", *getKernel(opInfo)); + const std::vector& inputs, + const opinfo::OperatorInfo* opInfo) { + return g->create("y", inputs[0], *getKernel(opInfo)); } -static Operation* createConv2D(std::unique_ptr& g, const opinfo::OperatorInfo* opInfo) -{ - return g->create( - "y", *getKernel(opInfo), getShapeParam(opInfo, 0), getPaddingType(opInfo)); +static Operation* createConv2D(std::unique_ptr& g, + const std::vector& inputs, + const opinfo::OperatorInfo* opInfo) { + return g->create("y", inputs[0], *getKernel(opInfo), getShapeParam(opInfo, 0), + getPaddingType(opInfo)); } static Operation* createDepthwiseConv2D(std::unique_ptr& g, - const opinfo::OperatorInfo* opInfo) -{ - return g->create( - "y", *getKernel(opInfo), getShapeParam(opInfo, 0), getPaddingType(opInfo)); + const std::vector& inputs, + const opinfo::OperatorInfo* opInfo) { + return g->create("y", inputs[0], *getKernel(opInfo), + getShapeParam(opInfo, 0), getPaddingType(opInfo)); } -static Operation* createPool(std::unique_ptr& g, const opinfo::OperatorInfo* opInfo) -{ - return g->create("y", getShapeParam(opInfo, 0), getShapeParam(opInfo, 1), - getPoolingType(opInfo), getPaddingType(opInfo), ops::PoolOp::BorderType::ZEROFILLED); +static Operation* createPool(std::unique_ptr& g, + const std::vector& inputs, + const opinfo::OperatorInfo* opInfo) { + return g->create("y", inputs[0], getShapeParam(opInfo, 0), getShapeParam(opInfo, 1), + getPoolingType(opInfo), getPaddingType(opInfo), + ops::PoolOp::BorderType::ZEROFILLED); } -static Operation* createConcatenation(std::unique_ptr& g, const opinfo::OperatorInfo* opInfo) -{ - return g->create("y", opInfo->inputs()->size(), getAxis(opInfo)); +static Operation* createConcatenation(std::unique_ptr& g, + const std::vector& inputs, + const opinfo::OperatorInfo* opInfo) { + 
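+  // Unlike the unary creators above, ConcatOp receives the whole vector of input descriptors at once.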
return g->create("y", inputs, getAxis(opInfo)); } -static Operation* createReshape(std::unique_ptr& g, const opinfo::OperatorInfo* opInfo) -{ - auto op = g->create("y"); +static Operation* createReshape(std::unique_ptr& g, + const std::vector& inputs, + const opinfo::OperatorInfo* opInfo) { + auto op = g->create("y", inputs[0]); op->setOutputShape(0, getShapeParam(opInfo, 0)); return op; } -static Operation* createReLU(std::unique_ptr& g, const opinfo::OperatorInfo* opInfo) -{ +static Operation* createReLU(std::unique_ptr& g, + const std::vector& inputs, + const opinfo::OperatorInfo* opInfo) { (void)opInfo; - return g->create("y"); + return g->create("y", inputs[0]); } -static Operation* createCappedReLU(std::unique_ptr& g, const opinfo::OperatorInfo* opInfo) -{ - return g->create("y", getAxis(opInfo)); +static Operation* createCappedReLU(std::unique_ptr& g, + const std::vector& inputs, + const opinfo::OperatorInfo* opInfo) { + return g->create("y", inputs[0], getAxis(opInfo)); } -static Operation* createSoftmax(std::unique_ptr& g, const opinfo::OperatorInfo* opInfo) -{ - return g->create("y", getAxis(opInfo)); +static Operation* createSoftmax(std::unique_ptr& g, + const std::vector& inputs, + const opinfo::OperatorInfo* opInfo) { + return g->create("y", inputs[0], getAxis(opInfo)); } -static Operation* createBiasAdd(std::unique_ptr& g, const opinfo::OperatorInfo* opInfo) -{ - return g->create("y", *getKernel(opInfo)); +static Operation* createBiasAdd(std::unique_ptr& g, + const std::vector& inputs, + const opinfo::OperatorInfo* opInfo) { + return g->create("y", inputs[0], *getKernel(opInfo)); } -static Operation* createOp(std::unique_ptr& g, const opinfo::OperatorInfo* opInfo) -{ - switch (opInfo->op()) - { +static Operation* createOp(std::unique_ptr& g, + const std::vector& inputs, + const opinfo::OperatorInfo* opInfo) { + switch (opInfo->op()) { case opinfo::OperatorType_FULLY_CONNECTED: - return createFullyConnected(g, opInfo); + return createFullyConnected(g, inputs, opInfo); case opinfo::OperatorType_CONV_2D: - return createConv2D(g, opInfo); + return createConv2D(g, inputs, opInfo); case opinfo::OperatorType_DEPTHWISE_CONV_2D: - return createDepthwiseConv2D(g, opInfo); + return createDepthwiseConv2D(g, inputs, opInfo); case opinfo::OperatorType_POOL_2D: - return createPool(g, opInfo); + return createPool(g, inputs, opInfo); case opinfo::OperatorType_CONCATENATION: - return createConcatenation(g, opInfo); + return createConcatenation(g, inputs, opInfo); case opinfo::OperatorType_RESHAPE: - return createReshape(g, opInfo); + return createReshape(g, inputs, opInfo); case opinfo::OperatorType_RELU: - return createReLU(g, opInfo); + return createReLU(g, inputs, opInfo); case opinfo::OperatorType_SOFTMAX: - return createSoftmax(g, opInfo); + return createSoftmax(g, inputs, opInfo); case opinfo::OperatorType_CAPPED_RELU: - return createCappedReLU(g, opInfo); + return createCappedReLU(g, inputs, opInfo); case opinfo::OperatorType_BIAS_ADD: - return createBiasAdd(g, opInfo); + return createBiasAdd(g, inputs, opInfo); default: assert(false); } } -std::unique_ptr make_graph(const opinfo::OperatorInfo* opInfo) -{ +std::unique_ptr make_graph(const opinfo::OperatorInfo* opInfo) { // Create graph std::unique_ptr g(new Graph()); + std::vector inputs; - // Create operation node - auto opNode = createOp(g, opInfo); - - for (unsigned int i = 0; i < opInfo->inputs()->size(); ++i) - { + for (unsigned int i = 0; i < opInfo->inputs()->size(); ++i) { // Create i-th input node auto inputOp = g->create("x" + 
std::to_string(i)); - // Connect i-th operation input to i-th input node - opNode->connectInputTo(i, inputOp->getOutput(0)); - // Set input shape auto inputShapeIter = opInfo->inputs()->Get(i)->shape()->dims(); Shape inputShape = ShapeHelper::createShape(*inputShapeIter, inputShapeIter->size()); inputOp->setOutputShape(0, inputShape); + + inputs.push_back(inputOp->getOutput(0)); } + // Create operation node + auto opNode = createOp(g, inputs, opInfo); + // Mark outputs g->markOutput(opNode); diff --git a/contrib/nnc/tests/soft_backend/CompileCPP.cpp b/contrib/nnc/tests/soft_backend/CompileCPP.cpp index 22c11ec..270f5a2 100644 --- a/contrib/nnc/tests/soft_backend/CompileCPP.cpp +++ b/contrib/nnc/tests/soft_backend/CompileCPP.cpp @@ -48,15 +48,11 @@ using namespace nnc::mir; // Creates simple graph with input and output void fillGraph(Graph &g) { - Operation* outputOp = g.create("out"); Shape inputShape{1, 2, 3}; - Operation* inputOp = g.create("in"); - - outputOp->connectInputTo(0, inputOp->getOutput(0)); - inputOp->setOutputShape(0, inputShape); + Operation* outputOp = g.create("out", inputOp->getOutput(0)); g.markOutput(outputOp); ShapeInference shapeInferencer; diff --git a/contrib/nnc/unittests/core/Graph.cpp b/contrib/nnc/unittests/core/Graph.cpp index 97f8ade..ec9c8ca 100644 --- a/contrib/nnc/unittests/core/Graph.cpp +++ b/contrib/nnc/unittests/core/Graph.cpp @@ -33,17 +33,12 @@ TEST(Graph, ReplaceInputs) { auto g = new Graph; auto n1 = g->create("op1"); - auto n2 = g->create("op2"); - auto n3 = g->create("op3"); - auto n4 = g->create("op4"); - auto n5 = g->create("op5", 2, 0); - - n2->connectInputTo(0, n1->getOutput(0)); - n3->connectInputTo(0, n2->getOutput(0)); - n4->connectInputTo(0, n2->getOutput(0)); - - n5->connectInputTo(0, n3->getOutput(0)); - n5->connectInputTo(1, n4->getOutput(0)); + auto n2 = g->create("op2", n1->getOutput(0)); + auto n3 = g->create("op3", n2->getOutput(0)); + auto n4 = g->create("op4", n2->getOutput(0)); + auto n5 = g->create("op5", + std::vector{n3->getOutput(0), n4->getOutput(0)}, + 0); g->replaceInputNodes({"op1", "op4"}); @@ -63,17 +58,12 @@ TEST(Graph, ReplaceOutputs) { auto g = new Graph; auto n1 = g->create("op1"); - auto n2 = g->create("op2"); - auto n3 = g->create("op3"); - auto n4 = g->create("op4"); - auto n5 = g->create("op5", 2, 0); - - n2->connectInputTo(0, n1->getOutput(0)); - n3->connectInputTo(0, n2->getOutput(0)); - n4->connectInputTo(0, n2->getOutput(0)); - - n5->connectInputTo(0, n3->getOutput(0)); - n5->connectInputTo(1, n4->getOutput(0)); + auto n2 = g->create("op2", n1->getOutput(0)); + auto n3 = g->create("op3", n2->getOutput(0)); + auto n4 = g->create("op4", n2->getOutput(0)); + auto n5 = g->create("op5", + std::vector{n3->getOutput(0), n4->getOutput(0)}, + 0); g->replaceOutputNodes({"op3"}); @@ -86,9 +76,7 @@ TEST(Graph, ReplaceOutputNodeWithInput) { auto g = new Graph; auto n1 = g->create("op1"); - auto n2 = g->create("op2"); - - n2->connectInputTo(0, n1->getOutput(0)); + auto n2 = g->create("op2", n1->getOutput(0)); g->markOutput(n2); diff --git a/contrib/nnc/unittests/core/NodeReplacer.cpp b/contrib/nnc/unittests/core/NodeReplacer.cpp index 4179f69..a1235e4 100644 --- a/contrib/nnc/unittests/core/NodeReplacer.cpp +++ b/contrib/nnc/unittests/core/NodeReplacer.cpp @@ -32,14 +32,10 @@ public: TEST(NodeMutatorTest, SimpleChainTest) { auto g = new Graph; auto n1 = g->create("op1"); - auto n2 = g->create("op2"); - auto n3 = g->create("op3"); - auto n4 = g->create("op4"); - auto n5 = g->create("op5"); - - n2->connectInputTo(0, 
diff --git a/contrib/nnc/unittests/core/NodeReplacer.cpp b/contrib/nnc/unittests/core/NodeReplacer.cpp
index 4179f69..a1235e4 100644
--- a/contrib/nnc/unittests/core/NodeReplacer.cpp
+++ b/contrib/nnc/unittests/core/NodeReplacer.cpp
@@ -32,14 +32,10 @@ TEST(NodeMutatorTest, SimpleChainTest) {
   auto g = new Graph;
 
   auto n1 = g->create<ops::VariableOp>("op1");
-  auto n2 = g->create<ops::ReluOp>("op2");
-  auto n3 = g->create<ops::ReluOp>("op3");
-  auto n4 = g->create<ops::ReluOp>("op4");
-  auto n5 = g->create<ops::ReluOp>("op5");
-
-  n2->connectInputTo(0, n1->getOutput(0));
-  n3->connectInputTo(0, n2->getOutput(0));
-  n4->connectInputTo(0, n2->getOutput(0));
+  auto n2 = g->create<ops::ReluOp>("op2", n1->getOutput(0));
+  auto n3 = g->create<ops::ReluOp>("op3", n2->getOutput(0));
+  auto n4 = g->create<ops::ReluOp>("op4", n2->getOutput(0));
+  auto n5 = g->create<ops::ReluOp>("op5", n1->getOutput(0));
 
   g->replaceNode(n2, n5);
   delete n2;
diff --git a/contrib/nnc/unittests/core/ShapeInference.cpp b/contrib/nnc/unittests/core/ShapeInference.cpp
index 0322809..84e654a 100644
--- a/contrib/nnc/unittests/core/ShapeInference.cpp
+++ b/contrib/nnc/unittests/core/ShapeInference.cpp
@@ -33,10 +33,9 @@ TEST(ShapeInferenceTest, ReshapeAutoDimension) {
   auto input = g.create<ops::VariableOp>("input");
   input->setOutputShape(0, Shape{ 10, 2, 5} );
 
-  auto op = g.create<ops::ReshapeOp>("reshape");
+  auto op = g.create<ops::ReshapeOp>("reshape", input->getOutput(0));
   op->setInputShape( 0, Shape{10, 2, 5} );
   op->setOutputShape(0, Shape{10, 1, Shape::AUTO_DIM} );
-  op->connectInputTo(0, input->getOutput(0));
 
   si.visit(*dynamic_cast<ops::ReshapeOp*>(op));
 
@@ -55,9 +54,8 @@ TEST(ShapeInferenceTest, ReshapeAutoDimensionVaryRank) {
 
   input->setOutputShape(0, inputShape);
 
-  auto op = g.create<ops::ReshapeOp>("reshape");
+  auto op = g.create<ops::ReshapeOp>("reshape", input->getOutput(0));
   op->setInputShape( 0, inputShape);
-  op->connectInputTo(0, input->getOutput(0));
 
   // test shrink
   op->setOutputShape(0, Shape{10, Shape::AUTO_DIM});
@@ -80,8 +78,7 @@ TEST(ShapeInferenceTest, SqueezeTestAllDims) {
   auto input = g.create<ops::VariableOp>("input");
   input->setOutputShape(0, input_shape);
 
-  auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", std::vector<int32_t>{});
-  sq1->connectInputTo(0, input->getOutput(0));
+  auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", input->getOutput(0), std::vector<int32_t>{});
 
   g.accept(&si);
 
@@ -99,8 +96,7 @@ TEST(ShapeInferenceTest, SqueezeTestSpecificDims) {
 
   input->setOutputShape(0, input_shape);
 
-  auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", std::vector<int32_t>{2});
-  sq1->connectInputTo(0, input->getOutput(0));
+  auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", input->getOutput(0), std::vector<int32_t>{2});
 
   g.accept(&si);
 
@@ -118,8 +114,7 @@ TEST(ShapeInferenceTest, SqueezeTestScalarResult) {
 
   input->setOutputShape(0, input_shape);
 
-  auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", std::vector<int32_t>{});
-  sq1->connectInputTo(0, input->getOutput(0));
+  auto sq1 = g.create<ops::SqueezeOp>("squeeze_1", input->getOutput(0), std::vector<int32_t>{});
 
   g.accept(&si);
 
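In the reshape tests above, `Shape::AUTO_DIM` is resolved so that the element count is preserved. A worked example matching ReshapeAutoDimension (plain arithmetic, not library code):

    int32_t total = 10 * 2 * 5;        // input Shape{10, 2, 5} has 100 elements
    int32_t fixed = 10 * 1;            // product of the known output dimensions
    int32_t auto_dim = total / fixed;  // 100 / 10 = 10, so Shape{10, 1, AUTO_DIM}
                                       // resolves to Shape{10, 1, 10}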
diff --git a/contrib/nnc/unittests/core/operation.cpp b/contrib/nnc/unittests/core/operation.cpp
index f7b5eaa..b0fff70 100644
--- a/contrib/nnc/unittests/core/operation.cpp
+++ b/contrib/nnc/unittests/core/operation.cpp
@@ -15,6 +15,7 @@
  */
 
 #include "core/modelIR/Operation.h"
+#include "core/modelIR/operations/VariableOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
 #include "core/modelIR/operations/ConcatOp.h"
@@ -25,13 +26,11 @@ using namespace nnc::mir;
 
 TEST(Operation, ConnectionTest) {
 
-  auto op1 = new ops::ReshapeOp();
+  auto op1 = new ops::VariableOp();
   op1->setId(0);
-  auto op2 = new ops::ReshapeOp();
+  auto op2 = new ops::ReshapeOp(op1->getOutput(0));
   op2->setId(1);
 
-  op2->connectInputTo(0, op1->getOutput(0));
-
   ASSERT_EQ(op1->getId(), op2->getPrevNodes()[0].op->getId());
 
   delete op1;
@@ -42,9 +41,10 @@ TEST(Operation, InputOutputShapeTest) {
   Shape inShape{1,2,3};
   Shape outShape{3,2,1};
 
-  ops::SoftmaxOp op(0);
-  op.setInputShape(0, inShape );
-  op.setOutputShape(0, outShape );
+  ops::VariableOp input;
+  ops::SoftmaxOp op(input.getOutput(0), 0);
+  op.setInputShape(0, inShape);
+  op.setOutputShape(0, outShape);
 
   ASSERT_EQ(inShape, op.getInputShape(0));
   ASSERT_EQ(outShape, op.getOutputShape(0));
@@ -53,15 +53,17 @@ TEST(Operation, InputOutputShapeTest) {
 TEST(Operation, SoftmaxAxisTest) {
   Shape inShape{1,2,3};
 
-  ops::SoftmaxOp op_1(1);
+  ops::VariableOp input;
+
+  ops::SoftmaxOp op_1(input.getOutput(0), 1);
   op_1.setInputShape(0, inShape);
   ASSERT_EQ(op_1.getAxis(), 1);
 
-  ops::SoftmaxOp op_n1(-1);
+  ops::SoftmaxOp op_n1(input.getOutput(0), -1);
   op_n1.setInputShape(0, inShape);
   ASSERT_EQ(op_n1.getAxis(), 2);
 
-  ops::SoftmaxOp op_n3(-3);
+  ops::SoftmaxOp op_n3(input.getOutput(0), -3);
   op_n3.setInputShape(0, inShape);
   ASSERT_EQ(op_n3.getAxis(), 0);
 }
@@ -69,15 +71,17 @@ TEST(Operation, SoftmaxAxisTest) {
 TEST(Operation, ConcatAxisTest) {
   Shape inShape{1,2,3};
 
-  ops::ConcatOp op_1(2, 1);
+  ops::VariableOp input1, input2;
+
+  ops::ConcatOp op_1({input1.getOutput(0), input2.getOutput(0)}, 1);
   op_1.setInputShape(0, inShape);
   ASSERT_EQ(op_1.getAxis(), 1);
 
-  ops::ConcatOp op_n1(2, -1);
+  ops::ConcatOp op_n1({input1.getOutput(0), input2.getOutput(0)}, -1);
   op_n1.setInputShape(0, inShape);
   ASSERT_EQ(op_n1.getAxis(), 2);
 
-  ops::ConcatOp op_n3(2, -3);
+  ops::ConcatOp op_n3({input1.getOutput(0), input2.getOutput(0)}, -3);
   op_n3.setInputShape(0, inShape);
   ASSERT_EQ(op_n3.getAxis(), 0);
 }
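As the operation.cpp changes above show, operations can now be built directly on the stack with no Graph involved: the base Operation constructor records the producer/consumer edges as soon as the dependent op is constructed, so the only requirement is that an argument op outlive its consumers. A condensed sketch of the pattern the tests use:

    ops::VariableOp input;                          // source op, zero inputs
    ops::SoftmaxOp softmax(input.getOutput(0), 1);  // edge registered in the ctor
    assert(softmax.getPrevNodes()[0].op == &input); // wiring is already in place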
diff --git a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
index 72ed2dc..3da4b24 100644
--- a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
+++ b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
@@ -98,26 +98,26 @@ namespace {
 
 /** Creates graph with one operation generated by opGen function and returns this operation node*/
-mir::Operation* fillGraph(mir::Graph& g, function<mir::Operation*(mir::Graph&)> opGen,
-                          const vector<unique_ptr<mir::TensorVariant>>& inputNTensors)
-{
-  // Create operation
-  mir::Operation* op = opGen(g);
-
-  int numInputs = op->getPrevNodes().size();
-  assert(inputNTensors.size() == static_cast<size_t>(numInputs));
+mir::Operation* fillGraph(mir::Graph& g,
+                          function<mir::Operation*(mir::Graph&,
+                                                   const std::vector<mir::IODescriptor>& inputs)> opGen,
+                          const vector<unique_ptr<mir::TensorVariant>>& inputNTensors) {
+  std::vector<mir::IODescriptor> inputs;
+  int numInputs = inputNTensors.size();
   for (int i = 0; i < numInputs; ++i) {
     // Create i-th input node
     auto inputOp = g.create<mir::ops::VariableOp>("x" + std::to_string(i));
 
-    // Connect i-th operation input to i-th input node
-    op->connectInputTo(i, inputOp->getOutput(0));
-
     // Set input shape
     inputOp->setOutputShape(0, inputNTensors[i]->getShape());
+
+    inputs.push_back(inputOp->getOutput(0));
   }
 
+  // Create operation
+  mir::Operation* op = opGen(g, inputs);
+
   // Mark outputs
   g.markOutput(op);
@@ -288,8 +288,12 @@ void compareResults(const mir::TensorVariant &ref_nnc_tensor, const Tensor &test
  * This function creates test graph, runs interpreter, specifies artifact operation and compares results
  */
 template <typename TestFunc, typename... Args>
-void createAndRunTestGraph(function<mir::Operation*(mir::Graph&)> opGenerator, TestFunc artifactOperation,
-                           const vector<unique_ptr<mir::TensorVariant>> &inputNTensors, const Args &...inputATensors)
+void createAndRunTestGraph(
+    function<mir::Operation*(mir::Graph&,
+                             const std::vector<mir::IODescriptor>& inputs)> opGenerator,
+    TestFunc artifactOperation,
+    const vector<unique_ptr<mir::TensorVariant>> &inputNTensors,
+    const Args &...inputATensors)
 {
   mir::Graph g;
   mir::Operation *actualOperation = fillGraph(g, opGenerator, inputNTensors);
@@ -322,7 +326,9 @@ TEST(cpp_operations_test, bias)
   fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
   mir::TensorVariant weights = createNTensor(weightsShape, 1.0f);
-  auto opGenerator = [weights](mir::Graph &g){return g.create<mir::ops::BiasAddOp>("y", weights);};
+  auto opGenerator = [weights](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::BiasAddOp>("y", inputs[0], weights);
+  };
 
   createAndRunTestGraph(opGenerator, biasAdd, inputNTensors, aInputTensor);
 }
@@ -336,7 +342,9 @@ TEST(cpp_operations_test, scale)
   fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
   mir::TensorVariant weights = createNTensor(weightsShape, 1.0f);
-  auto opGenerator = [weights](mir::Graph &g){return g.create<mir::ops::ScaleOp>("y", weights);};
+  auto opGenerator = [weights](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::ScaleOp>("y", inputs[0], weights);
+  };
 
   createAndRunTestGraph(opGenerator, scale, inputNTensors, aInputTensor);
 }
@@ -350,7 +358,9 @@ TEST(cpp_operations_test, capped_relu)
   Tensor aInputTensor;
   vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
   fillTensors(inputNTensors[0], aInputTensor, shapeData, 1.0f);
-  auto opGenerator = [cap](mir::Graph &g){return g.create<mir::ops::CappedReluOp>("y", cap);};
+  auto opGenerator = [cap](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::CappedReluOp>("y", inputs[0], cap);
+  };
 
   createAndRunTestGraph(opGenerator, cappedRelu, inputNTensors, aInputTensor);
 }
@@ -371,7 +381,9 @@ TEST(cpp_operations_test, concat)
   vector<unique_ptr<mir::TensorVariant>> inputNTensors(2);
   fillTensors(inputNTensors[0], inputATensors[0], shape1Data, 1.0f);
   fillTensors(inputNTensors[1], inputATensors[1], shape2Data, 2.0f);
-  auto opGenerator = [axis](mir::Graph &g) { return g.create<mir::ops::ConcatOp>("y", 2, axis); };
+  auto opGenerator = [axis](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::ConcatOp>("y", inputs, axis);
+  };
 
   createAndRunTestGraph(opGenerator, concat, inputNTensors, inputATensors[0], inputATensors[1]);
 }
@@ -387,8 +399,8 @@ TEST(cpp_operations_test, add2) {
   vector<unique_ptr<mir::TensorVariant>> input_n_tensors(2);
   fillTensors(input_n_tensors[0], input_a_tensors[0], shape_data, 1.0f);
   fillTensors(input_n_tensors[1], input_a_tensors[1], shape_data, 2.0f);
-  auto op_generator = [](mir::Graph& g) {
-    return g.create<mir::ops::ElementwiseOp>("y", mir::ops::ElementwiseOp::OpType::sum, 2);
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::sum);
   };
 
   createAndRunTestGraph(op_generator, ElementWise, input_n_tensors, input_a_tensors[0],
@@ -406,8 +418,8 @@ TEST(cpp_operations_test, mul3) {
   fillTensors(input_n_tensors[0], input_a_tensors[0], shape_data, 1.0f);
   fillTensors(input_n_tensors[1], input_a_tensors[1], shape_data, 2.0f);
   fillTensors(input_n_tensors[2], input_a_tensors[2], shape_data, 3.0f);
-  auto opGenerator = [](mir::Graph& g) {
-    return g.create<mir::ops::ElementwiseOp>("y", mir::ops::ElementwiseOp::OpType::prod, 3);
+  auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::prod);
   };
 
   createAndRunTestGraph(opGenerator, ElementWise, input_n_tensors, input_a_tensors[0],
@@ -426,8 +438,8 @@ TEST(cpp_operations_test, max4) {
   fillTensors(input_n_tensors[1], input_a_tensors[1], shape_data, 2.0f);
   fillTensors(input_n_tensors[2], input_a_tensors[2], shape_data, 3.0f);
   fillTensors(input_n_tensors[3], input_a_tensors[3], shape_data, 3.0f);
-  auto opGenerator = [](mir::Graph& g) {
-    return g.create<mir::ops::ElementwiseOp>("y", mir::ops::ElementwiseOp::OpType::max, 4);
+  auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::max);
   };
 
   createAndRunTestGraph(opGenerator, ElementWise, input_n_tensors, input_a_tensors[0],
@@ -457,10 +469,10 @@ TEST(cpp_operations_test, conv2d)
   fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
   auto padT = mir::ops::PaddingType::Same;
   mir::TensorVariant kernel = createNTensor(kernelShape, 1.0f);
-  auto opGenerator = [kernel, strides, padT](mir::Graph &g)
-  {
-    return g.create<mir::ops::Conv2DOp>("y", kernel, strides, padT);
-  };
+  auto opGenerator = [kernel, strides, padT](mir::Graph& g,
+                                             const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::Conv2DOp>("y", inputs[0], kernel, strides, padT);
+  };
 
   createAndRunTestGraph(opGenerator, conv2d, inputNTensors, aInputTensor);
 }
@@ -489,8 +501,9 @@ TEST(cpp_operations_tests, depthwise_conv)
   fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
   auto padT = mir::ops::PaddingType::Same;
   mir::TensorVariant kernel = createNTensor(kernelShape, 1.0f);
-  auto opGenerator = [kernel, strides, padT](mir::Graph &g) {
-    return g.create<mir::ops::DepthwiseConv2DOp>("y", kernel, strides, padT);
+  auto opGenerator = [kernel, strides, padT](mir::Graph& g,
+                                             const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::DepthwiseConv2DOp>("y", inputs[0], kernel, strides, padT);
   };
 
   createAndRunTestGraph(opGenerator, depthwiseConv2d, inputNTensors, aInputTensor);
@@ -505,7 +518,9 @@ TEST(cpp_operations_test, fully_connected)
   Tensor aInputTensor;
   fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
   mir::TensorVariant weights = createNTensor(weightsShape, 1.0f);
-  auto opGenerator = [weights](mir::Graph &g){return g.create<mir::ops::FullyConnectedOp>("y", weights);};
+  auto opGenerator = [weights](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::FullyConnectedOp>("y", inputs[0], weights);
+  };
 
   createAndRunTestGraph(opGenerator, fullConnect, inputNTensors, aInputTensor);
 }
@@ -534,8 +549,10 @@ static void genericPoolTest(Func testFunc, const vector
-  auto opGenerator = [windowShape, strides, padT, border](mir::Graph &g) {
-    return g.create<mir::ops::PoolOp>("y", windowShape, strides, poolT, padT, border);
+  auto opGenerator = [windowShape, strides, padT, border](mir::Graph& g,
+                                                          const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::PoolOp>("y", inputs[0], windowShape, strides, poolT, padT,
+                                      border);
   };
 
   createAndRunTestGraph(opGenerator, testFunc, inputNTensors, aInputTensor);
@@ -567,7 +584,9 @@ TEST(cpp_operations_test, relu)
   Tensor aInputTensor;
   vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
   fillTensors(inputNTensors[0], aInputTensor, shapeData, 1.0f);
-  auto opGenerator = [](mir::Graph &g){return g.create<mir::ops::ReluOp>("y");};
+  auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::ReluOp>("y", inputs[0]);
+  };
 
   createAndRunTestGraph(opGenerator, relu, inputNTensors, aInputTensor);
 }
@@ -578,7 +597,9 @@ TEST(cpp_operations_test, elu) {
   Tensor a_input_tensor;
   vector<unique_ptr<mir::TensorVariant>> input_n_tensors(1);
   fillTensors(input_n_tensors[0], a_input_tensor, shape_data, 1.0f);
-  auto op_generator = [](mir::Graph &g){return g.create<mir::ops::EluOp>("y", 1);};
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::EluOp>("y", inputs[0], 1);
+  };
 
   createAndRunTestGraph(op_generator, elu, input_n_tensors, a_input_tensor);
 }
@@ -589,7 +610,9 @@ TEST(cpp_operations_test, tanh) {
   Tensor a_input_tensor;
   vector<unique_ptr<mir::TensorVariant>> input_n_tensors(1);
   fillTensors(input_n_tensors[0], a_input_tensor, shape_data, 1.0f);
-  auto op_generator = [](mir::Graph &g){return g.create<mir::ops::TanhOp>("y");};
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::TanhOp>("y", inputs[0]);
+  };
 
   createAndRunTestGraph(op_generator, tanhActivation, input_n_tensors, a_input_tensor);
 }
@@ -606,7 +629,9 @@ TEST(cpp_operations_test, softmax)
   Tensor aInputTensor;
   vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
   fillTensors(inputNTensors[0], aInputTensor, shapeData, 1.0f);
-  auto opGenerator = [axis](mir::Graph &g) { return g.create<mir::ops::SoftmaxOp>("y", axis); };
+  auto opGenerator = [axis](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::SoftmaxOp>("y", inputs[0], axis);
+  };
 
   createAndRunTestGraph(opGenerator, softmax, inputNTensors, aInputTensor);
 }
@@ -622,12 +647,11 @@ TEST(cpp_operations_test, reshape)
   Tensor aInputTensor;
   vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
   fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
-  auto opGenerator = [nOutputShape](mir::Graph &g)
-  {
-    auto op = g.create<mir::ops::ReshapeOp>("y");
-    op->setOutputShape(0, nOutputShape);
-    return op;
-  };
+  auto opGenerator = [nOutputShape](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    auto op = g.create<mir::ops::ReshapeOp>("y", inputs[0]);
+    op->setOutputShape(0, nOutputShape);
+    return op;
+  };
 
   createAndRunTestGraph(opGenerator, reshape, inputNTensors, aInputTensor);
 }
diff --git a/contrib/nnc/unittests/soft_backend/Generator.cpp b/contrib/nnc/unittests/soft_backend/Generator.cpp
index 7ff9214..6f64d2c 100644
--- a/contrib/nnc/unittests/soft_backend/Generator.cpp
+++ b/contrib/nnc/unittests/soft_backend/Generator.cpp
@@ -89,8 +89,7 @@ TEST(Generator, check_generator_call)
   nnc::mir::Graph g;
   Operation* input = g.create<ops::VariableOp>("input");
   input->setOutputShape(0, Shape({1,2,3,4}));
-  Operation* output = g.create<ops::ReluOp>("output");
-  output->connectInputTo(0, input->getOutput(0));
+  Operation* output = g.create<ops::ReluOp>("output", input->getOutput(0));
 
   // test that generator creates output dir and files
   if (isFileExists(TEST_DIR))
diff --git a/contrib/nnc/unittests/soft_backend/ModelAnalyzer.cpp b/contrib/nnc/unittests/soft_backend/ModelAnalyzer.cpp
index 3cfc6dc..f6f3f31 100644
--- a/contrib/nnc/unittests/soft_backend/ModelAnalyzer.cpp
+++ b/contrib/nnc/unittests/soft_backend/ModelAnalyzer.cpp
@@ -47,19 +47,13 @@ TEST(ModelAnalyzer, linearization) {
    */
   Operation* input = g.create<ops::VariableOp>("input");
   input->setOutputShape(0, {1,2,3});
-  Operation* head1 = g.create<ops::ReluOp>("head1");
-  Operation* head2 = g.create<ops::ReluOp>("head2");
-  Operation* tail1 = g.create<ops::ReluOp>("tail1");
-  Operation* tail2 = g.create<ops::ReluOp>("tail2");
-  Operation* join = g.create<ops::ConcatOp>("join", 2, 0);
-
-  // connect corresponding nodes in net
-  head1->connectInputTo(0, input->getOutput(0));
-  head2->connectInputTo(0, input->getOutput(0));
-  tail1->connectInputTo(0, head1->getOutput(0));
-  tail2->connectInputTo(0, head2->getOutput(0));
-  join->connectInputTo(0, tail1->getOutput(0));
-  join->connectInputTo(1, tail1->getOutput(0));
+  Operation* head1 = g.create<ops::ReluOp>("head1", input->getOutput(0));
+  Operation* head2 = g.create<ops::ReluOp>("head2", input->getOutput(0));
+  Operation* tail1 = g.create<ops::ReluOp>("tail1", head1->getOutput(0));
+  Operation* tail2 = g.create<ops::ReluOp>("tail2", head2->getOutput(0));
+  Operation* join = g.create<ops::ConcatOp>("join",
+                                            std::vector<IODescriptor>{tail1->getOutput(0),
+                                                                      tail2->getOutput(0)},
+                                            0);
 
   ShapeInference si;
   g.accept(&si);
-- 
2.7.4
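Taken together, the patch reduces to a single migration rule for code that builds modelIR graphs; a before/after sketch with illustrative names:

    // before this patch: allocate first, wire afterwards
    Operation* relu = g.create<ops::ReluOp>("relu");
    relu->connectInputTo(0, input->getOutput(0));

    // after this patch: dependencies are constructor arguments, so a node is
    // fully wired the moment it exists, and graphs are necessarily created in
    // topological order (producers before consumers)
    Operation* relu = g.create<ops::ReluOp>("relu", input->getOutput(0));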