From a7160a5170bdda2948e0c866048fdde19ef674b7 Mon Sep 17 00:00:00 2001 From: =?utf8?q?=D0=A0=D0=BE=D0=BC=D0=B0=D0=BD=20=D0=9C=D0=B8=D1=85=D0=B0?= =?utf8?q?=D0=B9=D0=BB=D0=BE=D0=B2=D0=B8=D1=87=20=D0=A0=D1=83=D1=81=D1=8F?= =?utf8?q?=D0=B5=D0=B2/AI=20Tools=20Lab=20/SRR/Staff=20Engineer/=EC=82=BC?= =?utf8?q?=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Tue, 2 Oct 2018 13:01:37 +0300 Subject: [PATCH] Eliminate nested namespaces in nnc project (#1710) * Eliminate nested namespaces in nnc project * replace core with mir * replace nncc with nnc * replace clopt with cli * eliminate namespaces: - contrib - model - IR - data - backend - soft - dumper - ADT - interpreter - frontend - pass - common - util - impl * remove `using` directives in some headers Signed-off-by: Roman Rusyaev --- contrib/nnc/core/modelIR/Index.cpp | 14 +- contrib/nnc/core/modelIR/Shape.cpp | 14 +- contrib/nnc/core/modelIR/ShapeInference.cpp | 49 +++--- contrib/nnc/core/modelIR/Tensor.cpp | 14 +- contrib/nnc/core/modelIR/TensorVariant.cpp | 14 +- contrib/nnc/core/modelIR/graph.cpp | 16 +- contrib/nnc/core/modelIR/ir_dot_builder.cpp | 14 +- contrib/nnc/core/modelIR/ir_dot_dumper.cpp | 16 +- contrib/nnc/core/modelIR/ir_dot_node_info.cpp | 14 +- contrib/nnc/core/modelIR/ir_node.cpp | 29 ++-- contrib/nnc/core/modelIR/operation.cpp | 18 +-- contrib/nnc/core/modelIR/visitor.cpp | 43 +++--- contrib/nnc/core/serialize/Deserializer.cpp | 15 +- contrib/nnc/core/serialize/Serializer.cpp | 14 +- contrib/nnc/core/serialize/proto/model_ir.proto | 2 +- contrib/nnc/driver/Driver.cpp | 39 ++--- contrib/nnc/driver/Driver.h | 7 +- contrib/nnc/driver/Options.cpp | 11 +- contrib/nnc/driver/main.cpp | 5 +- contrib/nnc/examples/caffe_frontend/model_dump.cpp | 13 +- .../nnc/examples/tflite_frontend/sanity_check.cpp | 17 +- contrib/nnc/include/core/modelIR/ExternalRegion.h | 14 +- contrib/nnc/include/core/modelIR/Index.h | 14 +- contrib/nnc/include/core/modelIR/Region.h | 14 +- contrib/nnc/include/core/modelIR/Shape.h | 14 +- 
contrib/nnc/include/core/modelIR/ShapeInference.h | 49 +++--- contrib/nnc/include/core/modelIR/ShapeRange.h | 17 +- contrib/nnc/include/core/modelIR/Tensor.h | 18 +-- contrib/nnc/include/core/modelIR/TensorUtil.h | 23 +-- contrib/nnc/include/core/modelIR/TensorVariant.h | 17 +- contrib/nnc/include/core/modelIR/graph.h | 17 +- contrib/nnc/include/core/modelIR/ir_dot_builder.h | 16 +- contrib/nnc/include/core/modelIR/ir_dot_dumper.h | 17 +- .../nnc/include/core/modelIR/ir_dot_node_info.h | 17 +- contrib/nnc/include/core/modelIR/ir_node.h | 33 ++-- .../include/core/modelIR/operations/batch_norm.h | 17 +- .../include/core/modelIR/operations/bias_add_op.h | 17 +- .../core/modelIR/operations/capped_relu_op.h | 19 +-- .../nnc/include/core/modelIR/operations/common.h | 17 +- .../include/core/modelIR/operations/concat_op.h | 17 +- .../include/core/modelIR/operations/conv_2d_op.h | 19 +-- .../core/modelIR/operations/depthwise_conv2d_op.h | 19 +-- .../include/core/modelIR/operations/dropout_op.h | 19 +-- .../core/modelIR/operations/fully_connected_op.h | 19 +-- .../include/core/modelIR/operations/operation.h | 31 ++-- .../nnc/include/core/modelIR/operations/pool_op.h | 19 +-- .../nnc/include/core/modelIR/operations/relu_op.h | 17 +- .../include/core/modelIR/operations/reshape_op.h | 17 +- .../nnc/include/core/modelIR/operations/scale_op.h | 17 +- .../include/core/modelIR/operations/softmax_op.h | 17 +- .../include/core/modelIR/operations/variable_op.h | 17 +- contrib/nnc/include/core/modelIR/visitor.h | 77 +++++----- contrib/nnc/include/core/serialize/Deserializer.h | 15 +- contrib/nnc/include/core/serialize/Serializer.h | 15 +- contrib/nnc/include/option/Options.h | 11 +- contrib/nnc/include/pass/Pass.h | 10 +- contrib/nnc/include/pass/PassData.h | 23 +-- contrib/nnc/include/pass/PassException.h | 10 +- contrib/nnc/include/pass/PassManager.h | 10 +- .../passes/acl_soft_backend/AclCPPGenerator.h | 14 +- .../acl_soft_backend/ArtifactGeneratorCppCode.h | 13 +- 
.../acl_soft_backend/ArtifactGeneratorCppDecl.h | 13 +- .../passes/acl_soft_backend/ArtifactModel.h | 13 +- .../passes/acl_soft_backend/IArtifactGenerator.h | 13 +- .../include/passes/caffe_frontend/CaffeFrontend.h | 11 +- .../passes/common_frontend/model_allocation.h | 13 +- .../include/passes/common_frontend/nn_importer.h | 13 +- .../include/passes/common_frontend/shape_helper.h | 23 +-- .../nnc/include/passes/interpreter/Interpreter.h | 55 +++---- .../include/passes/interpreter/InterpreterPass.h | 19 +-- .../include/passes/soft_backend/BaseGenerator.h | 14 +- .../nnc/include/passes/soft_backend/CGenerator.h | 13 +- .../nnc/include/passes/soft_backend/CPPGenerator.h | 13 +- .../passes/tflite_frontend/TfliteFrontend.h | 11 +- contrib/nnc/include/support/CommandLine.h | 11 +- contrib/nnc/pass/PassManager.cpp | 10 +- .../passes/acl_soft_backend/AclCPPGenerator.cpp | 13 +- .../acl_soft_backend/ArtifactGeneratorCppCode.cpp | 22 +-- .../acl_soft_backend/ArtifactGeneratorCppDecl.cpp | 6 +- .../nnc/passes/acl_soft_backend/ArtifactModel.cpp | 1 - .../passes/caffe_frontend/caffe_dump_visitor.cpp | 10 +- .../nnc/passes/caffe_frontend/caffe_dump_visitor.h | 10 +- .../nnc/passes/caffe_frontend/caffe_frontend.cpp | 18 +-- .../nnc/passes/caffe_frontend/caffe_importer.cpp | 12 +- contrib/nnc/passes/caffe_frontend/caffe_importer.h | 12 +- .../passes/caffe_frontend/caffe_model_visitor.cpp | 23 +-- .../passes/caffe_frontend/caffe_model_visitor.h | 49 +++--- .../nnc/passes/caffe_frontend/caffe_op_creator.cpp | 39 ++--- .../nnc/passes/caffe_frontend/caffe_op_creator.h | 20 +-- contrib/nnc/passes/caffe_frontend/caffe_visitor.h | 10 +- contrib/nnc/passes/caffe_frontend/caffe_walker.cpp | 10 +- contrib/nnc/passes/caffe_frontend/caffe_walker.h | 10 +- contrib/nnc/passes/caffe_frontend/proto_reader.cpp | 13 +- contrib/nnc/passes/caffe_frontend/proto_reader.h | 13 +- .../passes/common_frontend/model_allocation.cpp | 7 +- .../nnc/passes/common_frontend/shape_helper.cpp | 16 +- 
contrib/nnc/passes/interpreter/Interpreter.cpp | 78 ++++------ .../nnc/passes/interpreter/interpreter_pass.cpp | 40 ++--- contrib/nnc/passes/interpreter/ops/BatchNorm.h | 28 +--- contrib/nnc/passes/interpreter/ops/Bias.h | 28 +--- contrib/nnc/passes/interpreter/ops/Concat.h | 26 +--- .../passes/interpreter/ops/Depthwise_conv_2D.cpp | 19 +-- .../nnc/passes/interpreter/ops/Depthwise_conv_2D.h | 35 ++--- contrib/nnc/passes/interpreter/ops/Dropout.h | 30 +--- contrib/nnc/passes/interpreter/ops/Elementwise.h | 33 ++-- contrib/nnc/passes/interpreter/ops/Fill.h | 22 +-- .../nnc/passes/interpreter/ops/FullyConnected.h | 40 ++--- contrib/nnc/passes/interpreter/ops/OperationImpl.h | 27 +--- contrib/nnc/passes/interpreter/ops/Pool.cpp | 19 +-- contrib/nnc/passes/interpreter/ops/Pool.h | 27 +--- contrib/nnc/passes/interpreter/ops/Reduce.h | 30 ++-- contrib/nnc/passes/interpreter/ops/Reshape.h | 38 ++--- contrib/nnc/passes/interpreter/ops/Scale.cpp | 22 +-- contrib/nnc/passes/interpreter/ops/Scale.h | 26 +--- contrib/nnc/passes/interpreter/ops/Softmax.h | 36 ++--- contrib/nnc/passes/interpreter/ops/common.cpp | 18 +-- contrib/nnc/passes/interpreter/ops/common.h | 24 +-- contrib/nnc/passes/interpreter/ops/conv_2D.cpp | 19 +-- contrib/nnc/passes/interpreter/ops/conv_2D.h | 34 ++-- contrib/nnc/passes/interpreter/ops/conv_FFT.cpp | 19 +-- contrib/nnc/passes/interpreter/ops/conv_FFT.h | 49 ++---- contrib/nnc/passes/soft_backend/BaseGenerator.cpp | 24 +-- contrib/nnc/passes/soft_backend/CGenerator.cpp | 15 +- contrib/nnc/passes/soft_backend/CPPGenerator.cpp | 19 +-- contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp | 52 +++---- contrib/nnc/passes/soft_backend/ModelAnalyzer.h | 57 +++---- contrib/nnc/passes/soft_backend/SBSerializer.cpp | 58 ++++--- contrib/nnc/passes/soft_backend/SBSerializer.h | 51 +++--- .../passes/tflite_frontend/tflite_dump_visitor.cpp | 10 +- .../passes/tflite_frontend/tflite_dump_visitor.h | 8 +- .../nnc/passes/tflite_frontend/tflite_frontend.cpp | 18 +-- 
.../tflite_frontend/tflite_importer.inline.h | 1 - .../passes/tflite_frontend/tflite_ir_visitor.cpp | 25 ++- .../nnc/passes/tflite_frontend/tflite_ir_visitor.h | 16 +- .../passes/tflite_frontend/tflite_op_creator.cpp | 13 +- .../nnc/passes/tflite_frontend/tflite_op_creator.h | 20 +-- .../passes/tflite_frontend/tflite_v3_importer.cpp | 10 +- .../passes/tflite_frontend/tflite_v3_importer.h | 10 +- .../nnc/passes/tflite_frontend/tflite_visitor.h | 10 +- .../nnc/passes/tflite_frontend/tflite_walker.cpp | 10 +- contrib/nnc/passes/tflite_frontend/tflite_walker.h | 10 +- contrib/nnc/support/CLOptionChecker.cpp | 13 +- contrib/nnc/support/CommandLine.cpp | 13 +- contrib/nnc/tests/import/caffe.cpp | 8 +- contrib/nnc/tests/import/tflite.cpp | 8 +- contrib/nnc/tests/interpreter/graph_creator.cpp | 5 +- contrib/nnc/tests/interpreter/graph_creator.h | 2 +- contrib/nnc/tests/interpreter/op_info_util.cpp | 22 +-- contrib/nnc/tests/interpreter/op_info_util.h | 18 +-- contrib/nnc/tests/interpreter/op_test.cpp | 6 +- contrib/nnc/tests/soft_backend/CompileCPP.cpp | 14 +- contrib/nnc/unittests/core/ShapeInference.cpp | 3 +- contrib/nnc/unittests/core/ShapeRange.cpp | 4 +- contrib/nnc/unittests/core/TensorVariant.cpp | 3 +- contrib/nnc/unittests/core/deserializer.cpp | 4 +- contrib/nnc/unittests/core/ir_node.cpp | 3 +- contrib/nnc/unittests/core/operation.cpp | 3 +- contrib/nnc/unittests/core/serializer.cpp | 4 +- contrib/nnc/unittests/pass/PassExceptionTest.cpp | 2 +- contrib/nnc/unittests/pass/PassManagerTest.cpp | 13 +- .../nnc/unittests/soft_backend/CPPOperations.cpp | 171 ++++++++++----------- contrib/nnc/unittests/soft_backend/Generator.cpp | 13 +- contrib/nnc/unittests/support/CommandLineTest.cpp | 2 +- 163 files changed, 1042 insertions(+), 2209 deletions(-) diff --git a/contrib/nnc/core/modelIR/Index.cpp b/contrib/nnc/core/modelIR/Index.cpp index 89ed876..6efb9dd 100644 --- a/contrib/nnc/core/modelIR/Index.cpp +++ b/contrib/nnc/core/modelIR/Index.cpp @@ -18,13 +18,9 @@ 
#include -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace data +namespace mir { Index::Index(std::initializer_list &&l) : _indices{l} @@ -62,7 +58,5 @@ std::ostream &operator<<(std::ostream &s, const Index &sh) return s; } -} // namespace data -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/modelIR/Shape.cpp b/contrib/nnc/core/modelIR/Shape.cpp index 083b1df..fdac281 100644 --- a/contrib/nnc/core/modelIR/Shape.cpp +++ b/contrib/nnc/core/modelIR/Shape.cpp @@ -18,13 +18,9 @@ #include -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace data +namespace mir { Shape::Shape(std::initializer_list &&l) : _dims{l} @@ -105,7 +101,5 @@ std::ostream &operator<<(std::ostream &s, const Shape &sh) return s; } -} // namespace data -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/modelIR/ShapeInference.cpp b/contrib/nnc/core/modelIR/ShapeInference.cpp index e64ab76..a3e29ae 100644 --- a/contrib/nnc/core/modelIR/ShapeInference.cpp +++ b/contrib/nnc/core/modelIR/ShapeInference.cpp @@ -33,18 +33,12 @@ #include "core/modelIR/operations/scale_op.h" #include "core/modelIR/operations/dropout_op.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { -using nncc::contrib::core::data::Shape; +using nnc::mir::Shape; template void fillHWShapesForPaddedOperations(Op &op, const Shape &windowShape, Shape &outShape) @@ -95,7 +89,7 @@ void fillHWShapesForPaddedOperations(Op &op, const Shape &windowShape, Shape &ou op.setPadding(inRank - 1, 0); } -void ShapeInference::visit(ADT::INode::Ref node, ops::ConcatOp &op) +void ShapeInference::visit(INode::Ref node, ops::ConcatOp &op) { fillInputShapes(node, op); @@ -117,7 +111,7 @@ void ShapeInference::visit(ADT::INode::Ref node, ops::ConcatOp 
&op) op.setOutputShape(0, outShape); } -void ShapeInference::visit(ADT::INode::Ref node, ops::Conv2DOp &op) +void ShapeInference::visit(INode::Ref node, ops::Conv2DOp &op) { fillInputShapes(node, op); @@ -131,14 +125,14 @@ void ShapeInference::visit(ADT::INode::Ref node, ops::Conv2DOp &op) op.setOutputShape(0, outShape); } -void ShapeInference::visit(ADT::INode::Ref node, ops::VariableOp &op) +void ShapeInference::visit(INode::Ref node, ops::VariableOp &op) { (void)op; (void)node; // No need to do anything for inputs. These should be set by user } -void ShapeInference::fillInputShapes(ADT::INode::Ref node, OpDescription &op) +void ShapeInference::fillInputShapes(INode::Ref node, OpDescription &op) { uint32_t i = 0; for (auto &in : node->getPrevNodes()) @@ -148,19 +142,19 @@ void ShapeInference::fillInputShapes(ADT::INode::Ref node, OpDescription &op) } } -void ShapeInference::visit(ADT::INode::Ref node, ops::ReluOp &op) +void ShapeInference::visit(INode::Ref node, ops::ReluOp &op) { fillInputShapes(node, op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(ADT::INode::Ref node, ops::SoftmaxOp &op) +void ShapeInference::visit(INode::Ref node, ops::SoftmaxOp &op) { fillInputShapes(node, op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(ADT::INode::Ref node, ops::PoolOp &op) +void ShapeInference::visit(INode::Ref node, ops::PoolOp &op) { fillInputShapes(node, op); @@ -177,7 +171,7 @@ void ShapeInference::visit(ADT::INode::Ref node, ops::PoolOp &op) op.setOutputShape(0, outShape); } -void ShapeInference::visit(ADT::INode::Ref node, ops::FullyConnectedOp &op) +void ShapeInference::visit(INode::Ref node, ops::FullyConnectedOp &op) { fillInputShapes(node, op); const Shape &inShape = op.getInputShape(0); @@ -199,13 +193,13 @@ void ShapeInference::visit(ADT::INode::Ref node, ops::FullyConnectedOp &op) op.setOutputShape(0, outShape); } -void ShapeInference::visit(ADT::INode::Ref node, ops::CappedReluOp &op) +void 
ShapeInference::visit(INode::Ref node, ops::CappedReluOp &op) { fillInputShapes(node, op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(ADT::INode::Ref node, ops::DepthwiseConv2DOp &op) +void ShapeInference::visit(INode::Ref node, ops::DepthwiseConv2DOp &op) { fillInputShapes(node, op); @@ -225,13 +219,13 @@ void ShapeInference::visit(ADT::INode::Ref node, ops::DepthwiseConv2DOp &op) op.setOutputShape(0, outShape); } -void ShapeInference::visit(ADT::INode::Ref node, ops::BiasAddOp &op) +void ShapeInference::visit(INode::Ref node, ops::BiasAddOp &op) { fillInputShapes(node, op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(ADT::INode::Ref node, ops::ReshapeOp &op) +void ShapeInference::visit(INode::Ref node, ops::ReshapeOp &op) { // Reshape should have it's output shape filled by importer/user fillInputShapes(node, op); @@ -258,26 +252,23 @@ void ShapeInference::visit(ADT::INode::Ref node, ops::ReshapeOp &op) op.setOutputShape(0, outShape); } -void ShapeInference::visit(ADT::INode::Ref node, ops::ScaleOp &op) +void ShapeInference::visit(INode::Ref node, ops::ScaleOp &op) { fillInputShapes(node, op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(ADT::INode::Ref node, ops::DropoutOp &op) +void ShapeInference::visit(INode::Ref node, ops::DropoutOp &op) { fillInputShapes(node, op); op.setOutputShape(0, op.getInputShape(0)); } -void ShapeInference::visit(ADT::INode::Ref node, ops::BatchNormOp &op) +void ShapeInference::visit(INode::Ref node, ops::BatchNormOp &op) { fillInputShapes(node, op); op.setOutputShape(0, op.getInputShape(0)); } -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/modelIR/Tensor.cpp b/contrib/nnc/core/modelIR/Tensor.cpp index db0b6bc..2beceee 100644 --- a/contrib/nnc/core/modelIR/Tensor.cpp +++ b/contrib/nnc/core/modelIR/Tensor.cpp @@ -16,20 
+16,14 @@ #include "core/modelIR/Tensor.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace data +namespace mir { template class Tensor; template class Tensor; template class Tensor; -} // namespace data -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/modelIR/TensorVariant.cpp b/contrib/nnc/core/modelIR/TensorVariant.cpp index a7770b3..0ec27b1 100644 --- a/contrib/nnc/core/modelIR/TensorVariant.cpp +++ b/contrib/nnc/core/modelIR/TensorVariant.cpp @@ -16,13 +16,9 @@ #include "core/modelIR/TensorVariant.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace ADT +namespace mir { TensorVariant::TensorVariant(const Shape& shape, const std::shared_ptr& data, TensorVariant::DTYPE dtype, size_t element_size) @@ -51,7 +47,5 @@ size_t TensorVariant::getOffset(const Index &idx) const { return offset; } -} // namespace ADT -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/modelIR/graph.cpp b/contrib/nnc/core/modelIR/graph.cpp index 94882e8..43613fa 100644 --- a/contrib/nnc/core/modelIR/graph.cpp +++ b/contrib/nnc/core/modelIR/graph.cpp @@ -21,15 +21,10 @@ #include "core/modelIR/ir_node.h" #include "core/modelIR/operations/operation.h" -namespace nncc +namespace nnc { -namespace contrib +namespace mir { -namespace core -{ -namespace IR -{ -namespace model { INode::Ref Graph::getInput(const std::string &name) { auto it = _inputs.find(name); @@ -94,8 +89,5 @@ void Graph::markOutput(INode::Ref node) { _outputs[node->getName()] = node; } -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/modelIR/ir_dot_builder.cpp b/contrib/nnc/core/modelIR/ir_dot_builder.cpp index 4eb6c73..6d4ffb5 100644 --- 
a/contrib/nnc/core/modelIR/ir_dot_builder.cpp +++ b/contrib/nnc/core/modelIR/ir_dot_builder.cpp @@ -16,13 +16,9 @@ #include "core/modelIR/ir_dot_builder.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace dumper +namespace mir { void IrDotBuilder::updateWithNode(INode *node, const DotIrNodeInfo &irNodeInfo) @@ -49,7 +45,5 @@ void IrDotBuilder::addEdge(INode *node1, INode *node2) dot << node1->getId() << " -> " << node2->getId() << ";" << std::endl; } -} // namespace dumper -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/modelIR/ir_dot_dumper.cpp b/contrib/nnc/core/modelIR/ir_dot_dumper.cpp index 7a64fa3..892c523 100644 --- a/contrib/nnc/core/modelIR/ir_dot_dumper.cpp +++ b/contrib/nnc/core/modelIR/ir_dot_dumper.cpp @@ -22,16 +22,10 @@ #include "core/modelIR/ir_dot_node_info.h" #include "core/modelIR/ir_dot_dumper.h" -namespace nncc +namespace nnc { -namespace contrib +namespace mir { -namespace core -{ -namespace dumper -{ - -using namespace nncc::contrib::core::data; static std::vector getInputShapes(OpDescription &op) { @@ -197,7 +191,5 @@ void IrDotDumper::visit(INode *node, ops::DropoutOp &op) dotBuilder.updateWithNode(node, nodeInfo); } -} // namespace dumper -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/modelIR/ir_dot_node_info.cpp b/contrib/nnc/core/modelIR/ir_dot_node_info.cpp index ace7ff9..ffb42e6 100644 --- a/contrib/nnc/core/modelIR/ir_dot_node_info.cpp +++ b/contrib/nnc/core/modelIR/ir_dot_node_info.cpp @@ -19,13 +19,9 @@ #include "core/modelIR/ir_dot_node_info.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace dumper +namespace mir { DotIrNodeInfo &DotIrNodeInfo::withType(const std::string &typeName, const std::string &nodeName) @@ -257,7 +253,5 @@ void DotIrNodeInfo::addPipeIfNeeded(std::stringstream 
&ss, bool needed, bool &ne } } -} // namespace dumper -} // namespace core -} // namespace contrib -} // namespace nncc \ No newline at end of file +} // namespace mir +} // namespace nnc \ No newline at end of file diff --git a/contrib/nnc/core/modelIR/ir_node.cpp b/contrib/nnc/core/modelIR/ir_node.cpp index 9e94458..13fa93d 100644 --- a/contrib/nnc/core/modelIR/ir_node.cpp +++ b/contrib/nnc/core/modelIR/ir_node.cpp @@ -16,25 +16,19 @@ #include "core/modelIR/ir_node.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { -const std::vector &ADT::AbstractNode::getNextNodes() const { return _outputs; } +const std::vector &AbstractNode::getNextNodes() const { return _outputs; } -const std::vector &ADT::AbstractNode::getPrevNodes() const +const std::vector &AbstractNode::getPrevNodes() const { return _inputs; } -void ADT::AbstractNode::connectInputTo(const int inputIndex, const IODescriptor &descriptor) +void AbstractNode::connectInputTo(const int inputIndex, const IODescriptor &descriptor) { AbstractNode *buf_ptr = dynamic_cast(descriptor.node); assert(buf_ptr); @@ -42,19 +36,16 @@ void ADT::AbstractNode::connectInputTo(const int inputIndex, const IODescriptor _inputs[inputIndex] = descriptor; } -void ADT::AbstractNode::addNextNode(ADT::INode::Ref const node) { _outputs.emplace_back(node); } +void AbstractNode::addNextNode(INode::Ref const node) { _outputs.emplace_back(node); } -const ADT::INode::IODescriptor ADT::AbstractNode::getOutput(size_t index) +const INode::IODescriptor AbstractNode::getOutput(size_t index) { return IODescriptor{.node = this, .index = index}; } -ADT::AbstractNode::AbstractNode(size_t num_inputs) { +AbstractNode::AbstractNode(size_t num_inputs) { _inputs.resize(num_inputs); } -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/modelIR/operation.cpp 
b/contrib/nnc/core/modelIR/operation.cpp index a98055a..8eea4c7 100644 --- a/contrib/nnc/core/modelIR/operation.cpp +++ b/contrib/nnc/core/modelIR/operation.cpp @@ -18,13 +18,10 @@ #include "core/modelIR/operations/operation.h" -namespace nncc { -namespace contrib { -namespace core { -namespace IR { -namespace model { - -using namespace nncc::contrib::core::data; +namespace nnc +{ +namespace mir +{ const Shape &OpDescription::getInputShape(const size_t index) const { assert(index < getNumInputs()); @@ -54,8 +51,5 @@ size_t OpDescription::getNumInputs() const { return _max_inputs; } size_t OpDescription::getNumOutputs() const { return _max_outputs; } -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/modelIR/visitor.cpp b/contrib/nnc/core/modelIR/visitor.cpp index 1721246..aca61cf 100644 --- a/contrib/nnc/core/modelIR/visitor.cpp +++ b/contrib/nnc/core/modelIR/visitor.cpp @@ -19,29 +19,24 @@ #include "core/modelIR/visitor.h" -namespace nncc { -namespace contrib { -namespace core { -namespace IR { -namespace model { +namespace nnc +{ +namespace mir +{ -void Visitor::visit(ADT::INode *node, ops::ConcatOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::Conv2DOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::SoftmaxOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::PoolOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::FullyConnectedOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::CappedReluOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::BiasAddOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::VariableOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode 
*node, ops::ReluOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::ReshapeOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::ScaleOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::BatchNormOp &op) {(void)node; (void)op;}; -void Visitor::visit(ADT::INode *node, ops::DropoutOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::Conv2DOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::DepthwiseConv2DOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::SoftmaxOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::PoolOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::FullyConnectedOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::CappedReluOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::BiasAddOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::VariableOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::ReluOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::ReshapeOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::ScaleOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::BatchNormOp &op) {(void)node; (void)op;}; +void Visitor::visit(INode *node, ops::DropoutOp &op) {(void)node; (void)op;}; -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/serialize/Deserializer.cpp b/contrib/nnc/core/serialize/Deserializer.cpp index d91188d..1a21fa1 100644 --- a/contrib/nnc/core/serialize/Deserializer.cpp +++ b/contrib/nnc/core/serialize/Deserializer.cpp @@ -19,12 +19,10 @@ #include "core/modelIR/ShapeRange.h" -namespace nncc { -namespace contrib { -namespace core { - -using namespace nncc::contrib::core::data; -using namespace 
nncc::contrib::core::ADT; +namespace nnc +{ +namespace mir +{ // // Shape Deserialization @@ -142,6 +140,5 @@ TensorVariant Deserializer::deserializeFromString (const std::str return deserializeFromMessage(objectAsMessage); } -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/core/serialize/Serializer.cpp b/contrib/nnc/core/serialize/Serializer.cpp index 06cacf5..41453dd 100644 --- a/contrib/nnc/core/serialize/Serializer.cpp +++ b/contrib/nnc/core/serialize/Serializer.cpp @@ -19,11 +19,10 @@ #include "core/modelIR/ShapeRange.h" -namespace nncc { -namespace contrib { -namespace core { - -using namespace nncc::contrib::core::data; +namespace nnc +{ +namespace mir +{ template void Serializer::serializeToStream(const T& obj, std::ostream& stream) @@ -96,6 +95,5 @@ std::string Serializer >::getSerializedObject (const TensorregisterPass(pass); @@ -106,17 +100,17 @@ static void registerBackendPass() { Pass *pass; - if ( clopt::target == NNC_TARGET_X86_CPP ) + if ( cli::target == NNC_TARGET_X86_CPP ) { - pass = &soft::CPPCodeGenerator::getInstance(); + pass = &CPPCodeGenerator::getInstance(); } - else if (clopt::target == NNC_TARGET_ARM_GPU_CPP ) + else if (cli::target == NNC_TARGET_ARM_GPU_CPP ) { - pass = &soft::AclCPPCodeGenerator::getInstance(); + pass = &AclCPPCodeGenerator::getInstance(); } - else if ( clopt::target == NNC_TARGET_INTERPRETER ) + else if ( cli::target == NNC_TARGET_INTERPRETER ) { - pass = &interpreter::InterpreterPass::getInstance(); + pass = &InterpreterPass::getInstance(); } else { @@ -138,5 +132,4 @@ void Driver::runDriver() } // runDriver -} // contrib -} // nncc +} // namespace nnc diff --git a/contrib/nnc/driver/Driver.h b/contrib/nnc/driver/Driver.h index a7ef0d7..545502d 100644 --- a/contrib/nnc/driver/Driver.h +++ b/contrib/nnc/driver/Driver.h @@ -19,9 +19,7 @@ #include -namespace nncc -{ -namespace contrib +namespace nnc { /** @@ -60,7 +58,6 @@ public: }; -} // 
contrib -} // nncc +} // namespace nnc #endif //NNCC_DRIVER_H diff --git a/contrib/nnc/driver/Options.cpp b/contrib/nnc/driver/Options.cpp index c6b8201..4347135 100644 --- a/contrib/nnc/driver/Options.cpp +++ b/contrib/nnc/driver/Options.cpp @@ -20,11 +20,9 @@ #include "option/Options.h" #include "Definitions.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace clopt +namespace cli { /** @@ -117,6 +115,5 @@ Option> interOutNode(optname("--output-node"), std::vector{}, optional(true)); -} // namespace clopt -} // namespace contrib -} // namespace nncc +} // namespace cli +} // namespace nnc diff --git a/contrib/nnc/driver/main.cpp b/contrib/nnc/driver/main.cpp index 4a8a0a9..4c8df27 100644 --- a/contrib/nnc/driver/main.cpp +++ b/contrib/nnc/driver/main.cpp @@ -21,8 +21,7 @@ #include "pass/PassException.h" #include "Driver.h" -using namespace nncc::contrib; -using namespace nncc::contrib::pass; +using namespace nnc; int main(int argc, const char *argv[]) { @@ -31,7 +30,7 @@ int main(int argc, const char *argv[]) try { // Parse command line - clopt::CommandLine::getParser()->parseCommandLine(argc, argv); + cli::CommandLine::getParser()->parseCommandLine(argc, argv); // // run compiler pipeline: diff --git a/contrib/nnc/examples/caffe_frontend/model_dump.cpp b/contrib/nnc/examples/caffe_frontend/model_dump.cpp index 6b62107..55c8dde 100644 --- a/contrib/nnc/examples/caffe_frontend/model_dump.cpp +++ b/contrib/nnc/examples/caffe_frontend/model_dump.cpp @@ -24,10 +24,9 @@ #include "core/modelIR/ShapeInference.h" #include "pass/PassException.h" -using namespace nncc::contrib; -using namespace nncc::contrib::pass; -using namespace nncc::contrib::clopt; -using namespace nncc::contrib::core::dumper; +using namespace nnc; +using namespace nnc::mir; +using namespace nnc::cli; enum Format {FormatDot, FormatDump}; @@ -38,10 +37,10 @@ static Option isDumpFormat(optname("--dump"), int main(int argc, const char **argv) { - 
clopt::CommandLine::getParser()->parseCommandLine(argc, argv, false); - std::string model = clopt::inputFile; + cli::CommandLine::getParser()->parseCommandLine(argc, argv, false); + std::string model = cli::inputFile; - nncc::contrib::frontend::caffe::CaffeImporter importer{model}; + nnc::caffe::CaffeImporter importer{model}; if (!importer.import()) { diff --git a/contrib/nnc/examples/tflite_frontend/sanity_check.cpp b/contrib/nnc/examples/tflite_frontend/sanity_check.cpp index 31c3e32..b104a03 100644 --- a/contrib/nnc/examples/tflite_frontend/sanity_check.cpp +++ b/contrib/nnc/examples/tflite_frontend/sanity_check.cpp @@ -24,10 +24,9 @@ #include "core/modelIR/ir_dot_dumper.h" #include "core/modelIR/ShapeInference.h" -using namespace nncc::contrib; -using namespace nncc::contrib::pass; -using namespace nncc::contrib::clopt; -using namespace nncc::contrib::core::dumper; +using namespace nnc; +using namespace nnc::mir; +using namespace nnc::cli; enum Format {FormatDot, FormatDump}; @@ -38,10 +37,10 @@ static Option isDumpFormat(optname("--dump"), int main(int argc, const char **argv) { - clopt::CommandLine::getParser()->parseCommandLine(argc, argv, false); - std::string model = clopt::inputFile; + cli::CommandLine::getParser()->parseCommandLine(argc, argv, false); + std::string model = cli::inputFile; - nncc::contrib::frontend::tflite::v3::TfliteImporter importer{model}; + nnc::tflite::v3::TfliteImporter importer{model}; if (!importer.import()) { @@ -60,8 +59,8 @@ int main(int argc, const char **argv) try { IrDotDumper dotDumper; - ShapeInference inf; - auto g = static_cast(importer.createIR()); + mir::ShapeInference inf; + auto g = static_cast(importer.createIR()); g->accept(&inf); g->accept(&dotDumper); diff --git a/contrib/nnc/include/core/modelIR/ExternalRegion.h b/contrib/nnc/include/core/modelIR/ExternalRegion.h index e597a8d..a5d94ec 100644 --- a/contrib/nnc/include/core/modelIR/ExternalRegion.h +++ b/contrib/nnc/include/core/modelIR/ExternalRegion.h @@ -19,13 
+19,9 @@ #include "core/modelIR/Region.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace data +namespace mir { template class ExternalRegion final : public Region @@ -45,9 +41,7 @@ private: uint32_t const _size; }; -} // namespace data -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif // _NNC_CORE_LINALG_EXTERNAL_REGION_H_ diff --git a/contrib/nnc/include/core/modelIR/Index.h b/contrib/nnc/include/core/modelIR/Index.h index 04f2344..4319858 100644 --- a/contrib/nnc/include/core/modelIR/Index.h +++ b/contrib/nnc/include/core/modelIR/Index.h @@ -22,13 +22,9 @@ #include #include -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace data +namespace mir { class Index @@ -56,9 +52,7 @@ private: std::ostream &operator<<(std::ostream &s, const Index &sh); -} // namespace data -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif // _NNC_CORE_LINALG_INDEX_H_ diff --git a/contrib/nnc/include/core/modelIR/Region.h b/contrib/nnc/include/core/modelIR/Region.h index de78a87..96978bc 100644 --- a/contrib/nnc/include/core/modelIR/Region.h +++ b/contrib/nnc/include/core/modelIR/Region.h @@ -19,13 +19,9 @@ #include -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace data +namespace mir { template @@ -38,9 +34,7 @@ struct Region virtual uint32_t size(void) const = 0; }; -} // namespace data -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif // _NNC_CORE_LINALG_REGION_H_ diff --git a/contrib/nnc/include/core/modelIR/Shape.h b/contrib/nnc/include/core/modelIR/Shape.h index ad73d20..5839e78 100644 --- a/contrib/nnc/include/core/modelIR/Shape.h +++ b/contrib/nnc/include/core/modelIR/Shape.h @@ -22,13 +22,9 @@ #include #include -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ 
-namespace data +namespace mir { class Shape @@ -61,9 +57,7 @@ Shape squeeze(const Shape &); bool operator==(const Shape &, const Shape &); std::ostream &operator<<(std::ostream &s, const Shape &sh); -} // namespace data -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif // _NNC_CORE_LINALG_SHAPE_H_ diff --git a/contrib/nnc/include/core/modelIR/ShapeInference.h b/contrib/nnc/include/core/modelIR/ShapeInference.h index e691fc8..7e37f07 100644 --- a/contrib/nnc/include/core/modelIR/ShapeInference.h +++ b/contrib/nnc/include/core/modelIR/ShapeInference.h @@ -21,46 +21,35 @@ #include "core/modelIR/visitor.h" #include "core/modelIR/ir_node.h" -namespace nncc +namespace nnc { -namespace contrib +namespace mir { -namespace core -{ -namespace IR -{ -namespace model -{ - -using namespace nncc::contrib::core::IR::model; class ShapeInference : public IVisitor { public: static const auto AUTO_DIM = std::numeric_limits::max(); - void visit(ADT::INode::Ref node, ops::ConcatOp &op) override; - void visit(ADT::INode::Ref node, ops::Conv2DOp &op) override; - void visit(ADT::INode::Ref node, ops::DepthwiseConv2DOp &op) override; - void visit(ADT::INode::Ref node, ops::ReluOp &op) override; - void visit(ADT::INode::Ref node, ops::SoftmaxOp &op) override; - void visit(ADT::INode::Ref node, ops::PoolOp &op) override; - void visit(ADT::INode::Ref node, ops::FullyConnectedOp &op) override; - void visit(ADT::INode::Ref node, ops::CappedReluOp &op) override; - void visit(ADT::INode::Ref node, ops::BiasAddOp &op) override; - void visit(ADT::INode::Ref node, ops::ReshapeOp &op) override; - void visit(ADT::INode::Ref node, ops::VariableOp &op) override; - void visit(ADT::INode *node, ops::ScaleOp &op) override; - void visit(ADT::INode *node, ops::BatchNormOp &op) override; - void visit(ADT::INode *node, ops::DropoutOp &op) override; + void visit(INode::Ref node, ops::ConcatOp &op) override; + void visit(INode::Ref node, 
ops::Conv2DOp &op) override; + void visit(INode::Ref node, ops::DepthwiseConv2DOp &op) override; + void visit(INode::Ref node, ops::ReluOp &op) override; + void visit(INode::Ref node, ops::SoftmaxOp &op) override; + void visit(INode::Ref node, ops::PoolOp &op) override; + void visit(INode::Ref node, ops::FullyConnectedOp &op) override; + void visit(INode::Ref node, ops::CappedReluOp &op) override; + void visit(INode::Ref node, ops::BiasAddOp &op) override; + void visit(INode::Ref node, ops::ReshapeOp &op) override; + void visit(INode::Ref node, ops::VariableOp &op) override; + void visit(INode *node, ops::ScaleOp &op) override; + void visit(INode *node, ops::BatchNormOp &op) override; + void visit(INode *node, ops::DropoutOp &op) override; protected: - void fillInputShapes(ADT::INode::Ref node, OpDescription &op); + void fillInputShapes(INode::Ref node, OpDescription &op); }; -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_SHAPE_INFERENCE_ diff --git a/contrib/nnc/include/core/modelIR/ShapeRange.h b/contrib/nnc/include/core/modelIR/ShapeRange.h index 35db1d2..882fb25 100644 --- a/contrib/nnc/include/core/modelIR/ShapeRange.h +++ b/contrib/nnc/include/core/modelIR/ShapeRange.h @@ -22,17 +22,10 @@ #include "core/modelIR/Shape.h" #include "core/modelIR/Index.h" -namespace nncc +namespace nnc { -namespace contrib +namespace mir { -namespace core -{ -namespace data -{ - -using nncc::contrib::core::data::Shape; -using nncc::contrib::core::data::Index; class ShapeIter : public std::iterator { @@ -108,9 +101,7 @@ class ShapeRange { Shape& _shape; }; -} // namespace data -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_LINALG_SHAPE_RANGE_H_ diff --git a/contrib/nnc/include/core/modelIR/Tensor.h b/contrib/nnc/include/core/modelIR/Tensor.h index d7644e8..6098911 100644 --- 
a/contrib/nnc/include/core/modelIR/Tensor.h +++ b/contrib/nnc/include/core/modelIR/Tensor.h @@ -22,13 +22,9 @@ #include "core/modelIR/TensorVariant.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace data +namespace mir { template @@ -36,7 +32,7 @@ class Tensor final{ public: Tensor() = delete; - explicit Tensor(const ADT::TensorVariant &t) : _proxy(t), _shape(t.getShape()) { + explicit Tensor(const TensorVariant &t) : _proxy(t), _shape(t.getShape()) { } T at(const Index &id) const { @@ -58,7 +54,7 @@ class Tensor final{ virtual const Shape &getShape() const { return _proxy.getShape(); }; private: - const ADT::TensorVariant& _proxy; + const TensorVariant& _proxy; const Shape &_shape; }; @@ -71,7 +67,5 @@ class Tensor; extern template class Tensor; -} // namespace data -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/include/core/modelIR/TensorUtil.h b/contrib/nnc/include/core/modelIR/TensorUtil.h index c92943a..0037229 100644 --- a/contrib/nnc/include/core/modelIR/TensorUtil.h +++ b/contrib/nnc/include/core/modelIR/TensorUtil.h @@ -25,27 +25,15 @@ #include "core/modelIR/Index.h" #include "core/modelIR/ShapeRange.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace data -{ -namespace util +namespace mir { -using nncc::contrib::core::data::Shape; -using namespace nncc::contrib::core::ADT; -using namespace nncc::contrib::core::data; - template static std::shared_ptr transposeTensor(std::shared_ptr tensor) { - using nncc::contrib::core::data::Index; - const Shape &inShape = tensor->getShape(); Shape targetShape{inShape.dim(Ints)...}; @@ -74,10 +62,7 @@ transposeTensor(std::shared_ptr tensor) return convertedTensor; } -} // namespace util -} // namespace data -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif // _NNC_CORE_LINALG_TENSOR_UTIL_H_ diff --git 
a/contrib/nnc/include/core/modelIR/TensorVariant.h b/contrib/nnc/include/core/modelIR/TensorVariant.h index 7a68873..3c6d510 100644 --- a/contrib/nnc/include/core/modelIR/TensorVariant.h +++ b/contrib/nnc/include/core/modelIR/TensorVariant.h @@ -24,13 +24,10 @@ #include "core/modelIR/Index.h" #include "core/modelIR/Shape.h" -namespace nncc { -namespace contrib { -namespace core { -namespace ADT { - -using nncc::contrib::core::data::Shape; -using nncc::contrib::core::data::Index; +namespace nnc +{ +namespace mir +{ constexpr int MAX_DIMENSIONS = 32; @@ -72,9 +69,7 @@ public: size_t _element_size; }; -} // namespace ADT -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_LINALG_TENSOR_VARIANT_H_ diff --git a/contrib/nnc/include/core/modelIR/graph.h b/contrib/nnc/include/core/modelIR/graph.h index 029d2d0..d02e257 100644 --- a/contrib/nnc/include/core/modelIR/graph.h +++ b/contrib/nnc/include/core/modelIR/graph.h @@ -26,17 +26,11 @@ #include "core/modelIR/operations/variable_op.h" #include "core/modelIR/ir_node.h" -namespace nncc +namespace nnc { -namespace contrib +namespace mir { -namespace core -{ -namespace IR -{ -namespace model { -using ADT::INode; class IVisitor; class Graph { @@ -80,10 +74,7 @@ class Graph { std::unordered_map _outputs; }; -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_GRAPH_H_ diff --git a/contrib/nnc/include/core/modelIR/ir_dot_builder.h b/contrib/nnc/include/core/modelIR/ir_dot_builder.h index 4d730b4..64b12d7 100644 --- a/contrib/nnc/include/core/modelIR/ir_dot_builder.h +++ b/contrib/nnc/include/core/modelIR/ir_dot_builder.h @@ -22,16 +22,10 @@ #include "core/modelIR/ir_node.h" #include "core/modelIR/ir_dot_node_info.h" -namespace nncc +namespace nnc { -namespace contrib +namespace mir { -namespace core -{ -namespace dumper -{ - -using 
nncc::contrib::core::IR::model::ADT::INode; /** * @brief Provides an API to add nodes and edges to the .dot Model IR representation @@ -52,9 +46,7 @@ private: std::stringstream dot; }; -} // namespace dumper -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //NNCC_IR_DOT_BUILDER_H diff --git a/contrib/nnc/include/core/modelIR/ir_dot_dumper.h b/contrib/nnc/include/core/modelIR/ir_dot_dumper.h index 8384650..5ac40e5 100644 --- a/contrib/nnc/include/core/modelIR/ir_dot_dumper.h +++ b/contrib/nnc/include/core/modelIR/ir_dot_dumper.h @@ -36,17 +36,10 @@ #include "core/modelIR/ir_dot_builder.h" -namespace nncc +namespace nnc { -namespace contrib +namespace mir { -namespace core -{ -namespace dumper -{ - -using nncc::contrib::core::IR::model::ADT::INode; -using namespace nncc::contrib::core::IR::model; /** * @breif Model IR visitor that can be used to output Model IR as a .dot graph. @@ -76,9 +69,7 @@ private: IrDotBuilder dotBuilder; }; -} // namespace dumper -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif // _NNC_BACKEND_INTERPRETER_CORE_DOTDUMPER_ diff --git a/contrib/nnc/include/core/modelIR/ir_dot_node_info.h b/contrib/nnc/include/core/modelIR/ir_dot_node_info.h index 72c145f..d6eb521 100644 --- a/contrib/nnc/include/core/modelIR/ir_dot_node_info.h +++ b/contrib/nnc/include/core/modelIR/ir_dot_node_info.h @@ -21,17 +21,10 @@ #include "core/modelIR/operations/common.h" #include "core/modelIR/operations/pool_op.h" -namespace nncc +namespace nnc { -namespace contrib +namespace mir { -namespace core -{ -namespace dumper -{ - -using namespace nncc::contrib::core::IR::model; -using namespace nncc::contrib::core::data; /** * @brief Can collect information about a NN operator, and then use it to output @@ -100,9 +93,7 @@ private: PoolType poolType = PoolType::MAX; }; -} // namespace dumper -} // namespace core -} // namespace contrib -} // namespace 
nncc +} // namespace mir +} // namespace nnc #endif // NNCC_IR_NODE_DOT_BUILDER_H diff --git a/contrib/nnc/include/core/modelIR/ir_node.h b/contrib/nnc/include/core/modelIR/ir_node.h index bff7241..fcea58b 100644 --- a/contrib/nnc/include/core/modelIR/ir_node.h +++ b/contrib/nnc/include/core/modelIR/ir_node.h @@ -24,18 +24,9 @@ #include "core/modelIR/operations/operation.h" #include "core/modelIR/visitor.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model -{ - -namespace ADT +namespace mir { class INode @@ -45,12 +36,12 @@ public: struct IODescriptor { - ADT::INode* node; // Data source + INode* node; // Data source size_t index; // Output id }; virtual const std::vector &getPrevNodes() const = 0; - virtual const std::vector &getNextNodes() const = 0; + virtual const std::vector &getNextNodes() const = 0; virtual size_t getId() const = 0; @@ -75,19 +66,18 @@ class AbstractNode : public INode public: explicit AbstractNode(size_t num_inputs); const std::vector &getPrevNodes() const override; - const std::vector &getNextNodes() const override; + const std::vector &getNextNodes() const override; void connectInputTo(const int inputIndex, const IODescriptor &descriptor) override; const IODescriptor getOutput(const size_t index) override; protected: - virtual void addNextNode(ADT::INode::Ref const node) override; + virtual void addNextNode(INode::Ref const node) override; private: std::vector _inputs; - std::vector _outputs; + std::vector _outputs; }; -} // namespace ADT struct NodeProperties { @@ -107,7 +97,7 @@ struct NodeProperties }; template -class Node : public ADT::AbstractNode +class Node : public AbstractNode { public: OpType *getOperation() override { return static_cast(_props.op); } @@ -141,10 +131,7 @@ private: NodeProperties _props; }; -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif 
//_NNC_CORE_IR_MODEL_NODE_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/batch_norm.h b/contrib/nnc/include/core/modelIR/operations/batch_norm.h index 57dd4c9..8e5d464 100644 --- a/contrib/nnc/include/core/modelIR/operations/batch_norm.h +++ b/contrib/nnc/include/core/modelIR/operations/batch_norm.h @@ -19,15 +19,9 @@ #include "core/modelIR/operations/operation.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { @@ -67,10 +61,7 @@ private: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_BATCH_NORM_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/bias_add_op.h b/contrib/nnc/include/core/modelIR/operations/bias_add_op.h index 8d84cb6..67b5152 100644 --- a/contrib/nnc/include/core/modelIR/operations/bias_add_op.h +++ b/contrib/nnc/include/core/modelIR/operations/bias_add_op.h @@ -20,15 +20,9 @@ #include "core/modelIR/operations/operation.h" #include "core/modelIR/TensorVariant.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { @@ -45,10 +39,7 @@ private: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_BIAS_ADD_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/capped_relu_op.h b/contrib/nnc/include/core/modelIR/operations/capped_relu_op.h index 1783305..928c20d 100644 --- a/contrib/nnc/include/core/modelIR/operations/capped_relu_op.h +++ b/contrib/nnc/include/core/modelIR/operations/capped_relu_op.h @@ -19,12 +19,12 @@ #include "core/modelIR/operations/operation.h" -namespace nncc { -namespace contrib { -namespace core { -namespace IR { -namespace model { 
-namespace ops { +namespace nnc +{ +namespace mir +{ +namespace ops +{ class CappedReluOp : public OpDescription { public: @@ -40,10 +40,7 @@ class CappedReluOp : public OpDescription { }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_CAPPED_RELU_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/common.h b/contrib/nnc/include/core/modelIR/operations/common.h index 234f127..81467a9 100644 --- a/contrib/nnc/include/core/modelIR/operations/common.h +++ b/contrib/nnc/include/core/modelIR/operations/common.h @@ -17,15 +17,9 @@ #ifndef _NNC_CORE_IR_MODEL_COMMON_H_ #define _NNC_CORE_IR_MODEL_COMMON_H_ -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { @@ -37,10 +31,7 @@ enum class PaddingType { }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_COMMOND_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/concat_op.h b/contrib/nnc/include/core/modelIR/operations/concat_op.h index fd26724..22b3ab5 100644 --- a/contrib/nnc/include/core/modelIR/operations/concat_op.h +++ b/contrib/nnc/include/core/modelIR/operations/concat_op.h @@ -19,15 +19,9 @@ #include "core/modelIR/operations/operation.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { @@ -55,10 +49,7 @@ private: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_CONCAT_OP_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/conv_2d_op.h 
b/contrib/nnc/include/core/modelIR/operations/conv_2d_op.h index 5a329ed..c80c718 100644 --- a/contrib/nnc/include/core/modelIR/operations/conv_2d_op.h +++ b/contrib/nnc/include/core/modelIR/operations/conv_2d_op.h @@ -25,21 +25,13 @@ #include "core/modelIR/Shape.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { -using nncc::contrib::core::data::Shape; - class Conv2DOp : public OpDescription { public: @@ -68,10 +60,7 @@ private: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_CONV_2D_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/depthwise_conv2d_op.h b/contrib/nnc/include/core/modelIR/operations/depthwise_conv2d_op.h index 459342c..2c63cf5 100644 --- a/contrib/nnc/include/core/modelIR/operations/depthwise_conv2d_op.h +++ b/contrib/nnc/include/core/modelIR/operations/depthwise_conv2d_op.h @@ -25,21 +25,13 @@ #include "core/modelIR/Shape.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { -using nncc::contrib::core::data::Shape; - class DepthwiseConv2DOp : public OpDescription { public: @@ -67,10 +59,7 @@ private: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_DEPTHWISE_CONV_2D_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/dropout_op.h b/contrib/nnc/include/core/modelIR/operations/dropout_op.h index f5aff4d..0d3fcc9 100644 --- a/contrib/nnc/include/core/modelIR/operations/dropout_op.h +++ b/contrib/nnc/include/core/modelIR/operations/dropout_op.h @@ -19,12 +19,12 @@ #include "core/modelIR/operations/operation.h" -namespace nncc { -namespace contrib { -namespace 
core { -namespace IR { -namespace model { -namespace ops { +namespace nnc +{ +namespace mir +{ +namespace ops +{ class DropoutOp : public OpDescription { public: @@ -40,10 +40,7 @@ private: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_DROPOUT_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/fully_connected_op.h b/contrib/nnc/include/core/modelIR/operations/fully_connected_op.h index 6e58a50..f35963e 100644 --- a/contrib/nnc/include/core/modelIR/operations/fully_connected_op.h +++ b/contrib/nnc/include/core/modelIR/operations/fully_connected_op.h @@ -20,21 +20,13 @@ #include "core/modelIR/operations/operation.h" #include "core/modelIR/TensorVariant.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { -using namespace nncc::contrib::core; - class FullyConnectedOp : public OpDescription { public: @@ -47,10 +39,7 @@ private: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_FULLY_CONNECTED_OP_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/operation.h b/contrib/nnc/include/core/modelIR/operations/operation.h index ca49192..792371d 100644 --- a/contrib/nnc/include/core/modelIR/operations/operation.h +++ b/contrib/nnc/include/core/modelIR/operations/operation.h @@ -23,18 +23,10 @@ #include "core/modelIR/Shape.h" -namespace nncc +namespace nnc { -namespace contrib +namespace mir { -namespace core -{ -namespace IR -{ -namespace model -{ - -using nncc::contrib::core::ADT::TensorVariant; class OpDescription { public: @@ -44,24 +36,21 @@ class OpDescription { size_t getNumInputs() const; size_t getNumOutputs() const; - const nncc::contrib::core::data::Shape 
&getInputShape(const size_t index) const; - virtual void setInputShape(const size_t index, const nncc::contrib::core::data::Shape &shape); + const nnc::mir::Shape &getInputShape(const size_t index) const; + virtual void setInputShape(const size_t index, const nnc::mir::Shape &shape); - virtual const nncc::contrib::core::data::Shape &getOutputShape(const size_t index) const; - void setOutputShape(const size_t index, const nncc::contrib::core::data::Shape &shape); + virtual const nnc::mir::Shape &getOutputShape(const size_t index) const; + void setOutputShape(const size_t index, const nnc::mir::Shape &shape); private: size_t _max_inputs; size_t _max_outputs; - std::map _inputShapes; - std::map _outputShapes; + std::map _inputShapes; + std::map _outputShapes; }; -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_OPERATION_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/pool_op.h b/contrib/nnc/include/core/modelIR/operations/pool_op.h index 62f10f2..adc0d53 100644 --- a/contrib/nnc/include/core/modelIR/operations/pool_op.h +++ b/contrib/nnc/include/core/modelIR/operations/pool_op.h @@ -24,21 +24,13 @@ #include "core/modelIR/Shape.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { -using nncc::contrib::core::data::Shape; - class PoolOp : public OpDescription { public: @@ -87,10 +79,7 @@ private: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_POOL_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/relu_op.h b/contrib/nnc/include/core/modelIR/operations/relu_op.h index 4199c19..f61dbdd 100644 --- a/contrib/nnc/include/core/modelIR/operations/relu_op.h +++ 
b/contrib/nnc/include/core/modelIR/operations/relu_op.h @@ -19,15 +19,9 @@ #include "core/modelIR/operations/operation.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { @@ -39,10 +33,7 @@ public: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_RELU_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/reshape_op.h b/contrib/nnc/include/core/modelIR/operations/reshape_op.h index f977e79..ba18745 100644 --- a/contrib/nnc/include/core/modelIR/operations/reshape_op.h +++ b/contrib/nnc/include/core/modelIR/operations/reshape_op.h @@ -18,15 +18,9 @@ #include "core/modelIR/operations/operation.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { @@ -38,8 +32,5 @@ public: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/include/core/modelIR/operations/scale_op.h b/contrib/nnc/include/core/modelIR/operations/scale_op.h index 2f8fc75..57ba941 100644 --- a/contrib/nnc/include/core/modelIR/operations/scale_op.h +++ b/contrib/nnc/include/core/modelIR/operations/scale_op.h @@ -19,15 +19,9 @@ #include "core/modelIR/operations/operation.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { @@ -47,10 +41,7 @@ private: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_SCALE_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/softmax_op.h 
b/contrib/nnc/include/core/modelIR/operations/softmax_op.h index 2b4b1dd..9334692 100644 --- a/contrib/nnc/include/core/modelIR/operations/softmax_op.h +++ b/contrib/nnc/include/core/modelIR/operations/softmax_op.h @@ -19,15 +19,9 @@ #include "core/modelIR/operations/operation.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { @@ -55,10 +49,7 @@ private: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_SOFTMAX_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/variable_op.h b/contrib/nnc/include/core/modelIR/operations/variable_op.h index 83096f5..e1f8b8c 100644 --- a/contrib/nnc/include/core/modelIR/operations/variable_op.h +++ b/contrib/nnc/include/core/modelIR/operations/variable_op.h @@ -19,15 +19,9 @@ #include "core/modelIR/operations/operation.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace core -{ -namespace IR -{ -namespace model +namespace mir { namespace ops { @@ -39,10 +33,7 @@ public: }; } // namespace ops -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_VARIABLE_H_ diff --git a/contrib/nnc/include/core/modelIR/visitor.h b/contrib/nnc/include/core/modelIR/visitor.h index 007c468..71e84d1 100644 --- a/contrib/nnc/include/core/modelIR/visitor.h +++ b/contrib/nnc/include/core/modelIR/visitor.h @@ -17,16 +17,12 @@ #ifndef _NNC_CORE_IR_MODEL_VISITOR_H_ #define _NNC_CORE_IR_MODEL_VISITOR_H_ -namespace nncc { -namespace contrib { -namespace core { -namespace IR { -namespace model { +namespace nnc +{ +namespace mir +{ -//Forward declare INode due to circular dependecies with INode::accept(Visitor*); -namespace ADT { - class INode; -} +class INode; //Forward declare 
operations as we don't need anything but references namespace ops @@ -52,20 +48,20 @@ namespace ops */ class IVisitor { public: - virtual void visit(ADT::INode *node, ops::ConcatOp &op) = 0; - virtual void visit(ADT::INode *node, ops::Conv2DOp &op) = 0; - virtual void visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) = 0; - virtual void visit(ADT::INode *node, ops::SoftmaxOp &op) = 0; - virtual void visit(ADT::INode *node, ops::PoolOp &op) = 0; - virtual void visit(ADT::INode *node, ops::FullyConnectedOp &op) = 0; - virtual void visit(ADT::INode *node, ops::CappedReluOp &op) = 0; - virtual void visit(ADT::INode *node, ops::BiasAddOp &op) = 0; - virtual void visit(ADT::INode *node, ops::VariableOp &op) = 0; - virtual void visit(ADT::INode *node, ops::ReluOp &op) = 0; - virtual void visit(ADT::INode *node, ops::ReshapeOp &op) = 0; - virtual void visit(ADT::INode *node, ops::ScaleOp &op) = 0; - virtual void visit(ADT::INode *node, ops::BatchNormOp &op) = 0; - virtual void visit(ADT::INode *node, ops::DropoutOp &op) = 0; + virtual void visit(INode *node, ops::ConcatOp &op) = 0; + virtual void visit(INode *node, ops::Conv2DOp &op) = 0; + virtual void visit(INode *node, ops::DepthwiseConv2DOp &op) = 0; + virtual void visit(INode *node, ops::SoftmaxOp &op) = 0; + virtual void visit(INode *node, ops::PoolOp &op) = 0; + virtual void visit(INode *node, ops::FullyConnectedOp &op) = 0; + virtual void visit(INode *node, ops::CappedReluOp &op) = 0; + virtual void visit(INode *node, ops::BiasAddOp &op) = 0; + virtual void visit(INode *node, ops::VariableOp &op) = 0; + virtual void visit(INode *node, ops::ReluOp &op) = 0; + virtual void visit(INode *node, ops::ReshapeOp &op) = 0; + virtual void visit(INode *node, ops::ScaleOp &op) = 0; + virtual void visit(INode *node, ops::BatchNormOp &op) = 0; + virtual void visit(INode *node, ops::DropoutOp &op) = 0; virtual ~IVisitor() = default; }; @@ -79,29 +75,26 @@ class IVisitor { */ class Visitor: public IVisitor{ public: - void 
visit(ADT::INode *node, ops::ConcatOp &op) override; - void visit(ADT::INode *node, ops::Conv2DOp &op) override; - void visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) override; - void visit(ADT::INode *node, ops::SoftmaxOp &op) override; - void visit(ADT::INode *node, ops::PoolOp &op) override; - void visit(ADT::INode *node, ops::FullyConnectedOp &op) override; - void visit(ADT::INode *node, ops::CappedReluOp &op) override; - void visit(ADT::INode *node, ops::BiasAddOp &op) override; - void visit(ADT::INode *node, ops::VariableOp &op) override; - void visit(ADT::INode *node, ops::ReluOp &op) override; - void visit(ADT::INode *node, ops::ReshapeOp &op) override; - void visit(ADT::INode *node, ops::ScaleOp &op) override; - void visit(ADT::INode *node, ops::BatchNormOp &op) override; - void visit(ADT::INode *node, ops::DropoutOp &op) override; + void visit(INode *node, ops::ConcatOp &op) override; + void visit(INode *node, ops::Conv2DOp &op) override; + void visit(INode *node, ops::DepthwiseConv2DOp &op) override; + void visit(INode *node, ops::SoftmaxOp &op) override; + void visit(INode *node, ops::PoolOp &op) override; + void visit(INode *node, ops::FullyConnectedOp &op) override; + void visit(INode *node, ops::CappedReluOp &op) override; + void visit(INode *node, ops::BiasAddOp &op) override; + void visit(INode *node, ops::VariableOp &op) override; + void visit(INode *node, ops::ReluOp &op) override; + void visit(INode *node, ops::ReshapeOp &op) override; + void visit(INode *node, ops::ScaleOp &op) override; + void visit(INode *node, ops::BatchNormOp &op) override; + void visit(INode *node, ops::DropoutOp &op) override; ~Visitor() override = default; }; -} // namespace model -} // namespace IR -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_VISITOR_H_ diff --git a/contrib/nnc/include/core/serialize/Deserializer.h b/contrib/nnc/include/core/serialize/Deserializer.h index 
66a2bb5..dc613f6 100644 --- a/contrib/nnc/include/core/serialize/Deserializer.h +++ b/contrib/nnc/include/core/serialize/Deserializer.h @@ -24,12 +24,10 @@ #include "core/modelIR/Shape.h" #include "core/modelIR/TensorVariant.h" -namespace nncc { -namespace contrib { -namespace core { - -using nncc::contrib::core::data::Shape; -using nncc::contrib::core::ADT::TensorVariant; +namespace nnc +{ +namespace mir +{ /** * @brief template class for deserialization @@ -50,8 +48,7 @@ public: T deserializeFromString(const std::string&); }; -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_SERIALIZER_H diff --git a/contrib/nnc/include/core/serialize/Serializer.h b/contrib/nnc/include/core/serialize/Serializer.h index 56ccd13..0b48ba2 100644 --- a/contrib/nnc/include/core/serialize/Serializer.h +++ b/contrib/nnc/include/core/serialize/Serializer.h @@ -24,12 +24,10 @@ #include "core/modelIR/Shape.h" #include "core/modelIR/Tensor.h" -namespace nncc { -namespace contrib { -namespace core { - -using nncc::contrib::core::data::Shape; -using nncc::contrib::core::data::Tensor; +namespace nnc +{ +namespace mir +{ /** * @brief template class for serialization @@ -50,8 +48,7 @@ public: std::string getSerializedObject(const T&); }; -} // namespace core -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_CORE_IR_MODEL_SERIALIZER_H diff --git a/contrib/nnc/include/option/Options.h b/contrib/nnc/include/option/Options.h index cbe8f56..5c023b2 100644 --- a/contrib/nnc/include/option/Options.h +++ b/contrib/nnc/include/option/Options.h @@ -20,11 +20,9 @@ #include #include "support/CommandLine.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace clopt +namespace cli { /** @@ -57,8 +55,7 @@ extern Option interInputData; // input data for model extern Option interInNode; // name of input node in computational graph extern Option> interOutNode; // name 
of output nodes in computational graph -} // namespace clopt -} // namespace contrib -} // namespace nncc +} // namespace cli +} // namespace nnc #endif //NNCC_COMMANDLINEARGUMENTS_H diff --git a/contrib/nnc/include/pass/Pass.h b/contrib/nnc/include/pass/Pass.h index 7f86b3d..005e6bb 100644 --- a/contrib/nnc/include/pass/Pass.h +++ b/contrib/nnc/include/pass/Pass.h @@ -21,11 +21,7 @@ #include "pass/PassData.h" -namespace nncc -{ -namespace contrib -{ -namespace pass +namespace nnc { /** @@ -51,8 +47,6 @@ public: virtual ~Pass() = default; }; -} // namespace pass -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //NNCC_PASS_H diff --git a/contrib/nnc/include/pass/PassData.h b/contrib/nnc/include/pass/PassData.h index 7d8c90f..2191957 100644 --- a/contrib/nnc/include/pass/PassData.h +++ b/contrib/nnc/include/pass/PassData.h @@ -20,13 +20,8 @@ #include "core/modelIR/graph.h" #include "core/modelIR/TensorVariant.h" -using namespace nncc::contrib::core::IR::model; -namespace nncc -{ -namespace contrib -{ -namespace pass +namespace nnc { /** @@ -42,11 +37,11 @@ public: /** * @brief Implicit conversion from Graph* to PassData */ - /* implicit */ PassData(Graph *graph) { _dataContainer.graph = graph; _dataType = PDT::GRAPH; } + /* implicit */ PassData(mir::Graph *graph) { _dataContainer.graph = graph; _dataType = PDT::GRAPH; } /** * @brief Implicit conversion from PassData to Graph* */ - /* implicit */ operator Graph*() const { + /* implicit */ operator mir::Graph*() const { if ( _dataType != PDT::GRAPH ) return nullptr; return _dataContainer.graph; @@ -55,11 +50,11 @@ public: /** * @brief Implicit conversion from Graph* to PassData */ - /* implicit */ PassData(TensorVariant *tv) { _dataContainer.tensorVariant = tv; _dataType = PDT::TENSOR_VARIANT; } + /* implicit */ PassData(mir::TensorVariant *tv) { _dataContainer.tensorVariant = tv; _dataType = PDT::TENSOR_VARIANT; } /** * @brief Implicit conversion from PassData to Graph* */ - /* implicit */ 
operator TensorVariant*() const { + /* implicit */ operator mir::TensorVariant*() const { if ( _dataType != PDT::TENSOR_VARIANT ) return nullptr; return _dataContainer.tensorVariant; @@ -78,15 +73,13 @@ private: // union contains all pointers to objects that can be returned from passes union { - Graph *graph; - TensorVariant *tensorVariant; + mir::Graph *graph; + mir::TensorVariant *tensorVariant; void *unknown; } _dataContainer; }; -} // namespace pass -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //NNCC_PASSDATA_H diff --git a/contrib/nnc/include/pass/PassException.h b/contrib/nnc/include/pass/PassException.h index b44a1b0..4c56083 100644 --- a/contrib/nnc/include/pass/PassException.h +++ b/contrib/nnc/include/pass/PassException.h @@ -20,11 +20,7 @@ #include #include -namespace nncc -{ -namespace contrib -{ -namespace pass +namespace nnc { /** @@ -48,8 +44,6 @@ private: std::string _msg; }; -} // namespace pass -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //NNCC_PASSEXCEPTION_H diff --git a/contrib/nnc/include/pass/PassManager.h b/contrib/nnc/include/pass/PassManager.h index c66a087..6050ac1 100644 --- a/contrib/nnc/include/pass/PassManager.h +++ b/contrib/nnc/include/pass/PassManager.h @@ -19,11 +19,7 @@ #include -namespace nncc -{ -namespace contrib -{ -namespace pass +namespace nnc { // forward declaration @@ -60,8 +56,6 @@ private: Passes _passes; // registered passes }; -} // namespace pass -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // __PASS_MANAGER_H__ diff --git a/contrib/nnc/include/passes/acl_soft_backend/AclCPPGenerator.h b/contrib/nnc/include/passes/acl_soft_backend/AclCPPGenerator.h index 4ee12fb..b4a6973 100644 --- a/contrib/nnc/include/passes/acl_soft_backend/AclCPPGenerator.h +++ b/contrib/nnc/include/passes/acl_soft_backend/AclCPPGenerator.h @@ -20,15 +20,8 @@ #include "pass/Pass.h" -using namespace nncc::contrib::pass; -namespace nncc -{ -namespace contrib -{ 
-namespace backend -{ -namespace soft +namespace nnc { /** @@ -50,9 +43,6 @@ public: PassData run(PassData data) override; }; -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_ACL_CPP_GENERATOR_H_ diff --git a/contrib/nnc/include/passes/acl_soft_backend/ArtifactGeneratorCppCode.h b/contrib/nnc/include/passes/acl_soft_backend/ArtifactGeneratorCppCode.h index f2f64b7..a563697 100644 --- a/contrib/nnc/include/passes/acl_soft_backend/ArtifactGeneratorCppCode.h +++ b/contrib/nnc/include/passes/acl_soft_backend/ArtifactGeneratorCppCode.h @@ -21,13 +21,7 @@ #include -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { /** @@ -48,9 +42,6 @@ private: std::ofstream _out; }; -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_ARTIFACT_GENERATOR_CPP_CODE_H_ diff --git a/contrib/nnc/include/passes/acl_soft_backend/ArtifactGeneratorCppDecl.h b/contrib/nnc/include/passes/acl_soft_backend/ArtifactGeneratorCppDecl.h index 3271dc4..de4818e 100644 --- a/contrib/nnc/include/passes/acl_soft_backend/ArtifactGeneratorCppDecl.h +++ b/contrib/nnc/include/passes/acl_soft_backend/ArtifactGeneratorCppDecl.h @@ -21,13 +21,7 @@ #include -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { /** @@ -48,9 +42,6 @@ private: std::ofstream _out; }; -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_ARTIFACT_GENERATOR_CPP_DECL_H_ diff --git a/contrib/nnc/include/passes/acl_soft_backend/ArtifactModel.h b/contrib/nnc/include/passes/acl_soft_backend/ArtifactModel.h index e0a3e85..a012db6 100644 --- a/contrib/nnc/include/passes/acl_soft_backend/ArtifactModel.h +++ b/contrib/nnc/include/passes/acl_soft_backend/ArtifactModel.h @@ -23,13 +23,7 @@ #include "IArtifactGenerator.h" -namespace nncc -{ -namespace 
contrib -{ -namespace backend -{ -namespace soft +namespace nnc { /** @@ -250,9 +244,6 @@ private: std::string _name; }; -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_ARTIFACT_MODEL_H_ diff --git a/contrib/nnc/include/passes/acl_soft_backend/IArtifactGenerator.h b/contrib/nnc/include/passes/acl_soft_backend/IArtifactGenerator.h index e7e2e36..6afba26 100644 --- a/contrib/nnc/include/passes/acl_soft_backend/IArtifactGenerator.h +++ b/contrib/nnc/include/passes/acl_soft_backend/IArtifactGenerator.h @@ -17,13 +17,7 @@ #ifndef _NNC_ARTIFACT_GENERATOR_INTERFACE_H_ #define _NNC_ARTIFACT_GENERATOR_INTERFACE_H_ -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { class ArtifactEntity; @@ -49,9 +43,6 @@ public: virtual void visit(const ArtifactModule* node) = 0; }; -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_ARTIFACT_GENERATOR_INTERFACE_H_ diff --git a/contrib/nnc/include/passes/caffe_frontend/CaffeFrontend.h b/contrib/nnc/include/passes/caffe_frontend/CaffeFrontend.h index 768ad4a..f75bd55 100644 --- a/contrib/nnc/include/passes/caffe_frontend/CaffeFrontend.h +++ b/contrib/nnc/include/passes/caffe_frontend/CaffeFrontend.h @@ -20,13 +20,8 @@ #include "pass/Pass.h" #include "pass/PassData.h" -using namespace nncc::contrib::pass; -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { @@ -48,8 +43,6 @@ public: }; } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //NNCC_CAFFEFRONTEND_H diff --git a/contrib/nnc/include/passes/common_frontend/model_allocation.h b/contrib/nnc/include/passes/common_frontend/model_allocation.h index 7ce3e61..b4c0e05 100644 --- a/contrib/nnc/include/passes/common_frontend/model_allocation.h +++ 
b/contrib/nnc/include/passes/common_frontend/model_allocation.h @@ -20,13 +20,7 @@ #include #include -namespace nncc -{ -namespace contrib -{ -namespace frontend -{ -namespace common +namespace nnc { // Class that can be used to memory map a file with NN model @@ -53,9 +47,6 @@ private: int fd = -1; }; -} // namespace common -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // FRONTEND_COMMON_MODEL_ALLOCATION_H_ diff --git a/contrib/nnc/include/passes/common_frontend/nn_importer.h b/contrib/nnc/include/passes/common_frontend/nn_importer.h index 051852c..aa9fad4 100644 --- a/contrib/nnc/include/passes/common_frontend/nn_importer.h +++ b/contrib/nnc/include/passes/common_frontend/nn_importer.h @@ -17,13 +17,7 @@ #ifndef FRONTEND_COMMON_INCLUDE_NN_IMPORTER_ #define FRONTEND_COMMON_INCLUDE_NN_IMPORTER_ -namespace nncc -{ -namespace contrib -{ -namespace frontend -{ -namespace common +namespace nnc { class NNImporter @@ -36,9 +30,6 @@ public: virtual void dump() = 0; }; -} // namespace common -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // FRONTEND_COMMON_INCLUDE_NN_IMPORTER_ diff --git a/contrib/nnc/include/passes/common_frontend/shape_helper.h b/contrib/nnc/include/passes/common_frontend/shape_helper.h index eef5d5a..8250ee0 100644 --- a/contrib/nnc/include/passes/common_frontend/shape_helper.h +++ b/contrib/nnc/include/passes/common_frontend/shape_helper.h @@ -19,30 +19,22 @@ #include "core/modelIR/Shape.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace frontend -{ -namespace common -{ - -using nncc::contrib::core::data::Shape; class ShapeHelper { public: template - static Shape createShape(const Iterable &iter, std::size_t); + static mir::Shape createShape(const Iterable &iter, std::size_t); - static Shape &cutOffBatchDim(Shape &shape); + static mir::Shape &cutOffBatchDim(mir::Shape &shape); }; template -Shape ShapeHelper::createShape(const Iterable 
&iter, std::size_t size) +mir::Shape ShapeHelper::createShape(const Iterable &iter, std::size_t size) { - Shape sh; + mir::Shape sh; sh.resize(static_cast(size)); unsigned int i = 0; @@ -54,9 +46,6 @@ Shape ShapeHelper::createShape(const Iterable &iter, std::size_t size) return sh; } -} // namespace common -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // NNCC_SHAPE_HELPER_H diff --git a/contrib/nnc/include/passes/interpreter/Interpreter.h b/contrib/nnc/include/passes/interpreter/Interpreter.h index a86a00d..2e0b8c6 100644 --- a/contrib/nnc/include/passes/interpreter/Interpreter.h +++ b/contrib/nnc/include/passes/interpreter/Interpreter.h @@ -27,43 +27,33 @@ #include "core/modelIR/Tensor.h" -namespace nncc +namespace nnc { -namespace contrib +namespace mir { -namespace backend -{ -namespace interpreter -{ -namespace core -{ - -using namespace nncc::contrib::core::IR::model; -using nncc::contrib::core::data::Index; -using nncc::contrib::core::data::Tensor; class NNInterpreter : public IVisitor { public: explicit NNInterpreter() = default; - void visit(ADT::INode::Ref node, ops::ConcatOp &op) override; - void visit(ADT::INode::Ref node, ops::Conv2DOp &op) override; - void visit(ADT::INode::Ref node, ops::DepthwiseConv2DOp &op) override; - void visit(ADT::INode::Ref node, ops::ReluOp &op) override; - void visit(ADT::INode::Ref node, ops::SoftmaxOp &op) override; - void visit(ADT::INode::Ref node, ops::PoolOp &op) override; - void visit(ADT::INode::Ref node, ops::FullyConnectedOp &op) override; - void visit(ADT::INode::Ref node, ops::CappedReluOp &op) override; - void visit(ADT::INode::Ref node, ops::BiasAddOp &op) override; - void visit(ADT::INode::Ref node, ops::VariableOp &op) override; - void visit(ADT::INode::Ref node, ops::ReshapeOp &op) override; - void visit(ADT::INode::Ref node, ops::ScaleOp &op) override; - void visit(ADT::INode::Ref node, ops::BatchNormOp &op) override; - void visit(ADT::INode::Ref node, 
ops::DropoutOp &op) override; + void visit(INode::Ref node, ops::ConcatOp &op) override; + void visit(INode::Ref node, ops::Conv2DOp &op) override; + void visit(INode::Ref node, ops::DepthwiseConv2DOp &op) override; + void visit(INode::Ref node, ops::ReluOp &op) override; + void visit(INode::Ref node, ops::SoftmaxOp &op) override; + void visit(INode::Ref node, ops::PoolOp &op) override; + void visit(INode::Ref node, ops::FullyConnectedOp &op) override; + void visit(INode::Ref node, ops::CappedReluOp &op) override; + void visit(INode::Ref node, ops::BiasAddOp &op) override; + void visit(INode::Ref node, ops::VariableOp &op) override; + void visit(INode::Ref node, ops::ReshapeOp &op) override; + void visit(INode::Ref node, ops::ScaleOp &op) override; + void visit(INode::Ref node, ops::BatchNormOp &op) override; + void visit(INode::Ref node, ops::DropoutOp &op) override; void setInput(const std::string &name, const TensorVariant& data); - std::vector &getResult(ADT::INode::Ref node); + std::vector &getResult(INode::Ref node); /** * @brief Intermediate interpreter results getter * @param nodeName - name of node @@ -79,18 +69,15 @@ private: * @brief Used to collect nodes data for getting intermediate interpreter results * @param n - reference to node */ - void mapByName(ADT::INode::Ref n); + void mapByName(INode::Ref n); private: std::map> vars; std::unordered_map data; - std::map nodeByName; + std::map nodeByName; }; -} // namespace core -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace mir +} // namespace nnc #endif //_NNC_BACKEND_INTERPRETER_CORE_INTERPRETER_ diff --git a/contrib/nnc/include/passes/interpreter/InterpreterPass.h b/contrib/nnc/include/passes/interpreter/InterpreterPass.h index 9552df4..2734343 100644 --- a/contrib/nnc/include/passes/interpreter/InterpreterPass.h +++ b/contrib/nnc/include/passes/interpreter/InterpreterPass.h @@ -23,17 +23,9 @@ #include "pass/Pass.h" #include 
"pass/PassData.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter +namespace nnc { -using namespace nncc::contrib; -using namespace nncc::contrib::pass; class InterpreterPass : public Pass { @@ -44,13 +36,10 @@ public: virtual ~InterpreterPass(); private: - nncc::contrib::core::ADT::TensorVariant loadInput(const nncc::contrib::core::data::Shape &); - nncc::contrib::core::ADT::TensorVariant *_out; + nnc::mir::TensorVariant loadInput(const nnc::mir::Shape &); + nnc::mir::TensorVariant *_out; }; -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //NNCC_INTERPRETERPASS_H diff --git a/contrib/nnc/include/passes/soft_backend/BaseGenerator.h b/contrib/nnc/include/passes/soft_backend/BaseGenerator.h index 45c5cf1..36084f1 100644 --- a/contrib/nnc/include/passes/soft_backend/BaseGenerator.h +++ b/contrib/nnc/include/passes/soft_backend/BaseGenerator.h @@ -24,15 +24,8 @@ #include #include -using namespace nncc::contrib::pass; -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { class ModelAnalyzer; @@ -98,9 +91,6 @@ protected: std::string _paramsPath; }; -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_SOFT_BACKEND_BASE_GENERATOR_H_ diff --git a/contrib/nnc/include/passes/soft_backend/CGenerator.h b/contrib/nnc/include/passes/soft_backend/CGenerator.h index 3203455..e0e984d 100644 --- a/contrib/nnc/include/passes/soft_backend/CGenerator.h +++ b/contrib/nnc/include/passes/soft_backend/CGenerator.h @@ -20,13 +20,7 @@ #include "passes/soft_backend/BaseGenerator.h" #include "pass/Pass.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { /** @@ -47,9 +41,6 @@ private: CCodeGenerator() = default; }; -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc 
#endif //_NNC_SOFT_BACKEND_C_GENERATOR_H_ diff --git a/contrib/nnc/include/passes/soft_backend/CPPGenerator.h b/contrib/nnc/include/passes/soft_backend/CPPGenerator.h index cd5cb10..5920cd5 100644 --- a/contrib/nnc/include/passes/soft_backend/CPPGenerator.h +++ b/contrib/nnc/include/passes/soft_backend/CPPGenerator.h @@ -20,13 +20,7 @@ #include "passes/soft_backend/BaseGenerator.h" #include "pass/Pass.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { class TensorDescription; @@ -83,9 +77,6 @@ private: CPPCodeGenerator(): BaseCodeGenerator() {} }; -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_SOFT_BACKEND_CPP_GENERATOR_H_ diff --git a/contrib/nnc/include/passes/tflite_frontend/TfliteFrontend.h b/contrib/nnc/include/passes/tflite_frontend/TfliteFrontend.h index 4034448..97549ad 100644 --- a/contrib/nnc/include/passes/tflite_frontend/TfliteFrontend.h +++ b/contrib/nnc/include/passes/tflite_frontend/TfliteFrontend.h @@ -20,13 +20,8 @@ #include "pass/Pass.h" #include "pass/PassData.h" -using namespace nncc::contrib::pass; -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace tflite { @@ -49,8 +44,6 @@ private: }; } // namespace tflite -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //NNCC_TFLITEFRONTEND_H diff --git a/contrib/nnc/include/support/CommandLine.h b/contrib/nnc/include/support/CommandLine.h index 10529a3..9672501 100644 --- a/contrib/nnc/include/support/CommandLine.h +++ b/contrib/nnc/include/support/CommandLine.h @@ -26,11 +26,9 @@ #include #include -namespace nncc +namespace nnc { -namespace contrib -{ -namespace clopt +namespace cli { // forward declarations @@ -524,8 +522,7 @@ void checkOutFile(const Option &); void checkOutDir(const Option &); void checkDebugFile(const Option &); -} // namespace clopt -} // namespace contrib -} // namespace nncc 
+} // namespace cli +} // namespace nnc #endif //NNCC_COMMANDLINE_H diff --git a/contrib/nnc/pass/PassManager.cpp b/contrib/nnc/pass/PassManager.cpp index 918803c..d85f89b 100644 --- a/contrib/nnc/pass/PassManager.cpp +++ b/contrib/nnc/pass/PassManager.cpp @@ -16,11 +16,7 @@ #include "pass/PassManager.h" -namespace nncc -{ -namespace contrib -{ -namespace pass +namespace nnc { PassManager *PassManager::getPassManager() @@ -38,6 +34,4 @@ void PassManager::registerPass(Pass *pass) } // registerPass -} // namespace pass -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/acl_soft_backend/AclCPPGenerator.cpp b/contrib/nnc/passes/acl_soft_backend/AclCPPGenerator.cpp index 925fb53..fa32c94 100644 --- a/contrib/nnc/passes/acl_soft_backend/AclCPPGenerator.cpp +++ b/contrib/nnc/passes/acl_soft_backend/AclCPPGenerator.cpp @@ -16,13 +16,7 @@ #include "passes/acl_soft_backend/AclCPPGenerator.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { AclCPPCodeGenerator::AclCPPCodeGenerator() @@ -41,7 +35,4 @@ Pass& AclCPPCodeGenerator::getInstance() return aclCPPCodeGenerator; } -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/acl_soft_backend/ArtifactGeneratorCppCode.cpp b/contrib/nnc/passes/acl_soft_backend/ArtifactGeneratorCppCode.cpp index f796d83..41d9635 100644 --- a/contrib/nnc/passes/acl_soft_backend/ArtifactGeneratorCppCode.cpp +++ b/contrib/nnc/passes/acl_soft_backend/ArtifactGeneratorCppCode.cpp @@ -18,24 +18,26 @@ #include "passes/acl_soft_backend/ArtifactModel.h" using namespace std; -using namespace nncc::contrib::backend::soft; -ArtifactGeneratorCppCode::ArtifactGeneratorCppCode(const string& name) +namespace nnc +{ + +ArtifactGeneratorCppCode::ArtifactGeneratorCppCode(const string &name) { } -void ArtifactGeneratorCppCode::visit(const ArtifactLiteral* node) +void 
ArtifactGeneratorCppCode::visit(const ArtifactLiteral *node) { _out << node->getValue(); } -void ArtifactGeneratorCppCode::visit(const ArtifactFunctionCall* node) +void ArtifactGeneratorCppCode::visit(const ArtifactFunctionCall *node) { _out << node->getFuncName(); _out << "("; bool addComma = false; - for (const auto* par : node->getParamList()) + for (const auto *par : node->getParamList()) { if (addComma) _out << ", "; @@ -47,11 +49,11 @@ void ArtifactGeneratorCppCode::visit(const ArtifactFunctionCall* node) _out << ")"; } -void ArtifactGeneratorCppCode::visit(const ArtifactBlock* node) +void ArtifactGeneratorCppCode::visit(const ArtifactBlock *node) { _out << "{" << endl; - for (const auto* st : node->getStatements()) + for (const auto *st : node->getStatements()) { st->accept(this); _out << ";"; @@ -60,12 +62,14 @@ void ArtifactGeneratorCppCode::visit(const ArtifactBlock* node) _out << "}" << endl; } -void ArtifactGeneratorCppCode::visit(const ArtifactFunction* node) +void ArtifactGeneratorCppCode::visit(const ArtifactFunction *node) { } -void ArtifactGeneratorCppCode::visit(const ArtifactModule* node) +void ArtifactGeneratorCppCode::visit(const ArtifactModule *node) { } + +} // namespace nnc diff --git a/contrib/nnc/passes/acl_soft_backend/ArtifactGeneratorCppDecl.cpp b/contrib/nnc/passes/acl_soft_backend/ArtifactGeneratorCppDecl.cpp index 2d6635c..7ba279a 100644 --- a/contrib/nnc/passes/acl_soft_backend/ArtifactGeneratorCppDecl.cpp +++ b/contrib/nnc/passes/acl_soft_backend/ArtifactGeneratorCppDecl.cpp @@ -18,7 +18,9 @@ #include "passes/acl_soft_backend/ArtifactModel.h" using namespace std; -using namespace nncc::contrib::backend::soft; + +namespace nnc +{ ArtifactGeneratorCppDecl::ArtifactGeneratorCppDecl(const string& name) { @@ -58,3 +60,5 @@ void ArtifactGeneratorCppDecl::visit(const ArtifactModule* node) { } + +} // namespace nnc diff --git a/contrib/nnc/passes/acl_soft_backend/ArtifactModel.cpp b/contrib/nnc/passes/acl_soft_backend/ArtifactModel.cpp 
index 84f787e..acdbe34 100644 --- a/contrib/nnc/passes/acl_soft_backend/ArtifactModel.cpp +++ b/contrib/nnc/passes/acl_soft_backend/ArtifactModel.cpp @@ -17,4 +17,3 @@ #include "passes/acl_soft_backend/IArtifactGenerator.h" using namespace std; -using namespace nncc::contrib::backend::soft; diff --git a/contrib/nnc/passes/caffe_frontend/caffe_dump_visitor.cpp b/contrib/nnc/passes/caffe_frontend/caffe_dump_visitor.cpp index dc5149c..bb49089 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_dump_visitor.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_dump_visitor.cpp @@ -18,11 +18,7 @@ #include "caffe_dump_visitor.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { @@ -86,6 +82,4 @@ void DumpVisitor::visit(const BlobShape& bs) } } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/caffe_frontend/caffe_dump_visitor.h b/contrib/nnc/passes/caffe_frontend/caffe_dump_visitor.h index 176d986..ee88440 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_dump_visitor.h +++ b/contrib/nnc/passes/caffe_frontend/caffe_dump_visitor.h @@ -19,11 +19,7 @@ #include "caffe_visitor.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { @@ -38,8 +34,6 @@ public: }; } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //NNCC_CAFFE_DUMP_VISITOR_H diff --git a/contrib/nnc/passes/caffe_frontend/caffe_frontend.cpp b/contrib/nnc/passes/caffe_frontend/caffe_frontend.cpp index 69f6bc7..384874d 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_frontend.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_frontend.cpp @@ -24,14 +24,8 @@ #include "caffe_importer.h" -using namespace nncc::contrib::pass; -using namespace nncc::contrib::frontend::caffe; -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { @@ 
-45,19 +39,17 @@ Pass &CaffeFrontend::getInstance() PassData CaffeFrontend::run(PassData data) { (void)data; - nncc::contrib::frontend::caffe::CaffeImporter importer{clopt::inputFile}; + nnc::caffe::CaffeImporter importer{cli::inputFile}; bool success = importer.import(); if (!success) { - throw PassException("Could not load model: " + clopt::inputFile + "\n"); + throw PassException("Could not load model: " + cli::inputFile + "\n"); } - return reinterpret_cast(importer.createIR()); + return reinterpret_cast(importer.createIR()); } } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp index d73a299..b4c6709 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp @@ -23,11 +23,7 @@ #include "caffe_importer.h" #include "proto_reader.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { @@ -39,7 +35,7 @@ bool CaffeImporter::import() net.reset(new NetParameter()); // import success flag is returned - return util::readProtoFromBinaryFile(modelFilename.c_str(), net.get()); + return readProtoFromBinaryFile(modelFilename.c_str(), net.get()); } void *CaffeImporter::createIR() @@ -63,6 +59,4 @@ void CaffeImporter::dump() } } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/caffe_frontend/caffe_importer.h b/contrib/nnc/passes/caffe_frontend/caffe_importer.h index 406c3af..d1f6298 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_importer.h +++ b/contrib/nnc/passes/caffe_frontend/caffe_importer.h @@ -24,18 +24,14 @@ #include "passes/common_frontend/nn_importer.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { using namespace ::caffe; -class 
CaffeImporter : public common::NNImporter +class CaffeImporter : public NNImporter { public: explicit CaffeImporter(std::string filename) : modelFilename(std::move(filename)) {}; @@ -50,8 +46,6 @@ private: }; } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // NNCC_CAFFE_IMPORTER_H diff --git a/contrib/nnc/passes/caffe_frontend/caffe_model_visitor.cpp b/contrib/nnc/passes/caffe_frontend/caffe_model_visitor.cpp index 2e9c791..cdad4a3 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_model_visitor.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_model_visitor.cpp @@ -25,20 +25,15 @@ #include "passes/common_frontend/shape_helper.h" #include "caffe_model_visitor.h" -using namespace nncc::contrib::pass; -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { -using VariableOp = nncc::contrib::core::IR::model::ops::VariableOp; -using nncc::contrib::core::data::Shape; -using nncc::contrib::core::data::util::transposeTensor; +using VariableOp = nnc::mir::ops::VariableOp; +using nnc::mir::Shape; +using nnc::mir::transposeTensor; void ModelVisitor::visit(const NetParameter& np) { @@ -175,8 +170,8 @@ void ModelVisitor::processInputLayer(const LayerParameter& lp) for (const auto &shape : lp.input_param().shape()) { - Shape sh = common::ShapeHelper::createShape(shape.dim(), shape.dim_size()); - inputShapes.push_back(common::ShapeHelper::cutOffBatchDim(sh)); + Shape sh = ShapeHelper::createShape(shape.dim(), shape.dim_size()); + inputShapes.push_back(ShapeHelper::cutOffBatchDim(sh)); } if (!inputShapes.empty()) @@ -218,7 +213,7 @@ std::shared_ptr ModelVisitor::createTensor(const BlobProto &bp) char *dstData = tensorBufferCopy.get(); memcpy(dstData, srcData, bufferSize); - Shape tensorShape = common::ShapeHelper::createShape( + Shape tensorShape = ShapeHelper::createShape( bp.shape().dim(), static_cast(bp.shape().dim_size())); auto tensor = 
std::make_shared(tensorShape, tensorBufferCopy, type, elementSize); @@ -285,6 +280,4 @@ void ModelVisitor::setIrNodeNames() { } } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/caffe_frontend/caffe_model_visitor.h b/contrib/nnc/passes/caffe_frontend/caffe_model_visitor.h index 3b38cd2..621c09c 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_model_visitor.h +++ b/contrib/nnc/passes/caffe_frontend/caffe_model_visitor.h @@ -27,57 +27,44 @@ #include "caffe_visitor.h" #include "caffe_op_creator.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { -using namespace ::caffe; - -using IrTensor = nncc::contrib::core::ADT::TensorVariant; -using nncc::contrib::core::IR::model::Graph; -using nncc::contrib::core::IR::model::ADT::INode; -using nncc::contrib::core::data::Shape; - class ModelVisitor : public Visitor { public: - ModelVisitor() : graph(new Graph()), opCreator(graph) {}; + ModelVisitor() : graph(new mir::Graph()), opCreator(graph) {}; - void visit(const NetParameter&) override; - void visit(const LayerParameter&) override; - void visit(const BlobProto&) override; - void visit(const BlobShape&) override; + void visit(const ::caffe::NetParameter&) override; + void visit(const ::caffe::LayerParameter&) override; + void visit(const ::caffe::BlobProto&) override; + void visit(const ::caffe::BlobShape&) override; - Graph* getGraph(); + mir::Graph* getGraph(); void setGraphOutputs(); void setIrNodeNames(); private: - Graph* graph = nullptr; + mir::Graph* graph = nullptr; OpCreator opCreator; - std::vector inputShapes; - std::map opsForBlobsTheyOutput; - std::vector graphOutputs; + std::vector inputShapes; + std::map opsForBlobsTheyOutput; + std::vector graphOutputs; - std::shared_ptr createTensor(const BlobProto&); - std::vector createOpInputs(const LayerParameter&); - std::vector> createOpParams(const LayerParameter&); + 
std::shared_ptr createTensor(const ::caffe::BlobProto&); + std::vector createOpInputs(const ::caffe::LayerParameter&); + std::vector> createOpParams(const ::caffe::LayerParameter&); void createGraphInputs(const std::vector &names, - const std::vector &shapes); - void processInputLayer(const LayerParameter&); - void processDeprecatedInput(const NetParameter&); + const std::vector &shapes); + void processInputLayer(const ::caffe::LayerParameter&); + void processDeprecatedInput(const ::caffe::NetParameter&); }; } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //NNCC_CAFFE_IR_VISITOR_H diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp index ce75e88..2bba6d2 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp @@ -38,20 +38,14 @@ #include -using namespace nncc::contrib::pass; -using namespace nncc::contrib::core::data; -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { -namespace util -{ +using namespace mir; + template static inline bool has2DStride(const OptsType& opts) @@ -224,7 +218,7 @@ static std::shared_ptr fixGroupedKernel(int groups, std::shared_ptr fixGroupedKernel(int groups, std::shared_ptrat(foldedIdx), foldedKernel->at(foldedIdx) + dataSize, unfoldKernel->at(idx)); @@ -248,7 +242,6 @@ static std::shared_ptr fixGroupedKernel(int groups, std::shared_ptr OpCreator::createConv2D(InputOps inputs, InputParams params, const caffe::ConvolutionParameter& opts) @@ -256,13 +249,13 @@ std::vector OpCreator::createConv2D(InputOps inputs, InputParams par assert(opts.stride_size() <= 2); ops::PaddingType padType = ops::PaddingType::Custom; - Shape strideShape = util::getConvStride(opts); + Shape strideShape = getConvStride(opts); std::shared_ptr unfoldedTensor = params[0]; if (opts.group() != 1) { // first we 
need to convert kernel of grouped convolution to appropriate ordinary kernel - unfoldedTensor = util::fixGroupedKernel(opts.group(), params[0]); + unfoldedTensor = fixGroupedKernel(opts.group(), params[0]); } auto outputs = createOp(inputs, std::move(*unfoldedTensor), strideShape, padType); @@ -339,7 +332,7 @@ std::vector OpCreator::createConcat(InputOps inputs, InputParams par { (void)params; - return createOp(inputs, inputs.size(), util::getAxisValue(opts)); + return createOp(inputs, inputs.size(), getAxisValue(opts)); } std::vector OpCreator::createPool(InputOps inputs, InputParams params, @@ -352,10 +345,10 @@ std::vector OpCreator::createPool(InputOps inputs, InputParams param throw PassException("Pooling layer global_pooling param is not supported yet"); } - Shape windowShape = util::getPoolWindowShape(opts); - ops::PoolOp::PoolingType poolType = util::getPoolingType(opts); + Shape windowShape = getPoolWindowShape(opts); + ops::PoolOp::PoolingType poolType = getPoolingType(opts); ops::PaddingType padType = ops::PaddingType::Custom; - Shape stride = util::getPoolStride(opts); + Shape stride = getPoolStride(opts); ops::PoolOp::BorderType borderType; switch (poolType) { @@ -394,7 +387,7 @@ std::vector OpCreator::createSoftmax(InputOps inputs, InputParams pa { (void)params; - return createOp(inputs, util::getAxisValue(opts)); + return createOp(inputs, getAxisValue(opts)); } /** @@ -420,7 +413,7 @@ std::vector OpCreator::createReshape(InputOps inputs, InputParams pa throw PassException("Reshape layer doesn't have shape parameter"); } - Shape newShape = common::ShapeHelper::createShape(opts.shape().dim(), opts.shape().dim_size()); + Shape newShape = ShapeHelper::createShape(opts.shape().dim(), opts.shape().dim_size()); for (unsigned int i = 0; i < newShape.rank(); ++i) { @@ -468,7 +461,7 @@ std::vector OpCreator::createBatchNorm(InputOps inputs, InputParams if (params[2]->getShape().rank() != 1 && params[2]->getShape().dim(0) != 1) throw PassException("Unexpected 
shape of scale parameter in batch norm"); - float scaleFactor = *reinterpret_cast(params[2]->at(core::data::Index{0})); + float scaleFactor = *reinterpret_cast(params[2]->at(mir::Index{0})); // Code below is taken from cpu caffe implementation: // https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_norm_layer.cpp#L100 if (scaleFactor != 0.0f) @@ -518,6 +511,4 @@ void OpCreator::connectInputs(INode::Ref op, InputOps inputs) } } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h index 96752fe..a901ce0 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h +++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h @@ -29,22 +29,18 @@ #include "caffe/proto/caffe.pb.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { using namespace ::caffe; -namespace ops = nncc::contrib::core::IR::model::ops; -using nncc::contrib::core::IR::model::Graph; -using nncc::contrib::core::IR::model::ADT::INode; -using IrTensor = nncc::contrib::core::ADT::TensorVariant; -using nncc::contrib::core::data::Shape; +namespace ops = nnc::mir::ops; +using nnc::mir::Graph; +using nnc::mir::INode; +using IrTensor = nnc::mir::TensorVariant; +using nnc::mir::Shape; class OpCreator { @@ -89,8 +85,6 @@ std::vector OpCreator::createOp(std::vector& inputs, Typ } } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //NNCC_CAFFE_OP_CREATOR_H diff --git a/contrib/nnc/passes/caffe_frontend/caffe_visitor.h b/contrib/nnc/passes/caffe_frontend/caffe_visitor.h index 866972a..ec01e73 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_visitor.h +++ b/contrib/nnc/passes/caffe_frontend/caffe_visitor.h @@ -19,11 +19,7 @@ #include "caffe/proto/caffe.pb.h" -namespace nncc -{ -namespace contrib -{ -namespace 
frontend +namespace nnc { namespace caffe { @@ -42,8 +38,6 @@ public: }; } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //NNCC_CAFFE_VISITOR_H diff --git a/contrib/nnc/passes/caffe_frontend/caffe_walker.cpp b/contrib/nnc/passes/caffe_frontend/caffe_walker.cpp index 6a48203..4247aa5 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_walker.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_walker.cpp @@ -16,11 +16,7 @@ #include "caffe_walker.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { @@ -51,6 +47,4 @@ void ModelWalker::walkLayerParameter(const LayerParameter& lp) } } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/caffe_frontend/caffe_walker.h b/contrib/nnc/passes/caffe_frontend/caffe_walker.h index 00f715e..83a02f9 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_walker.h +++ b/contrib/nnc/passes/caffe_frontend/caffe_walker.h @@ -21,11 +21,7 @@ #include "caffe_visitor.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { @@ -45,8 +41,6 @@ private: }; } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // NNCC_CAFFE_WALKER_H diff --git a/contrib/nnc/passes/caffe_frontend/proto_reader.cpp b/contrib/nnc/passes/caffe_frontend/proto_reader.cpp index 75c172d..6cba9ed 100644 --- a/contrib/nnc/passes/caffe_frontend/proto_reader.cpp +++ b/contrib/nnc/passes/caffe_frontend/proto_reader.cpp @@ -23,16 +23,10 @@ #include "proto_reader.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { -namespace util -{ const int protoBytesLimit = INT_MAX; const int protoBytesWarningLimit = 1024 * 1024 * 512; @@ -75,8 +69,5 @@ bool readProtoFromBinaryFile(const char* filename, ::caffe::NetParameter* proto) return 
success; } -} // namespace util } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/caffe_frontend/proto_reader.h b/contrib/nnc/passes/caffe_frontend/proto_reader.h index 0e28dad..16aaf4b 100644 --- a/contrib/nnc/passes/caffe_frontend/proto_reader.h +++ b/contrib/nnc/passes/caffe_frontend/proto_reader.h @@ -21,16 +21,10 @@ #include "google/protobuf/io/zero_copy_stream_impl.h" #include "google/protobuf/text_format.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace caffe { -namespace util -{ using google::protobuf::io::FileInputStream; using google::protobuf::io::ZeroCopyInputStream; @@ -39,10 +33,7 @@ using google::protobuf::io::CodedInputStream; bool readProtoFromTextFile(const char* filename, ::caffe::NetParameter* proto); bool readProtoFromBinaryFile(const char* filename, ::caffe::NetParameter* proto); -} // namespace util } // namespace caffe -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // NNCC_PROTO_READER_H diff --git a/contrib/nnc/passes/common_frontend/model_allocation.cpp b/contrib/nnc/passes/common_frontend/model_allocation.cpp index ae022bc..fa6969c 100644 --- a/contrib/nnc/passes/common_frontend/model_allocation.cpp +++ b/contrib/nnc/passes/common_frontend/model_allocation.cpp @@ -21,7 +21,8 @@ #include "passes/common_frontend/model_allocation.h" -using namespace nncc::contrib::frontend::common; +namespace nnc +{ ModelAllocation::ModelAllocation(std::string filename) { @@ -36,7 +37,7 @@ ModelAllocation::ModelAllocation(std::string filename) stat st{}; int flag = fstat(fd, &st); if (flag == -1) - { + { return; } @@ -67,3 +68,5 @@ ModelAllocation::~ModelAllocation() const void *ModelAllocation::getDataPnt() { return mmapState == MAPPED ? dataPnt : nullptr; } size_t ModelAllocation::getNumBytes() { return mmapState == MAPPED ? 
numBytes : 0; } + +} // namespace nnc diff --git a/contrib/nnc/passes/common_frontend/shape_helper.cpp b/contrib/nnc/passes/common_frontend/shape_helper.cpp index 5acae2f..d3d2c25 100644 --- a/contrib/nnc/passes/common_frontend/shape_helper.cpp +++ b/contrib/nnc/passes/common_frontend/shape_helper.cpp @@ -19,18 +19,11 @@ #include "passes/common_frontend/shape_helper.h" #include "pass/PassException.h" -using namespace nncc::contrib::pass; -namespace nncc -{ -namespace contrib -{ -namespace frontend -{ -namespace common +namespace nnc { -Shape &ShapeHelper::cutOffBatchDim(Shape &shape) +mir::Shape &ShapeHelper::cutOffBatchDim(mir::Shape &shape) { if (shape.dim(0) != 1) { @@ -49,7 +42,4 @@ Shape &ShapeHelper::cutOffBatchDim(Shape &shape) return shape; } -} // namespace common -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/interpreter/Interpreter.cpp b/contrib/nnc/passes/interpreter/Interpreter.cpp index 56ee022..19c6f7d 100644 --- a/contrib/nnc/passes/interpreter/Interpreter.cpp +++ b/contrib/nnc/passes/interpreter/Interpreter.cpp @@ -46,26 +46,16 @@ #include "ops/Dropout.h" #include "ops/BatchNorm.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace core +namespace nnc { -using nncc::contrib::core::data::Tensor; -namespace ops = nncc::contrib::core::IR::model::ops; -namespace impl = nncc::contrib::backend::interpreter::impl; +using namespace nnc::mir; std::vector &NNInterpreter::var(size_t id) { return vars[id]; } void NNInterpreter::setInput(const std::string &name, const TensorVariant& t) { data.emplace(name, t); } -void NNInterpreter::visit(ADT::INode::Ref node, ops::VariableOp &op) +void NNInterpreter::visit(INode::Ref node, ops::VariableOp &op) { mapByName(node); (void)op; @@ -77,7 +67,7 @@ void NNInterpreter::visit(ADT::INode::Ref node, ops::VariableOp &op) var(node->getId()) = {it->second}; } -std::vector 
&NNInterpreter::getResult(ADT::INode::Ref node) +std::vector &NNInterpreter::getResult(INode::Ref node) { auto res = vars.find(node->getId()); if (res != vars.end()) @@ -98,7 +88,7 @@ std::vector &NNInterpreter::getOperationResult(const std::string return getResult(it->second); } -void NNInterpreter::visit(ADT::INode::Ref node, ops::ConcatOp &op) +void NNInterpreter::visit(INode::Ref node, ops::ConcatOp &op) { mapByName(node); auto &operands = node->getPrevNodes(); @@ -107,111 +97,111 @@ void NNInterpreter::visit(ADT::INode::Ref node, ops::ConcatOp &op) { ins.push_back(var(in.node->getId())[in.index]); } - var(node->getId()) = impl::Concat(ins, op.getOutputShape(0), op.getAxis())(); + var(node->getId()) = Concat(ins, op.getOutputShape(0), op.getAxis())(); } -void NNInterpreter::visit(ADT::INode::Ref node, ops::Conv2DOp &op) +void NNInterpreter::visit(INode::Ref node, ops::Conv2DOp &op) { mapByName(node); auto operand = node->getPrevNodes()[0]; - var(node->getId()) = impl::Conv2D(var(operand.node->getId())[operand.index], op)(); + var(node->getId()) = Conv2D(var(operand.node->getId())[operand.index], op)(); } -void NNInterpreter::visit(ADT::INode::Ref node, ops::ReshapeOp &op) +void NNInterpreter::visit(INode::Ref node, ops::ReshapeOp &op) { mapByName(node); auto operand = node->getPrevNodes()[0]; auto input = var(operand.node->getId())[operand.index]; - var(node->getId()) = impl::Reshape(input, op)(); + var(node->getId()) = Reshape(input, op)(); } -void NNInterpreter::visit(ADT::INode::Ref node, ops::ReluOp &op) +void NNInterpreter::visit(INode::Ref node, ops::ReluOp &op) { mapByName(node); auto operand = node->getPrevNodes()[0]; Tensor input(var(operand.node->getId())[operand.index]); - var(node->getId()) = impl::Fill( + var(node->getId()) = Fill( op.getOutputShape(0), [&input](const Index &id) { return std::max(input.at(id), 0.0f); })(); } -void NNInterpreter::visit(ADT::INode::Ref node, ops::SoftmaxOp &op) +void NNInterpreter::visit(INode::Ref node, 
ops::SoftmaxOp &op) { mapByName(node); auto operand = node->getPrevNodes()[0]; auto input = var(operand.node->getId())[operand.index]; - var(node->getId()) = impl::Softmax(op.getInputShape(0), input, op.getAxis())(); + var(node->getId()) = Softmax(op.getInputShape(0), input, op.getAxis())(); } -void NNInterpreter::visit(ADT::INode::Ref node, ops::PoolOp &op) +void NNInterpreter::visit(INode::Ref node, ops::PoolOp &op) { mapByName(node); auto operand = node->getPrevNodes()[0]; auto input = var(operand.node->getId())[operand.index]; - var(node->getId()) = impl::Pool(input, op)(); + var(node->getId()) = Pool(input, op)(); } -void NNInterpreter::visit(ADT::INode::Ref node, ops::FullyConnectedOp &op) +void NNInterpreter::visit(INode::Ref node, ops::FullyConnectedOp &op) { mapByName(node); auto operand = node->getPrevNodes()[0]; TensorVariant input = var(operand.node->getId())[operand.index]; - var(node->getId()) = impl::FullyConnected(input, op)(); + var(node->getId()) = FullyConnected(input, op)(); } -void NNInterpreter::visit(ADT::INode *node, ops::CappedReluOp &op) +void NNInterpreter::visit(INode *node, ops::CappedReluOp &op) { mapByName(node); auto operand = node->getPrevNodes()[0]; Tensor input(var(operand.node->getId())[operand.index]); - var(node->getId()) = impl::Fill(op.getOutputShape(0), [&input, &op](const Index &id) { + var(node->getId()) = Fill(op.getOutputShape(0), [&input, &op](const Index &id) { return std::min(std::max(input.at(id), 0.0f), op.getCap()); })(); } -void NNInterpreter::visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) +void NNInterpreter::visit(INode *node, ops::DepthwiseConv2DOp &op) { mapByName(node); auto operand = node->getPrevNodes()[0]; TensorVariant input(var(operand.node->getId())[operand.index]); - var(node->getId()) = impl::DepthwiseConv2D(input, op)(); + var(node->getId()) = DepthwiseConv2D(input, op)(); } -void NNInterpreter::visit(ADT::INode *node, ops::BiasAddOp &op) +void NNInterpreter::visit(INode *node, ops::BiasAddOp 
&op) { mapByName(node); auto operand = node->getPrevNodes()[0]; auto input = var(operand.node->getId())[operand.index]; - var(node->getId()) = impl::BiasAdd(input, op.getWeights(), op.getOutputShape(0))(); + var(node->getId()) = BiasAdd(input, op.getWeights(), op.getOutputShape(0))(); } -void NNInterpreter::visit(ADT::INode *node, ops::BatchNormOp &op) +void NNInterpreter::visit(INode *node, ops::BatchNormOp &op) { mapByName(node); auto operand = node->getPrevNodes()[0]; TensorVariant input(var(operand.node->getId())[operand.index]); // TODO implement this - var(node->getId()) = impl::BatchNorm(input, op)(); + var(node->getId()) = BatchNorm(input, op)(); } -void NNInterpreter::visit(ADT::INode *node, ops::ScaleOp &op) +void NNInterpreter::visit(INode *node, ops::ScaleOp &op) { mapByName(node); auto operand = node->getPrevNodes()[0]; TensorVariant input(var(operand.node->getId())[operand.index]); // TODO implement this - var(node->getId()) = impl::Scale(input, op)(); + var(node->getId()) = Scale(input, op)(); } -void NNInterpreter::visit(ADT::INode *node, ops::DropoutOp &op) +void NNInterpreter::visit(INode *node, ops::DropoutOp &op) { mapByName(node); auto operand = node->getPrevNodes()[0]; TensorVariant input(var(operand.node->getId())[operand.index]); // TODO implement this - var(node->getId()) = impl::Dropout(input, op)(); + var(node->getId()) = Dropout(input, op)(); } -void NNInterpreter::mapByName(ADT::INode::Ref n) { +void NNInterpreter::mapByName(INode::Ref n) { auto &nodeName = n->getName(); if (nodeByName.find(nodeName) != nodeByName.end()) { @@ -223,8 +213,4 @@ void NNInterpreter::mapByName(ADT::INode::Ref n) { nodeByName[nodeName] = n; } -} // namespace core -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/interpreter/interpreter_pass.cpp b/contrib/nnc/passes/interpreter/interpreter_pass.cpp index b1d99c2..b2cf753 100644 --- 
a/contrib/nnc/passes/interpreter/interpreter_pass.cpp +++ b/contrib/nnc/passes/interpreter/interpreter_pass.cpp @@ -43,21 +43,10 @@ #include "core/modelIR/Tensor.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter +namespace nnc { -using namespace nncc::contrib; -using namespace nncc::contrib::pass; -using namespace nncc::contrib::core::data; -using namespace nncc::contrib::core::IR::model; -using nncc::contrib::core::data::Shape; -using nncc::contrib::backend::interpreter::core::NNInterpreter; +using namespace mir; Pass &InterpreterPass::getInstance() { static InterpreterPass instance; @@ -117,18 +106,18 @@ PassData InterpreterPass::run(PassData data) g->accept(&shapeInference); // Check nodes - auto inputNode = g->getInput(clopt::interInNode); + auto inputNode = g->getInput(cli::interInNode); if (inputNode == nullptr) { - throw PassException("input node <" + clopt::interInNode +"> not found" ); + throw PassException("input node <" + cli::interInNode +"> not found" ); } auto input = loadInput(inputNode->getOperation()->getOutputShape(0)); - interpreter.setInput(clopt::interInNode, input); + interpreter.setInput(cli::interInNode, input); g->accept(&interpreter); // Check nodes - for (auto &tensorName : clopt::interOutNode) { + for (auto &tensorName : cli::interOutNode) { auto outputNode = interpreter.getOperationResult(tensorName); if (outputNode.empty()) { throw PassException("output node <" + tensorName + "> not found"); @@ -137,14 +126,14 @@ PassData InterpreterPass::run(PassData data) } } - bool is_several_outs = (clopt::interOutNode.size() > 1); + bool is_several_outs = (cli::interOutNode.size() > 1); - nncc::contrib::core::ADT::TensorVariant *out = nullptr; - for (auto &tensorName : clopt::interOutNode) { + nnc::mir::TensorVariant *out = nullptr; + for (auto &tensorName : cli::interOutNode) { out = new TensorVariant(interpreter.getOperationResult(tensorName)[0]); #ifdef NNC_HDF5_SUPPORTED - writeTensorToHDF5File(out, 
tensorName, clopt::artifactDir); + writeTensorToHDF5File(out, tensorName, cli::artifactDir); #else std::cout << "Result <" << tensorName << "> wasn't saved, due to lack of HDF5" << std::endl; #endif // NNC_HDF5_SUPPORTED @@ -159,7 +148,7 @@ PassData InterpreterPass::run(PassData data) TensorVariant InterpreterPass::loadInput(const Shape &shape) { - auto f = fopen(clopt::interInputData.c_str(), "rb"); + auto f = fopen(cli::interInputData.c_str(), "rb"); assert(f && "Cannot open file"); int is_error = fseek(f, 0L, SEEK_END); @@ -170,7 +159,7 @@ TensorVariant InterpreterPass::loadInput(const Shape &shape) // Check size if (len != tensorSize) { std::stringstream info; - info << "Wrong input file size <" << clopt::interInputData << "> = " << len << ". Should be :" << tensorSize; + info << "Wrong input file size <" << cli::interInputData << "> = " << len << ". Should be :" << tensorSize; throw PassException(info.str()); } @@ -190,7 +179,4 @@ InterpreterPass::~InterpreterPass() delete _out; } -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/interpreter/ops/BatchNorm.h b/contrib/nnc/passes/interpreter/ops/BatchNorm.h index b23220d..5fd7350 100644 --- a/contrib/nnc/passes/interpreter/ops/BatchNorm.h +++ b/contrib/nnc/passes/interpreter/ops/BatchNorm.h @@ -22,18 +22,8 @@ #include "core/modelIR/operations/batch_norm.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl -{ - -using nncc::contrib::core::IR::model::ops::BatchNormOp; /** * @brief Implements DropoutOp for interpreter backend @@ -48,29 +38,25 @@ public: * @param in input data * @param op batch normalization operation description */ - explicit BatchNorm(const TensorVariant& input, const BatchNormOp& op) : _input(input), _op(op) {} + explicit BatchNorm(const mir::TensorVariant& input, const mir::ops::BatchNormOp& op) : _input(input), _op(op) {} /** * 
@brief computes operation aplication result * @return vector of all outputs from this node */ - std::vector operator()() override + std::vector operator()() override { //For now BatchNorm just copies input to output - return Fill(_input.getShape(), [this](const Index& idx) { + return Fill(_input.getShape(), [this](const mir::Index& idx) { return _input.at(idx); })(); } private: - const Tensor _input; - const BatchNormOp& _op; + const mir::Tensor _input; + const mir::ops::BatchNormOp& _op; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // _NNC_CORE_BACKEND_INTERPRETER_BATCHNORM_IMPL_ diff --git a/contrib/nnc/passes/interpreter/ops/Bias.h b/contrib/nnc/passes/interpreter/ops/Bias.h index 3a2a705..46bbc4e 100644 --- a/contrib/nnc/passes/interpreter/ops/Bias.h +++ b/contrib/nnc/passes/interpreter/ops/Bias.h @@ -20,21 +20,13 @@ #include "OperationImpl.h" #include "Fill.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { class BiasAdd : public OperationImpl { public: - BiasAdd(const TensorVariant &input, const TensorVariant &weights, const Shape &outputShape) + BiasAdd(const mir::TensorVariant &input, const mir::TensorVariant &weights, const mir::Shape &outputShape) : _weights(weights), _input(input), _outputShape(outputShape) { assert(_weights.getShape().rank() == 1); @@ -42,23 +34,19 @@ public: assert(_outputShape.dim(_outputShape.rank() - 1) == _weights.getShape().dim(0)); } - std::vector operator()() override + std::vector operator()() override { - return Fill(_outputShape, [this](const Index &idx) { + return Fill(_outputShape, [this](const mir::Index &idx) { return _input.at(idx) + _weights.at({idx.at(idx.rank() - 1)}); })(); } private: - const Tensor _weights; - const Tensor _input; - const Shape &_outputShape; + const mir::Tensor _weights; + const mir::Tensor _input; + const mir::Shape 
&_outputShape; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_BACKEND_INTERPRETER_BIAS_ diff --git a/contrib/nnc/passes/interpreter/ops/Concat.h b/contrib/nnc/passes/interpreter/ops/Concat.h index c5e8665..518b040 100644 --- a/contrib/nnc/passes/interpreter/ops/Concat.h +++ b/contrib/nnc/passes/interpreter/ops/Concat.h @@ -19,37 +19,29 @@ #include "Fill.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { template class Concat : public Fill { public: - explicit Concat(const std::vector &inputs, const Shape &outputShape, + explicit Concat(const std::vector &inputs, const mir::Shape &outputShape, unsigned int axis) : Fill(outputShape, getSingleFunction(inputs, axis)) { } private: - const std::function getSingleFunction(const std::vector &inputs, + const std::function getSingleFunction(const std::vector &inputs, unsigned int axis) { - std::vector> inputAccessors; + std::vector> inputAccessors; for (auto &in : inputs) { inputAccessors.emplace_back(in); } - return std::function([inputAccessors, axis](const Index &id) -> T { + return std::function([inputAccessors, axis](const mir::Index &id) -> T { unsigned int mi = 0; uint32_t along_axis = id.at(axis); @@ -59,7 +51,7 @@ private: mi++; } - Index local_id = id; + mir::Index local_id = id; local_id.at(axis) = along_axis; return inputAccessors[mi].at(local_id); @@ -67,10 +59,6 @@ private: } }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_FILL_IMPL_ diff --git a/contrib/nnc/passes/interpreter/ops/Depthwise_conv_2D.cpp b/contrib/nnc/passes/interpreter/ops/Depthwise_conv_2D.cpp index c5c380b..cb77bff 100644 --- a/contrib/nnc/passes/interpreter/ops/Depthwise_conv_2D.cpp +++ 
b/contrib/nnc/passes/interpreter/ops/Depthwise_conv_2D.cpp @@ -19,17 +19,12 @@ #include "Depthwise_conv_2D.h" #include "common.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { +using namespace mir; +using namespace mir::ops; + std::vector DepthwiseConv2D::operator()() { TensorVariant res = allocate_tensor(_out_shape); @@ -84,8 +79,4 @@ DepthwiseConv2D::DepthwiseConv2D(const TensorVariant &input, const DepthwiseConv assert(_kernel.getShape().dim(2) == _input.getShape().dim(2)); } -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/interpreter/ops/Depthwise_conv_2D.h b/contrib/nnc/passes/interpreter/ops/Depthwise_conv_2D.h index d1a55a0..3f54afd 100644 --- a/contrib/nnc/passes/interpreter/ops/Depthwise_conv_2D.h +++ b/contrib/nnc/passes/interpreter/ops/Depthwise_conv_2D.h @@ -22,39 +22,24 @@ #include "core/modelIR/operations/common.h" #include "core/modelIR/operations/depthwise_conv2d_op.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl -{ - -using nncc::contrib::core::IR::model::ops::DepthwiseConv2DOp; -using nncc::contrib::core::IR::model::ops::PaddingType; class DepthwiseConv2D : public OperationImpl { public: - explicit DepthwiseConv2D(const TensorVariant &input, const DepthwiseConv2DOp &op); - virtual std::vector operator()() override; + explicit DepthwiseConv2D(const mir::TensorVariant &input, const mir::ops::DepthwiseConv2DOp &op); + virtual std::vector operator()() override; private: - const Tensor _input; - const Tensor _kernel; - const Shape _strides; - const PaddingType _padding; - const Shape &_out_shape; - const DepthwiseConv2DOp &_op; + const mir::Tensor _input; + const mir::Tensor _kernel; + const mir::Shape _strides; + const mir::ops::PaddingType _padding; + const mir::Shape &_out_shape; + 
const mir::ops::DepthwiseConv2DOp &_op; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_DEPTHWISE_CONV2D_IMPL_ diff --git a/contrib/nnc/passes/interpreter/ops/Dropout.h b/contrib/nnc/passes/interpreter/ops/Dropout.h index f2481ce..001c9b1 100644 --- a/contrib/nnc/passes/interpreter/ops/Dropout.h +++ b/contrib/nnc/passes/interpreter/ops/Dropout.h @@ -22,18 +22,8 @@ #include "core/modelIR/operations/dropout_op.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl -{ - -using nncc::contrib::core::IR::model::ops::DropoutOp; /** * @brief Implements DropoutOp for interpreter backend @@ -47,32 +37,28 @@ public: * @param in input data * @param op dropout operation description */ - explicit Dropout(const TensorVariant& in, const DropoutOp& op) : _input(in), _op(op) {} + explicit Dropout(const mir::TensorVariant& in, const mir::ops::DropoutOp& op) : _input(in), _op(op) {} /** * @brief computes operation aplication result * @return vector of all outputs from this node */ - std::vector operator()() override; + std::vector operator()() override; private: - const Tensor _input; - const DropoutOp& _op; + const mir::Tensor _input; + const mir::ops::DropoutOp& _op; }; template -std::vector Dropout::operator()() +std::vector Dropout::operator()() { //For now dropout just copies input to output - return Fill(_input.getShape(), [this](const Index& idx) { + return Fill(_input.getShape(), [this](const mir::Index& idx) { return _input.at(idx); })(); } -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // _NNC_CORE_BACKEND_INTERPRETER_DROPOUT_IMPL_ diff --git a/contrib/nnc/passes/interpreter/ops/Elementwise.h b/contrib/nnc/passes/interpreter/ops/Elementwise.h index e867f65..ac0fe0b 100644 --- 
a/contrib/nnc/passes/interpreter/ops/Elementwise.h +++ b/contrib/nnc/passes/interpreter/ops/Elementwise.h @@ -22,53 +22,38 @@ #include "OperationImpl.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl -{ - -using nncc::contrib::core::ADT::TensorVariant; -using nncc::contrib::core::data::Tensor; template class Elementwise : public OperationImpl { public: - explicit Elementwise(const Shape &shape) : _shape(shape){}; + explicit Elementwise(const mir::Shape &shape) : _shape(shape){}; - std::vector operator()() override + std::vector operator()() override { auto res = OperationImpl::allocate_tensor(_shape); - Tensor accessor(res); + mir::Tensor accessor(res); elemwise(accessor); return {res}; } - virtual T single(const Index &index) = 0; + virtual T single(const mir::Index &index) = 0; protected: - void elemwise(Tensor &res) + void elemwise(mir::Tensor &res) { - for (auto &idx : ShapeRange(_shape)) + for (auto &idx : mir::ShapeRange(_shape)) { res.at(idx) = single(idx); } } - const Shape &_shape; + const mir::Shape &_shape; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_ELEMENTWISE_IMPL_ diff --git a/contrib/nnc/passes/interpreter/ops/Fill.h b/contrib/nnc/passes/interpreter/ops/Fill.h index c1db968..07fa10a 100644 --- a/contrib/nnc/passes/interpreter/ops/Fill.h +++ b/contrib/nnc/passes/interpreter/ops/Fill.h @@ -21,35 +21,23 @@ #include "Elementwise.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { template class Fill : public Elementwise { public: - explicit Fill(const Shape &shape, std::function f) + explicit Fill(const mir::Shape &shape, std::function f) : Elementwise(shape), _fval(f) { } - T single(const Index &index) override { return _fval(index); } + T single(const mir::Index &index) 
override { return _fval(index); } private: - const std::function _fval; + const std::function _fval; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_FILL_ diff --git a/contrib/nnc/passes/interpreter/ops/FullyConnected.h b/contrib/nnc/passes/interpreter/ops/FullyConnected.h index 84a72af..2cf882e 100644 --- a/contrib/nnc/passes/interpreter/ops/FullyConnected.h +++ b/contrib/nnc/passes/interpreter/ops/FullyConnected.h @@ -21,37 +21,27 @@ #include "core/modelIR/operations/fully_connected_op.h" #include "OperationImpl.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl -{ - -using nncc::contrib::core::IR::model::ops::FullyConnectedOp; template class FullyConnected : public OperationImpl { public: - FullyConnected(const TensorVariant &_input, const FullyConnectedOp &_op) : _op(_op), _input(_input) {} + FullyConnected(const mir::TensorVariant &_input, const mir::ops::FullyConnectedOp &_op) : _op(_op), _input(_input) {} - std::vector operator()() override + std::vector operator()() override { - TensorVariant res = OperationImpl::allocate_tensor(_op.getOutputShape(0)); - Tensor accessor(res); + mir::TensorVariant res = OperationImpl::allocate_tensor(_op.getOutputShape(0)); + mir::Tensor accessor(res); - ShapeRange outRange(res.getShape()); + mir::ShapeRange outRange(res.getShape()); - Tensor weights(_op.getWeights()); - const Shape &wShape = weights.getShape(); + mir::Tensor weights(_op.getWeights()); + const mir::Shape &wShape = weights.getShape(); uint32_t wRank = wShape.rank(); - const Shape &inShape = _input.getShape(); + const mir::Shape &inShape = _input.getShape(); uint32_t inRank = inShape.rank(); assert(inShape.dim(inRank - 1) == wShape.dim(wRank - 2)); @@ -62,7 +52,7 @@ public: uint32_t col; for (auto &outIdx : outRange) { - Index tIdx = outIdx; + mir::Index tIdx = 
outIdx; T& outputElement = accessor.at(outIdx); col = tIdx.at(wRank - 1); row = tIdx.at(wRank - 2); @@ -82,14 +72,10 @@ public: } private: - const FullyConnectedOp &_op; - const Tensor _input; + const mir::ops::FullyConnectedOp &_op; + const mir::Tensor _input; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_FULLYCONNECTED_ diff --git a/contrib/nnc/passes/interpreter/ops/OperationImpl.h b/contrib/nnc/passes/interpreter/ops/OperationImpl.h index 1694e75..9ab4753 100644 --- a/contrib/nnc/passes/interpreter/ops/OperationImpl.h +++ b/contrib/nnc/passes/interpreter/ops/OperationImpl.h @@ -24,29 +24,16 @@ #include "core/modelIR/Shape.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl -{ - -using namespace nncc::contrib::core::data; -using nncc::contrib::core::ADT::TensorVariant; - -using nncc::contrib::core::data::Shape; template class OperationImpl { public: - virtual std::vector operator()() = 0; + virtual std::vector operator()() = 0; protected: - TensorVariant allocate_tensor(const Shape &shape) + mir::TensorVariant allocate_tensor(const mir::Shape &shape) { size_t data_size = 1; for (uint32_t i = 0; i < shape.rank(); ++i) @@ -59,16 +46,12 @@ protected: std::shared_ptr data(od, [](const T* d) { delete[] d; }); // Use hardcoded DTYPE for now, since theres no support for operations on types other than // floats - TensorVariant t(shape, data, TensorVariant::DTYPE::FLOAT); + mir::TensorVariant t(shape, data, mir::TensorVariant::DTYPE::FLOAT); return t; } }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_OPERATION_IMPL_ diff --git a/contrib/nnc/passes/interpreter/ops/Pool.cpp b/contrib/nnc/passes/interpreter/ops/Pool.cpp index 3fcb104..35fa789 
100644 --- a/contrib/nnc/passes/interpreter/ops/Pool.cpp +++ b/contrib/nnc/passes/interpreter/ops/Pool.cpp @@ -22,17 +22,12 @@ #include "Pool.h" #include "common.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { +using namespace mir; +using namespace mir::ops; + Pool::Pool(const TensorVariant &_input, const PoolOp &op) : _op(op), _input(_input) { assert(op.getWindowShape().rank() == _input.getShape().rank()); @@ -118,8 +113,4 @@ float Pool::poolingFunc(float prev, float val) } } -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/interpreter/ops/Pool.h b/contrib/nnc/passes/interpreter/ops/Pool.h index 60349b0..07b12b6 100644 --- a/contrib/nnc/passes/interpreter/ops/Pool.h +++ b/contrib/nnc/passes/interpreter/ops/Pool.h @@ -21,38 +21,23 @@ #include "core/modelIR/operations/pool_op.h" #include "core/modelIR/operations/common.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl -{ - -using nncc::contrib::core::IR::model::ops::PaddingType; -using nncc::contrib::core::IR::model::ops::PoolOp; class Pool : public OperationImpl { public: - std::vector operator()() override; + std::vector operator()() override; - explicit Pool(const TensorVariant &_input, const PoolOp &op); + explicit Pool(const mir::TensorVariant &_input, const mir::ops::PoolOp &op); float poolingFunc(float prev, float val); private: - const PoolOp&_op; - const Tensor _input; + const mir::ops::PoolOp&_op; + const mir::Tensor _input; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_POOL_ diff --git a/contrib/nnc/passes/interpreter/ops/Reduce.h b/contrib/nnc/passes/interpreter/ops/Reduce.h index e77a4f5..18900de 100644 --- 
a/contrib/nnc/passes/interpreter/ops/Reduce.h +++ b/contrib/nnc/passes/interpreter/ops/Reduce.h @@ -26,21 +26,13 @@ #include "Fill.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { template class Reduce : public OperationImpl { public: - Reduce(const Shape &inputShape, const Shape &outputShape, const TensorVariant &input, uint32_t axis, + Reduce(const mir::Shape &inputShape, const mir::Shape &outputShape, const mir::TensorVariant &input, uint32_t axis, std::function reduceFunc) : _inShape(inputShape), _outputShape(outputShape), _input(input), _axis(axis), _reduceFunc(reduceFunc) @@ -48,11 +40,11 @@ public: assert(outputShape.dim(axis) == 1); } - std::vector operator()() override + std::vector operator()() override { - return Fill(_outputShape, [this](const Index &id) { + return Fill(_outputShape, [this](const mir::Index &id) { T element = T(); - Index inputId = id; + mir::Index inputId = id; uint32_t end = _inShape.dim(_axis); for (uint32_t i = 0; i < end; ++i) { @@ -64,17 +56,13 @@ public: } private: - const Shape &_inShape; - const Shape &_outputShape; - const Tensor _input; + const mir::Shape &_inShape; + const mir::Shape &_outputShape; + const mir::Tensor _input; const uint32_t _axis; const std::function _reduceFunc; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_REDUCE_IMPL_ diff --git a/contrib/nnc/passes/interpreter/ops/Reshape.h b/contrib/nnc/passes/interpreter/ops/Reshape.h index e10ace2..bba1d8a 100644 --- a/contrib/nnc/passes/interpreter/ops/Reshape.h +++ b/contrib/nnc/passes/interpreter/ops/Reshape.h @@ -22,53 +22,39 @@ #include "OperationImpl.h" #include "Fill.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl -{ - -using nncc::contrib::core::IR::model::ops::ReshapeOp; template 
class Reshape : public OperationImpl { public: - Reshape(const TensorVariant &input, const ReshapeOp &op) : _input(input), _op(op) + Reshape(const mir::TensorVariant &input, const mir::ops::ReshapeOp &op) : _input(input), _op(op) { assert(num_elements(_op.getInputShape(0)) == num_elements(_op.getOutputShape(0))); } - std::vector operator()() override + std::vector operator()() override { - const Shape &outShape = _op.getOutputShape(0); - const Shape &inShape = _op.getInputShape(0); + const mir::Shape &outShape = _op.getOutputShape(0); + const mir::Shape &inShape = _op.getInputShape(0); - ShapeRange inRange(inShape); - ShapeRange outRange(outShape); + mir::ShapeRange inRange(inShape); + mir::ShapeRange outRange(outShape); auto inIter = inRange.begin(); auto out = OperationImpl::allocate_tensor(outShape); - Tensor outAccessor(out); + mir::Tensor outAccessor(out); // Shapes element count compared in Reshape ctor - return Fill(outShape, [this, &inIter](const Index &) -> float { return _input.at(*inIter++); })(); + return Fill(outShape, [this, &inIter](const mir::Index &) -> float { return _input.at(*inIter++); })(); } private: - Tensor _input; - const ReshapeOp &_op; + mir::Tensor _input; + const mir::ops::ReshapeOp &_op; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_RESHAPE_IMPL_ diff --git a/contrib/nnc/passes/interpreter/ops/Scale.cpp b/contrib/nnc/passes/interpreter/ops/Scale.cpp index b8732e5..32fe358 100644 --- a/contrib/nnc/passes/interpreter/ops/Scale.cpp +++ b/contrib/nnc/passes/interpreter/ops/Scale.cpp @@ -18,28 +18,16 @@ #include "Fill.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { -std::vector Scale::operator()() +std::vector Scale::operator()() { //For now handles only most common case with scale applied by last dimension - Tensor 
weightsAccessor(_op.getWeights()); - return Fill(_input.getShape(), [this, weightsAccessor](const Index &idx) { + mir::Tensor weightsAccessor(_op.getWeights()); + return Fill(_input.getShape(), [this, weightsAccessor](const mir::Index &idx) { return _input.at(idx) * weightsAccessor.at({idx.at(idx.rank() - 1)}); })(); } -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/interpreter/ops/Scale.h b/contrib/nnc/passes/interpreter/ops/Scale.h index 2cb4fc8..e1bbd2e 100644 --- a/contrib/nnc/passes/interpreter/ops/Scale.h +++ b/contrib/nnc/passes/interpreter/ops/Scale.h @@ -21,45 +21,31 @@ #include "core/modelIR/operations/scale_op.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { /** * @brief Implements ScaleOp for interpreter backend * @todo check if I need support for any datatypes other than DTYPE::FLOAT */ -using nncc::contrib::core::IR::model::ops::ScaleOp; - class Scale : public OperationImpl { public: /** * @param in input data * @param op scale operation description */ - explicit Scale(const TensorVariant& in, const ScaleOp& op) : _input(in), _op(op) {} + explicit Scale(const mir::TensorVariant& in, const mir::ops::ScaleOp& op) : _input(in), _op(op) {} /** * @brief computes operation aplication result * @return vector of all outputs from this node */ - std::vector operator()() override; + std::vector operator()() override; private: - Tensor _input; - const ScaleOp& _op; + mir::Tensor _input; + const mir::ops::ScaleOp& _op; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // _NNC_CORE_BACKEND_INTERPRETER_SCALE_IMPL_ diff --git a/contrib/nnc/passes/interpreter/ops/Softmax.h b/contrib/nnc/passes/interpreter/ops/Softmax.h index 2c7c2e8..0a52d6d 100644 --- 
a/contrib/nnc/passes/interpreter/ops/Softmax.h +++ b/contrib/nnc/passes/interpreter/ops/Softmax.h @@ -26,54 +26,42 @@ #include "Elementwise.h" #include "Reduce.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { class Softmax : public OperationImpl { public: - Softmax(const Shape &inputShape, const TensorVariant &input, uint32_t axis) + Softmax(const mir::Shape &inputShape, const mir::TensorVariant &input, uint32_t axis) : _inShape(inputShape), _axis(axis), _input(input) { } - std::vector operator()() override + std::vector operator()() override { - Tensor inputAccessor(_input); + mir::Tensor inputAccessor(_input); - Shape expsumShape = _inShape; + mir::Shape expsumShape = _inShape; expsumShape.dim(_axis) = 1; - TensorVariant expsum = + mir::TensorVariant expsum = Reduce(_inShape, expsumShape, _input, _axis, [](float expsum, float item) { return expsum + std::exp(item); })()[0]; - Tensor expsumAccessor(expsum); + mir::Tensor expsumAccessor(expsum); - return Fill(_inShape, [&inputAccessor, &expsumAccessor, this](const Index &id) { - Index expsumIndex = id; + return Fill(_inShape, [&inputAccessor, &expsumAccessor, this](const mir::Index &id) { + mir::Index expsumIndex = id; expsumIndex.at(_axis) = 0; return std::exp(inputAccessor.at(id)) / expsumAccessor.at(expsumIndex); })(); }; private: - const Shape &_inShape; + const mir::Shape &_inShape; const uint32_t _axis; - const TensorVariant _input; + const mir::TensorVariant _input; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_SOFTMAX_IMPL_ diff --git a/contrib/nnc/passes/interpreter/ops/common.cpp b/contrib/nnc/passes/interpreter/ops/common.cpp index b015dba..b9b9f17 100644 --- a/contrib/nnc/passes/interpreter/ops/common.cpp +++ b/contrib/nnc/passes/interpreter/ops/common.cpp @@ -18,17 +18,11 @@ #include "common.h" 
-namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { +using namespace mir; + void translate(Index &translatedIndex, const Index &sourceIndex, const Index &kernelIndex, const Shape &strides, const Index &paddings) { @@ -40,8 +34,4 @@ void translate(Index &translatedIndex, const Index &sourceIndex, const Index &ke } } -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/interpreter/ops/common.h b/contrib/nnc/passes/interpreter/ops/common.h index 900019b..8da7dd5 100644 --- a/contrib/nnc/passes/interpreter/ops/common.h +++ b/contrib/nnc/passes/interpreter/ops/common.h @@ -19,20 +19,8 @@ #include "core/modelIR/Index.h" #include "core/modelIR/Shape.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl -{ - -using nncc::contrib::core::data::Index; -using nncc::contrib::core::data::Shape; - /// /// Get current input element index using output index, current kernel index, strides and paddings @@ -42,11 +30,7 @@ using nncc::contrib::core::data::Shape; /// \param[in] kernelIndex current kernel element /// \param[in] strides /// \param[in] paddings -void translate(Index &translatedIndex, const Index &sourceIndex, const Index &kernelIndex, - const Shape &strides, const Index &paddings); +void translate(mir::Index &translatedIndex, const mir::Index &sourceIndex, const mir::Index &kernelIndex, + const mir::Shape &strides, const mir::Index &paddings); -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/interpreter/ops/conv_2D.cpp b/contrib/nnc/passes/interpreter/ops/conv_2D.cpp index 077e298..dfc9a5a 100644 --- a/contrib/nnc/passes/interpreter/ops/conv_2D.cpp +++ b/contrib/nnc/passes/interpreter/ops/conv_2D.cpp @@ 
-21,18 +21,11 @@ #include "conv_2D.h" #include "common.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { -using namespace nncc::contrib::core::data; +using namespace mir; +using namespace mir::ops; Index reduce(const Index &idx) { @@ -105,8 +98,4 @@ Conv2D::Conv2D(const TensorVariant &input, const Conv2DOp &op) assert(_op.getPadding(2) == 0); } -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/interpreter/ops/conv_2D.h b/contrib/nnc/passes/interpreter/ops/conv_2D.h index 2ea8694..d93e1ef 100644 --- a/contrib/nnc/passes/interpreter/ops/conv_2D.h +++ b/contrib/nnc/passes/interpreter/ops/conv_2D.h @@ -20,39 +20,25 @@ #include "OperationImpl.h" #include "core/modelIR/operations/conv_2d_op.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl -{ -using nncc::contrib::core::IR::model::ops::Conv2DOp; -using nncc::contrib::core::IR::model::ops::PaddingType; class Conv2D : public OperationImpl { public: - explicit Conv2D(const TensorVariant &input, const Conv2DOp &op); - std::vector operator()() override; + explicit Conv2D(const mir::TensorVariant &input, const mir::ops::Conv2DOp &op); + std::vector operator()() override; private: - const Tensor _input; - Tensor _kernel; - const Shape _strides; - const PaddingType _padding; - const Shape &_out_shape; - const Conv2DOp &_op; + const mir::Tensor _input; + mir::Tensor _kernel; + const mir::Shape _strides; + const mir::ops::PaddingType _padding; + const mir::Shape &_out_shape; + const mir::ops::Conv2DOp &_op; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_CONV2D_IMPL diff --git a/contrib/nnc/passes/interpreter/ops/conv_FFT.cpp 
b/contrib/nnc/passes/interpreter/ops/conv_FFT.cpp index 9760f8d..f07f02a 100644 --- a/contrib/nnc/passes/interpreter/ops/conv_FFT.cpp +++ b/contrib/nnc/passes/interpreter/ops/conv_FFT.cpp @@ -21,18 +21,11 @@ #include "conv_FFT.h" #include "common.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl +namespace nnc { -using namespace nncc::contrib::core::data; +using namespace mir; +using namespace mir::ops; // Mostly compatible with tensorflow implementation // Assuming input is in NHWC format with batch omitted( [in_height, in_width, in_channels] ) @@ -294,8 +287,4 @@ void Conv2D_FFT::ifft_CT(FFT_complex* array, } } -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/interpreter/ops/conv_FFT.h b/contrib/nnc/passes/interpreter/ops/conv_FFT.h index ab87dd6..982a94e 100644 --- a/contrib/nnc/passes/interpreter/ops/conv_FFT.h +++ b/contrib/nnc/passes/interpreter/ops/conv_FFT.h @@ -44,19 +44,8 @@ #include "OperationImpl.h" #include "core/modelIR/operations/conv_2d_op.h" -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace interpreter -{ -namespace impl -{ -using nncc::contrib::core::IR::model::ops::Conv2DOp; -using nncc::contrib::core::IR::model::ops::PaddingType; -using nncc::contrib::core::data::Tensor; typedef std::complex FFT_complex; @@ -64,19 +53,19 @@ typedef std::complex FFT_complex; class Conv2D_FFT : public OperationImpl { public: - explicit Conv2D_FFT(const TensorVariant &input, const Conv2DOp &op); - std::vector operator()() override; + explicit Conv2D_FFT(const mir::TensorVariant &input, const mir::ops::Conv2DOp &op); + std::vector operator()() override; protected: /// /// Pad input (with zeroes) according to selected padding type (paddings are calculated in ShapeInference) /// - std::vector pad_input(const Index &pads); + std::vector pad_input(const 
mir::Index &pads); /// /// Unpack kernels for each out_channel and pad them with zeroes to input size /// - std::vector> unpack_and_pad_kernels(const Shape &paddedInputShape, const uint64_t spectreSize); + std::vector> unpack_and_pad_kernels(const mir::Shape &paddedInputShape, const uint64_t spectreSize); /// /// This function performs elementwise product of input by each kernel @@ -87,11 +76,11 @@ protected: /// /// Perform Inverse Fast Fourier transform on elementwise products results. Return result of the convolution. /// - TensorVariant ifft(std::vector> &spectres, - const Shape &inShape, - const Shape &outShape, - const Shape &strides, - const Index &paddings); + mir::TensorVariant ifft(std::vector> &spectres, + const mir::Shape &inShape, + const mir::Shape &outShape, + const mir::Shape &strides, + const mir::Index &paddings); /// /// Separate even/odd elements to lower/upper halves of array respectively. @@ -111,18 +100,14 @@ protected: void ifft_CT(FFT_complex* array, const uint64_t elements); private: - const Tensor _input; - Tensor _kernel; - const Shape _strides; - const PaddingType _padding; - const Shape &_out_shape; - const Conv2DOp &_op; + const mir::Tensor _input; + mir::Tensor _kernel; + const mir::Shape _strides; + const mir::ops::PaddingType _padding; + const mir::Shape &_out_shape; + const mir::ops::Conv2DOp &_op; }; -} // namespace impl -} // namespace interpreter -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_CORE_BACKEND_INTERPRETER_CONV2D_FFT_IMPL_ diff --git a/contrib/nnc/passes/soft_backend/BaseGenerator.cpp b/contrib/nnc/passes/soft_backend/BaseGenerator.cpp index fe4bedb..e893529 100644 --- a/contrib/nnc/passes/soft_backend/BaseGenerator.cpp +++ b/contrib/nnc/passes/soft_backend/BaseGenerator.cpp @@ -35,17 +35,8 @@ #include using namespace std; -using namespace nncc::contrib; -using namespace nncc::contrib::pass; -using namespace nncc::contrib::core::IR::model; -namespace nncc -{ 
-namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { namespace @@ -86,7 +77,7 @@ void createDir(const string &path) BaseCodeGenerator::BaseCodeGenerator() { - string basePath = clopt::artifactDir + "/" + clopt::artifactName; + string basePath = cli::artifactDir + "/" + cli::artifactName; _headerPath = basePath + ".h"; _codePath = basePath + ".cpp"; _paramsPath = basePath + ".params"; @@ -121,11 +112,11 @@ void BaseCodeGenerator::materializeModelParams(ostream &out, const Serializer &s PassData BaseCodeGenerator::run(PassData data) { - auto g = static_cast(data); + auto g = static_cast(data); assert(g); // inference shapes - core::IR::model::ShapeInference si; + mir::ShapeInference si; g->accept(&si); // visit and analyze graph ModelAnalyzer ma; @@ -136,7 +127,7 @@ PassData BaseCodeGenerator::run(PassData data) // rename tensors for specific backend language formatTensorNames(ma); - createDir(clopt::artifactDir); + createDir(cli::artifactDir); // Print header auto headerStream = getStream(_headerPath); @@ -156,8 +147,5 @@ PassData BaseCodeGenerator::run(PassData data) return nullptr; } -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/soft_backend/CGenerator.cpp b/contrib/nnc/passes/soft_backend/CGenerator.cpp index 73a6fee..db0b45b 100644 --- a/contrib/nnc/passes/soft_backend/CGenerator.cpp +++ b/contrib/nnc/passes/soft_backend/CGenerator.cpp @@ -18,16 +18,8 @@ #include "ModelAnalyzer.h" using namespace std; -using namespace nncc::contrib; -using namespace nncc::contrib::core::IR::model; -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { void CCodeGenerator::formatTensorNames(const ModelAnalyzer &ma) @@ -51,7 +43,4 @@ Pass &CCodeGenerator::getInstance() return instance; } -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git 
a/contrib/nnc/passes/soft_backend/CPPGenerator.cpp b/contrib/nnc/passes/soft_backend/CPPGenerator.cpp index 68eca6a..211f10e 100644 --- a/contrib/nnc/passes/soft_backend/CPPGenerator.cpp +++ b/contrib/nnc/passes/soft_backend/CPPGenerator.cpp @@ -20,8 +20,6 @@ #include "option/Options.h" using namespace std; -using namespace nncc::contrib; -using namespace nncc::contrib::core::IR::model; #include "CommonData.def" @@ -43,13 +41,7 @@ using namespace nncc::contrib::core::IR::model; #include "cpp_dropout.generated.h" #include "cpp_batchnorm.generated.h" -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { using TensorType = TensorDescription::Type; @@ -209,7 +201,7 @@ void CPPCodeGenerator::printSetter(ostream &out, const string &className, const out << "bool " << className << "::set" << setterName << "(const Tensor& t)\n" "{\n"; // need to insert input correctness check - const core::data::Shape expected = td._shape; + const mir::Shape expected = td._shape; int rank = expected.rank(); if (rank != 0) { @@ -268,7 +260,7 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma, co { string className = ma.getModelName() + "Model"; - out << "#include \"" << clopt::artifactName << ".h\"\n"; + out << "#include \"" << cli::artifactName << ".h\"\n"; // put operations from tflite out.write(eigen, sizeof(eigen)); @@ -352,7 +344,4 @@ Pass &CPPCodeGenerator::getInstance() return cppCodeGenerator; } -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp index 62b18a2..c25f368 100644 --- a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp +++ b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp @@ -38,21 +38,12 @@ using namespace std; -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { -using 
nncc::contrib::core::data::Shape; -using nncc::contrib::core::data::Index; -using nncc::contrib::core::data::ShapeRange; -using nncc::contrib::core::ADT::TensorVariant; +using namespace nnc::mir; -void ModelAnalyzer::addOpDescr(ADT::INode *node, const string &opName) +void ModelAnalyzer::addOpDescr(INode *node, const string &opName) { OpDescr::Type type = OpDescr::Type::ORDINARY; vector nodeOutputs; @@ -89,10 +80,10 @@ void ModelAnalyzer::addOpDescr(ADT::INode *node, const string &opName) } // process node inputs vector nodeInputs; - for (const ADT::INode::IODescriptor &d: node->getPrevNodes()) + for (const INode::IODescriptor &d: node->getPrevNodes()) { size_t idx = d.index; - ADT::INode *node = d.node; + INode *node = d.node; assert(_nodeToDescr.find(node) != _nodeToDescr.end()); const OpDescr &descr = *_nodeToDescr[node]; const size_t &inTid = descr._outputs[idx]; @@ -121,22 +112,22 @@ size_t ModelAnalyzer::allocateTensor(const string &name, TensorDescription::Type return id; } -void ModelAnalyzer::visit(ADT::INode *node, ops::ConcatOp &op) +void ModelAnalyzer::visit(INode *node, ops::ConcatOp &op) { addOpDescr(node, "concat"); } -void ModelAnalyzer::visit(ADT::INode *node, ops::Conv2DOp &op) +void ModelAnalyzer::visit(INode *node, ops::Conv2DOp &op) { addOpDescr(node, "conv2d"); } -void ModelAnalyzer::visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) +void ModelAnalyzer::visit(INode *node, ops::DepthwiseConv2DOp &op) { addOpDescr(node, "depthwiseConv2d"); } -void ModelAnalyzer::visit(ADT::INode *node, ops::SoftmaxOp &op) +void ModelAnalyzer::visit(INode *node, ops::SoftmaxOp &op) { addOpDescr(node, "softmax"); } @@ -145,7 +136,7 @@ void ModelAnalyzer::visit(ADT::INode *node, ops::SoftmaxOp &op) * Model Ir does not separate different types of pool operations, but for code generation * it is easier to implement different types of pooling by different functions */ -void ModelAnalyzer::visit(ADT::INode *node, ops::PoolOp &op) +void ModelAnalyzer::visit(INode 
*node, ops::PoolOp &op) { const char *funcName = nullptr; switch (op.getPoolingType()) @@ -162,53 +153,50 @@ void ModelAnalyzer::visit(ADT::INode *node, ops::PoolOp &op) addOpDescr(node, funcName); } -void ModelAnalyzer::visit(ADT::INode *node, ops::FullyConnectedOp &op) +void ModelAnalyzer::visit(INode *node, ops::FullyConnectedOp &op) { addOpDescr(node, "fullConnect"); } -void ModelAnalyzer::visit(ADT::INode *node, ops::CappedReluOp &op) +void ModelAnalyzer::visit(INode *node, ops::CappedReluOp &op) { addOpDescr(node, "cappedRelu"); } -void ModelAnalyzer::visit(ADT::INode *node, ops::BiasAddOp &op) +void ModelAnalyzer::visit(INode *node, ops::BiasAddOp &op) { addOpDescr(node, "biasAdd"); } -void ModelAnalyzer::visit(ADT::INode *node, ops::VariableOp &op) +void ModelAnalyzer::visit(INode *node, ops::VariableOp &op) { assert(node->getPrevNodes().empty()); addOpDescr(node, "in"); } -void ModelAnalyzer::visit(ADT::INode *node, ops::ReluOp &op) +void ModelAnalyzer::visit(INode *node, ops::ReluOp &op) { addOpDescr(node, "relu"); } -void ModelAnalyzer::visit(ADT::INode *node, ops::ReshapeOp &op) +void ModelAnalyzer::visit(INode *node, ops::ReshapeOp &op) { addOpDescr(node, "reshape"); } -void ModelAnalyzer::visit(ADT::INode *node, ops::DropoutOp &op) +void ModelAnalyzer::visit(INode *node, ops::DropoutOp &op) { addOpDescr(node, "dropout"); } -void ModelAnalyzer::visit(ADT::INode *node, ops::ScaleOp &op) +void ModelAnalyzer::visit(INode *node, ops::ScaleOp &op) { addOpDescr(node, "scale"); } -void ModelAnalyzer::visit(ADT::INode *node, ops::BatchNormOp &op) +void ModelAnalyzer::visit(INode *node, ops::BatchNormOp &op) { addOpDescr(node, "batchNorm"); } -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/soft_backend/ModelAnalyzer.h b/contrib/nnc/passes/soft_backend/ModelAnalyzer.h index 38c20e0..2371061 100644 --- a/contrib/nnc/passes/soft_backend/ModelAnalyzer.h +++ 
b/contrib/nnc/passes/soft_backend/ModelAnalyzer.h @@ -28,18 +28,8 @@ #include #include -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace soft -{ - -namespace model = nncc::contrib::core::IR::model; -namespace ADT = model::ADT; -namespace ops = model::ops; const size_t INVALID_TENSOR_ID = std::numeric_limits::max(); @@ -59,7 +49,7 @@ struct TensorDescription Type _type; std::string _name; // if _shape.rank() == 0 - assume shape is not known for this tensor on compilation - core::data::Shape _shape; + mir::Shape _shape; }; /** @@ -75,7 +65,7 @@ struct OpDescr }; Type _type; - ADT::INode *_node; + mir::INode *_node; std::string _opName; // list of input tensors std::vector _inputs; @@ -88,23 +78,23 @@ struct OpDescr * @brief Constructs inference sequence for given computational graph, * gathers list of variables used in artifact. */ -class ModelAnalyzer: public model::IVisitor +class ModelAnalyzer: public mir::IVisitor { public: - void visit(ADT::INode *node, ops::ConcatOp &op) override; - void visit(ADT::INode *node, ops::Conv2DOp &op) override; - void visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) override; - void visit(ADT::INode *node, ops::SoftmaxOp &op) override; - void visit(ADT::INode *node, ops::PoolOp &op) override; - void visit(ADT::INode *node, ops::FullyConnectedOp &op) override; - void visit(ADT::INode *node, ops::CappedReluOp &op) override; - void visit(ADT::INode *node, ops::BiasAddOp &op) override; - void visit(ADT::INode *node, ops::VariableOp &op) override; - void visit(ADT::INode *node, ops::ReluOp &op) override; - void visit(ADT::INode *node, ops::ReshapeOp &op) override; - void visit(ADT::INode *node, ops::ScaleOp &op) override; - void visit(ADT::INode *node, ops::BatchNormOp &op) override; - void visit(ADT::INode *node, ops::DropoutOp &op) override; + void visit(mir::INode *node, mir::ops::ConcatOp &op) override; + void visit(mir::INode *node, mir::ops::Conv2DOp &op) override; + void visit(mir::INode 
*node, mir::ops::DepthwiseConv2DOp &op) override; + void visit(mir::INode *node, mir::ops::SoftmaxOp &op) override; + void visit(mir::INode *node, mir::ops::PoolOp &op) override; + void visit(mir::INode *node, mir::ops::FullyConnectedOp &op) override; + void visit(mir::INode *node, mir::ops::CappedReluOp &op) override; + void visit(mir::INode *node, mir::ops::BiasAddOp &op) override; + void visit(mir::INode *node, mir::ops::VariableOp &op) override; + void visit(mir::INode *node, mir::ops::ReluOp &op) override; + void visit(mir::INode *node, mir::ops::ReshapeOp &op) override; + void visit(mir::INode *node, mir::ops::ScaleOp &op) override; + void visit(mir::INode *node, mir::ops::BatchNormOp &op) override; + void visit(mir::INode *node, mir::ops::DropoutOp &op) override; /** * @return vector of id's of network input tensors @@ -171,7 +161,7 @@ private: * Inserts information about CG operation into inference sequence: name of operation, * creates tensors for operation outputs, binds operation inputs with tensors from previous operations */ - void addOpDescr(ADT::INode *node, const std::string &name); + void addOpDescr(mir::INode *node, const std::string &name); enum class TensorType { @@ -188,7 +178,7 @@ private: */ size_t allocateTensor(const std::string &name = std::string(), TensorDescription::Type type = TensorDescription::Type::ORDINARY, - core::data::Shape *shape = nullptr); + mir::Shape *shape = nullptr); std::string _modelName = "NN"; std::list _inferenceSequence; @@ -197,12 +187,9 @@ private: std::vector _named_tensors; std::vector _outputs; std::vector _tensors; - std::map _nodeToDescr; + std::map _nodeToDescr; }; -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_SOFT_BACKEND_MODEL_ANALYZER_H_ diff --git a/contrib/nnc/passes/soft_backend/SBSerializer.cpp b/contrib/nnc/passes/soft_backend/SBSerializer.cpp index 74ad61a..85fad32 100644 --- 
a/contrib/nnc/passes/soft_backend/SBSerializer.cpp +++ b/contrib/nnc/passes/soft_backend/SBSerializer.cpp @@ -40,24 +40,21 @@ #define UNUSED(x) ((void)(x)) -namespace nncc -{ -namespace contrib -{ -namespace backend -{ -namespace soft +namespace nnc { static_assert(std::numeric_limits::is_iec559, "Unsupported float type"); using namespace std; -using nncc::contrib::core::data::Shape; -using nncc::contrib::core::data::Index; -using nncc::contrib::core::data::ShapeRange; -using nncc::contrib::core::data::util::transposeTensor; -using nncc::contrib::core::ADT::TensorVariant; +using nnc::mir::Shape; +using nnc::mir::Index; +using nnc::mir::ShapeRange; +using nnc::mir::transposeTensor; +using nnc::mir::TensorVariant; +using nnc::mir::INode; + +namespace ops = nnc::mir::ops; namespace { @@ -148,7 +145,7 @@ void Serializer::serializePads(const Op &op, int32_t padsRank) } } -void Serializer::visit(ADT::INode *node, ops::ConcatOp &op) +void Serializer::visit(INode *node, ops::ConcatOp &op) { _curOp->_paramStartOffset = _buffer.size(); // axis number should fit into one byte @@ -157,7 +154,7 @@ void Serializer::visit(ADT::INode *node, ops::ConcatOp &op) serializeShape(op.getOutputShape(0)); } -void Serializer::visit(ADT::INode *node, ops::Conv2DOp &op) +void Serializer::visit(INode *node, ops::Conv2DOp &op) { _curOp->_paramStartOffset = _buffer.size(); // serialize kernel @@ -174,7 +171,7 @@ void Serializer::visit(ADT::INode *node, ops::Conv2DOp &op) serializeShape(op.getOutputShape(0)); } -void Serializer::visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) +void Serializer::visit(INode *node, ops::DepthwiseConv2DOp &op) { _curOp->_paramStartOffset = _buffer.size(); // serialize kernel @@ -189,7 +186,7 @@ void Serializer::visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) serializeShape(op.getOutputShape(0)); } -void Serializer::visit(ADT::INode *node, ops::SoftmaxOp &op) +void Serializer::visit(INode *node, ops::SoftmaxOp &op) { _curOp->_paramStartOffset = _buffer.size(); 
// axis number should fit into one byte @@ -197,7 +194,7 @@ void Serializer::visit(ADT::INode *node, ops::SoftmaxOp &op) serializeT(op.getAxis()); } -void Serializer::visit(ADT::INode *node, ops::PoolOp &op) +void Serializer::visit(INode *node, ops::PoolOp &op) { _curOp->_paramStartOffset = _buffer.size(); // serialize window shape @@ -219,14 +216,14 @@ void Serializer::visit(ADT::INode *node, ops::PoolOp &op) borderType = PoolBorderType::ZEROFILLED; break; default: - throw pass::PassException("Unsupported border type in pooling"); + throw PassException("Unsupported border type in pooling"); } serializeT(etoi(borderType)); // serialize output shape serializeShape(op.getOutputShape(0)); } -void Serializer::visit(ADT::INode *node, ops::FullyConnectedOp &op) +void Serializer::visit(INode *node, ops::FullyConnectedOp &op) { _curOp->_paramStartOffset = _buffer.size(); shared_ptr weights = make_shared(op.getWeights()); @@ -235,36 +232,36 @@ void Serializer::visit(ADT::INode *node, ops::FullyConnectedOp &op) serializeShape(op.getOutputShape(0)); } -void Serializer::visit(ADT::INode *node, ops::CappedReluOp &op) +void Serializer::visit(INode *node, ops::CappedReluOp &op) { _curOp->_paramStartOffset = _buffer.size(); serializeT(op.getCap()); } -void Serializer::visit(ADT::INode *node, ops::BiasAddOp &op) +void Serializer::visit(INode *node, ops::BiasAddOp &op) { _curOp->_paramStartOffset = _buffer.size(); serializeTensor(op.getWeights()); } -void Serializer::visit(ADT::INode *node, ops::VariableOp &op) +void Serializer::visit(INode *node, ops::VariableOp &op) { // no parameters to dump } -void Serializer::visit(ADT::INode *node, ops::ReluOp &op) +void Serializer::visit(INode *node, ops::ReluOp &op) { _curOp->_paramStartOffset = _buffer.size(); // no parameters to dump } -void Serializer::visit(ADT::INode *node, ops::ReshapeOp &op) +void Serializer::visit(INode *node, ops::ReshapeOp &op) { _curOp->_paramStartOffset = _buffer.size(); serializeShape(op.getOutputShape(0)); } 
-void Serializer::visit(ADT::INode *node, ops::BatchNormOp &op) +void Serializer::visit(INode *node, ops::BatchNormOp &op) { _curOp->_paramStartOffset = _buffer.size(); serializeT(op.getEps()); @@ -272,13 +269,13 @@ void Serializer::visit(ADT::INode *node, ops::BatchNormOp &op) serializeT(op.getSpatial()); } -void Serializer::visit(ADT::INode *node, ops::ScaleOp &op) +void Serializer::visit(INode *node, ops::ScaleOp &op) { _curOp->_paramStartOffset = _buffer.size(); serializeTensor(op.getWeights()); } -void Serializer::visit(ADT::INode *node, ops::DropoutOp &op) +void Serializer::visit(INode *node, ops::DropoutOp &op) { _curOp->_paramStartOffset = _buffer.size(); serializeT(op.getRate()); @@ -288,13 +285,10 @@ void Serializer::serialize(list &inferenceSequence) { for (OpDescr &descr: inferenceSequence) { - ADT::INode *node = descr._node; + INode *node = descr._node; _curOp = &descr; node->accept(this); } } -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/soft_backend/SBSerializer.h b/contrib/nnc/passes/soft_backend/SBSerializer.h index 8b7f8b7..0774316 100644 --- a/contrib/nnc/passes/soft_backend/SBSerializer.h +++ b/contrib/nnc/passes/soft_backend/SBSerializer.h @@ -25,18 +25,8 @@ #include #include -namespace nncc +namespace nnc { -namespace contrib -{ -namespace backend -{ -namespace soft -{ - -namespace model = nncc::contrib::core::IR::model; -namespace ADT = model::ADT; -namespace ops = model::ops; /** * @brief Serializer of network parameters for soft backend @@ -48,24 +38,24 @@ namespace ops = model::ops; * To gather this vector use `getBuffer` method. 
* Objects of this class are one-off and not designed to serialize more than one IR */ -class Serializer: public model::IVisitor +class Serializer: public mir::IVisitor { public: - void visit(ADT::INode *node, ops::ConcatOp &op) override; - void visit(ADT::INode *node, ops::Conv2DOp &op) override; - void visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) override; - void visit(ADT::INode *node, ops::SoftmaxOp &op) override; - void visit(ADT::INode *node, ops::PoolOp &op) override; - void visit(ADT::INode *node, ops::FullyConnectedOp &op) override; - void visit(ADT::INode *node, ops::CappedReluOp &op) override; - void visit(ADT::INode *node, ops::BiasAddOp &op) override; - void visit(ADT::INode *node, ops::VariableOp &op) override; - void visit(ADT::INode *node, ops::ReluOp &op) override; - void visit(ADT::INode *node, ops::ReshapeOp &op) override; - void visit(ADT::INode *node, ops::ScaleOp &op) override; - void visit(ADT::INode *node, ops::BatchNormOp &op) override; - void visit(ADT::INode *node, ops::DropoutOp &op) override; + void visit(mir::INode *node, mir::ops::ConcatOp &op) override; + void visit(mir::INode *node, mir::ops::Conv2DOp &op) override; + void visit(mir::INode *node, mir::ops::DepthwiseConv2DOp &op) override; + void visit(mir::INode *node, mir::ops::SoftmaxOp &op) override; + void visit(mir::INode *node, mir::ops::PoolOp &op) override; + void visit(mir::INode *node, mir::ops::FullyConnectedOp &op) override; + void visit(mir::INode *node, mir::ops::CappedReluOp &op) override; + void visit(mir::INode *node, mir::ops::BiasAddOp &op) override; + void visit(mir::INode *node, mir::ops::VariableOp &op) override; + void visit(mir::INode *node, mir::ops::ReluOp &op) override; + void visit(mir::INode *node, mir::ops::ReshapeOp &op) override; + void visit(mir::INode *node, mir::ops::ScaleOp &op) override; + void visit(mir::INode *node, mir::ops::BatchNormOp &op) override; + void visit(mir::INode *node, mir::ops::DropoutOp &op) override; void 
serialize(std::list &inferenceSequence); @@ -101,13 +91,13 @@ private: * @brief Serialize Tensor shape object * @param s shape to serialize */ - void serializeShape(const nncc::contrib::core::data::Shape &s); + void serializeShape(const nnc::mir::Shape &s); /** * @brief Function serializes type of given tensor base data, * it's shape and raw data in 'c' format(i.e. layout of multidimensional C array) * @param t Tensor to serialize */ - void serializeTensor(const contrib::core::ADT::TensorVariant &t); + void serializeTensor(const mir::TensorVariant &t); /** * @brief Serialize pads for operations like Conv2D * @tparam Op Operation type @@ -123,10 +113,7 @@ private: std::vector _buffer; }; -} // namespace soft -} // namespace backend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif //_NNC_SOFT_BACKEND_SERIALIZER_H_ diff --git a/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.cpp b/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.cpp index 1cccb0f..27d1f84 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.cpp @@ -25,11 +25,7 @@ static std::ostream &operator<<(std::ostream &os, const flatbuffers::Vector *v) { diff --git a/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.h b/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.h index aa99138..446836a 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.h @@ -24,11 +24,7 @@ using namespace v3_tflite; -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace tflite { @@ -52,8 +48,6 @@ private: }; } // namespace tflite -} // namespace frontend -} // namespace contrib } // namespace nnc #endif // NNCC_TFLITE_DUMP_VISITOR_H diff --git a/contrib/nnc/passes/tflite_frontend/tflite_frontend.cpp b/contrib/nnc/passes/tflite_frontend/tflite_frontend.cpp index a999aee..69655d1 100644 --- 
a/contrib/nnc/passes/tflite_frontend/tflite_frontend.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_frontend.cpp @@ -25,14 +25,8 @@ #include "tflite_v3_importer.h" -using namespace nncc::contrib; -using namespace nncc::contrib::pass; -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace tflite { @@ -45,19 +39,17 @@ Pass &TFLiteFrontend::getInstance() PassData TFLiteFrontend::run(PassData data) { - nncc::contrib::frontend::tflite::v3::TfliteImporter importer{clopt::inputFile}; + nnc::tflite::v3::TfliteImporter importer{cli::inputFile}; bool success = importer.import(); if (!success) { - throw PassException("Could not load model: " + clopt::inputFile + "\n"); + throw PassException("Could not load model: " + cli::inputFile + "\n"); } - return reinterpret_cast(importer.createIR()); + return reinterpret_cast(importer.createIR()); } } // namespace tflite -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.inline.h b/contrib/nnc/passes/tflite_frontend/tflite_importer.inline.h index 8966679..7ddffde 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_importer.inline.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_importer.inline.h @@ -14,7 +14,6 @@ * limitations under the License. 
*/ -using namespace nncc::contrib::frontend::common; class TfliteImporter : NNImporter { diff --git a/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.cpp b/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.cpp index c818c9c..be891ed 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.cpp @@ -28,21 +28,16 @@ #include "tflite_ir_visitor.h" #include "tflite_op_creator.h" -using namespace nncc::contrib::pass; -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace tflite { -using nncc::contrib::core::data::Index; -using VariableOp = nncc::contrib::core::IR::model::ops::VariableOp; -using nncc::contrib::core::data::Shape; -using nncc::contrib::core::data::util::transposeTensor; +using nnc::mir::Index; +using VariableOp = nnc::mir::ops::VariableOp; +using nnc::mir::Shape; +using nnc::mir::transposeTensor; IrVisitor::IrVisitor() { @@ -72,10 +67,10 @@ void IrVisitor::visit(const SubGraph *s) auto node = graph->create(t->name()->c_str()); opsForTensorsTheyOutput[i] = node; - Shape inputShape = common::ShapeHelper::createShape(*t->shape(), t->shape()->size()); + Shape inputShape = ShapeHelper::createShape(*t->shape(), t->shape()->size()); // So far we assume that if the first dimension is equal to 1, // then it is the batch dimension and should be ignored - common::ShapeHelper::cutOffBatchDim(inputShape); + ShapeHelper::cutOffBatchDim(inputShape); node->getOperation()->setOutputShape(0, inputShape); } } @@ -233,7 +228,7 @@ std::shared_ptr IrVisitor::createTensor(const Tensor *t, const Buffer EnumNamesTensorType()[t->type()]); } - Shape tensorShape = common::ShapeHelper::createShape(*t->shape(), t->shape()->size()); + Shape tensorShape = ShapeHelper::createShape(*t->shape(), t->shape()->size()); return std::make_shared(tensorShape, tensorBufferCopy, type, elementSize); } @@ -261,6 +256,4 @@ void IrVisitor::setIrNodeNames() } } // namespace tflite -} // 
namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.h b/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.h index fd40ac2..2eb3565 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.h @@ -31,20 +31,16 @@ #include "tflite_visitor.h" #include "tflite_op_creator.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace tflite { using namespace v3_tflite; -using nncc::contrib::core::IR::model::Graph; -using nncc::contrib::core::IR::model::ADT::INode; -using IrTensor = nncc::contrib::core::ADT::TensorVariant; +using nnc::mir::Graph; +using nnc::mir::INode; +using IrTensor = nnc::mir::TensorVariant; class IrVisitor : public Visitor { @@ -86,8 +82,6 @@ private: }; } // namespace tflite -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // NNCC_TFLITE_IR_VISITOR_H diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp index fd7edbf..34728b0 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp @@ -28,13 +28,8 @@ #include "core/modelIR/operations/reshape_op.h" #include "pass/PassException.h" -using namespace nncc::contrib::pass; -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace tflite { @@ -108,7 +103,7 @@ std::vector OpCreator::createReshape(InputOps inputs, InputParams pa // TODO: we should also support "-1" values in new_shape, which means that correct // shape values must be calculated. Better do it in the shape inference module. 
- Shape newShape = common::ShapeHelper::createShape(*opts->new_shape(), opts->new_shape()->size()); + Shape newShape = ShapeHelper::createShape(*opts->new_shape(), opts->new_shape()->size()); outputs[0]->getOperation()->setOutputShape(0, newShape); return outputs; @@ -168,6 +163,4 @@ void OpCreator::connectInputs(INode::Ref op, std::vector &inputs) } } // namespace tflite -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h index 828bbae..a3e8011 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h @@ -32,22 +32,18 @@ #include "schema_v3.h" #include "passes/common_frontend/shape_helper.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace tflite { using namespace v3_tflite; -namespace ops = nncc::contrib::core::IR::model::ops; -using nncc::contrib::core::IR::model::Graph; -using nncc::contrib::core::IR::model::ADT::INode; -using IrTensor = nncc::contrib::core::ADT::TensorVariant; -using nncc::contrib::core::data::Shape; +namespace ops = nnc::mir::ops; +using nnc::mir::Graph; +using nnc::mir::INode; +using IrTensor = nnc::mir::TensorVariant; +using nnc::mir::Shape; class OpCreator { @@ -104,8 +100,6 @@ std::vector OpCreator::createOp(std::vector &inputs, } } // namespace tflite -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // NNCC_TFLITE_OP_CREATOR_H diff --git a/contrib/nnc/passes/tflite_frontend/tflite_v3_importer.cpp b/contrib/nnc/passes/tflite_frontend/tflite_v3_importer.cpp index ba1d69c..26654bc 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_v3_importer.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_v3_importer.cpp @@ -21,11 +21,7 @@ #include "tflite_dump_visitor.h" #include "tflite_walker.h" -namespace nncc -{ -namespace 
contrib -{ -namespace frontend +namespace nnc { namespace tflite { @@ -38,6 +34,4 @@ using namespace ::v3_tflite; } // namespace v3 } // namespace tflite -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/tflite_frontend/tflite_v3_importer.h b/contrib/nnc/passes/tflite_frontend/tflite_v3_importer.h index 95b997f..2a4236b 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_v3_importer.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_v3_importer.h @@ -24,11 +24,7 @@ #include "passes/common_frontend/nn_importer.h" #include "passes/common_frontend/model_allocation.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace tflite { @@ -39,9 +35,7 @@ using namespace ::v3_tflite; #include "tflite_importer.inline.h" } // namespace v3 -} // namespace contrib } // namespace tflite -} // namespace frontend -} // namespace nncc +} // namespace nnc #endif // NNCC_TFLITE_V3_IMPORTER_H diff --git a/contrib/nnc/passes/tflite_frontend/tflite_visitor.h b/contrib/nnc/passes/tflite_frontend/tflite_visitor.h index fbab146..86626b2 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_visitor.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_visitor.h @@ -21,11 +21,7 @@ using namespace v3_tflite; -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace tflite { @@ -42,8 +38,6 @@ public: }; } // namespace tflite -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // NNCC_TFLITE_VISITOR_H diff --git a/contrib/nnc/passes/tflite_frontend/tflite_walker.cpp b/contrib/nnc/passes/tflite_frontend/tflite_walker.cpp index af519a3..20f1c5a 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_walker.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_walker.cpp @@ -16,11 +16,7 @@ #include "tflite_walker.h" -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace tflite { @@ -44,6 +40,4 
@@ void ModelWalker::walkContents(const OperatorCode *oc) {} void ModelWalker::walkContents(const Operator *) {} } // namespace tflite -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc diff --git a/contrib/nnc/passes/tflite_frontend/tflite_walker.h b/contrib/nnc/passes/tflite_frontend/tflite_walker.h index f421765..32f3c25 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_walker.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_walker.h @@ -27,11 +27,7 @@ using namespace v3_tflite; -namespace nncc -{ -namespace contrib -{ -namespace frontend +namespace nnc { namespace tflite { @@ -80,8 +76,6 @@ void ModelWalker::walkVector(const flatbuffers::Vector> * } } // namespace tflite -} // namespace frontend -} // namespace contrib -} // namespace nncc +} // namespace nnc #endif // NNCC_TFLITE_WALKER_H diff --git a/contrib/nnc/support/CLOptionChecker.cpp b/contrib/nnc/support/CLOptionChecker.cpp index 324acc3..09b8e36 100644 --- a/contrib/nnc/support/CLOptionChecker.cpp +++ b/contrib/nnc/support/CLOptionChecker.cpp @@ -23,13 +23,9 @@ #include #include -using namespace nncc::contrib::clopt; - -namespace nncc -{ -namespace contrib +namespace nnc { -namespace clopt +namespace cli { void checkInFile(const Option &in_file) { @@ -89,6 +85,5 @@ void checkDebugFile(const Option &in_file) } } // checkDebugFile -} // clopt -} // contirb -} // nncc +} // namespace cli +} // namespace nnc diff --git a/contrib/nnc/support/CommandLine.cpp b/contrib/nnc/support/CommandLine.cpp index 5cf7854..f89bf4a 100644 --- a/contrib/nnc/support/CommandLine.cpp +++ b/contrib/nnc/support/CommandLine.cpp @@ -27,13 +27,9 @@ #include "support/CommandLine.h" -using namespace nncc::contrib::clopt; - -namespace nncc -{ -namespace contrib +namespace nnc { -namespace clopt +namespace cli { static std::vector splitByComma(const char *str) @@ -575,6 +571,5 @@ void Option::setValue(const std::string &val) this->setRawValue(this->template convToNum(val)); } -} // 
namespace clopt -} // namespace contrib -} // namespace nncc \ No newline at end of file +} // namespace cli +} // namespace nnc \ No newline at end of file diff --git a/contrib/nnc/tests/import/caffe.cpp b/contrib/nnc/tests/import/caffe.cpp index 5b812e5..cb40ea5 100644 --- a/contrib/nnc/tests/import/caffe.cpp +++ b/contrib/nnc/tests/import/caffe.cpp @@ -20,7 +20,7 @@ #include "caffe_importer.h" -using namespace nncc::contrib; +using namespace nnc; int main(int argc, const char **argv) { @@ -29,10 +29,10 @@ int main(int argc, const char **argv) return 1; } - clopt::CommandLine::getParser()->parseCommandLine(argc, argv); - std::string modelName = clopt::inputFile; + cli::CommandLine::getParser()->parseCommandLine(argc, argv); + std::string modelName = cli::inputFile; - nncc::contrib::frontend::caffe::CaffeImporter importer{modelName}; + nnc::caffe::CaffeImporter importer{modelName}; bool success = importer.import(); diff --git a/contrib/nnc/tests/import/tflite.cpp b/contrib/nnc/tests/import/tflite.cpp index a3989ff..8ed4ee9 100644 --- a/contrib/nnc/tests/import/tflite.cpp +++ b/contrib/nnc/tests/import/tflite.cpp @@ -20,7 +20,7 @@ #include "tflite_v3_importer.h" -using namespace nncc::contrib; +using namespace nnc; int main(int argc, const char **argv) { @@ -29,10 +29,10 @@ int main(int argc, const char **argv) return 1; } - clopt::CommandLine::getParser()->parseCommandLine(argc, argv); - std::string modelName = clopt::inputFile; + cli::CommandLine::getParser()->parseCommandLine(argc, argv); + std::string modelName = cli::inputFile; - nncc::contrib::frontend::tflite::v3::TfliteImporter importer{modelName}; + nnc::tflite::v3::TfliteImporter importer{modelName}; bool success = importer.import(); diff --git a/contrib/nnc/tests/interpreter/graph_creator.cpp b/contrib/nnc/tests/interpreter/graph_creator.cpp index 99a084f..877193a 100644 --- a/contrib/nnc/tests/interpreter/graph_creator.cpp +++ b/contrib/nnc/tests/interpreter/graph_creator.cpp @@ -36,8 +36,9 @@ #include 
"graph_creator.h" #include "op_info_util.h" -using namespace nncc::contrib::frontend::common; -using namespace nncc::contrib::core::IR::model; +using namespace nnc; +using namespace nnc::mir; + static INode::Ref createFullyConnected(std::unique_ptr& g, const opinfo::OperatorInfo* opInfo) { diff --git a/contrib/nnc/tests/interpreter/graph_creator.h b/contrib/nnc/tests/interpreter/graph_creator.h index 54cc7b1..9b84f7d 100644 --- a/contrib/nnc/tests/interpreter/graph_creator.h +++ b/contrib/nnc/tests/interpreter/graph_creator.h @@ -19,6 +19,6 @@ #include "core/modelIR/graph.h" -std::unique_ptr make_graph(const opinfo::OperatorInfo* opInfo); +std::unique_ptr make_graph(const opinfo::OperatorInfo* opInfo); #endif // NNC_INTERPRETER_OP_TEST_GRAPH_CREATOR_H diff --git a/contrib/nnc/tests/interpreter/op_info_util.cpp b/contrib/nnc/tests/interpreter/op_info_util.cpp index ba25726..3bfd54d 100644 --- a/contrib/nnc/tests/interpreter/op_info_util.cpp +++ b/contrib/nnc/tests/interpreter/op_info_util.cpp @@ -24,6 +24,9 @@ #include "core/modelIR/Shape.h" #include "op_info_util.h" +using namespace nnc; +using namespace nnc::mir; + std::shared_ptr getTensor(const opinfo::Tensor* t) { std::shared_ptr tensorBufferCopy( @@ -84,8 +87,8 @@ int getAxis(const opinfo::OperatorInfo* opInfo) */ __attribute__ ((unused)) void printTensor(const TensorVariant& lhs) { - using nncc::contrib::core::data::ShapeRange; - using nncc::contrib::core::data::Tensor; + using nnc::mir::ShapeRange; + using nnc::mir::Tensor; Tensor accessor(lhs); @@ -126,8 +129,8 @@ static inline ::testing::AssertionResult areFloatsEqual(float f1, float f2, int void assertTensorEq(const TensorVariant &lhs, const TensorVariant &rhs) { - using nncc::contrib::core::data::ShapeRange; - using nncc::contrib::core::data::Tensor; + using nnc::mir::ShapeRange; + using nnc::mir::Tensor; const int GTEST_FLOAT_EQ_ULP = 4; @@ -143,11 +146,9 @@ void assertTensorEq(const TensorVariant &lhs, const TensorVariant &rhs) } // Having to put print 
operator to the same namespace as Shape so that it can be found -namespace nncc -{ -namespace core +namespace nnc { -namespace ADT +namespace mir { namespace tensor { @@ -166,6 +167,5 @@ std::ostream &operator<<(std::ostream &os, const Shape &sh) } } // namespace tensor -} // namespace ADT -} // namespace core -} // namespace nncc +} // namespace mir +} // namespace nnc diff --git a/contrib/nnc/tests/interpreter/op_info_util.h b/contrib/nnc/tests/interpreter/op_info_util.h index 8930989..5a2693d 100644 --- a/contrib/nnc/tests/interpreter/op_info_util.h +++ b/contrib/nnc/tests/interpreter/op_info_util.h @@ -30,17 +30,15 @@ #include "passes/common_frontend/shape_helper.h" #include "graph_creator.h" -using namespace nncc::contrib::frontend::common; -using namespace nncc::contrib::core::IR::model; - -std::shared_ptr getTensor(const opinfo::Tensor* t); -std::shared_ptr getKernel(const opinfo::OperatorInfo* opInfo); -ops::PaddingType getPaddingType(const opinfo::OperatorInfo* opInfo); -ops::PoolOp::PoolingType getPoolingType(const opinfo::OperatorInfo* opInfo); -Shape getShapeParam(const opinfo::OperatorInfo* opInfo, unsigned int n); + +std::shared_ptr getTensor(const opinfo::Tensor* t); +std::shared_ptr getKernel(const opinfo::OperatorInfo* opInfo); +nnc::mir::ops::PaddingType getPaddingType(const opinfo::OperatorInfo* opInfo); +nnc::mir::ops::PoolOp::PoolingType getPoolingType(const opinfo::OperatorInfo* opInfo); +nnc::mir::Shape getShapeParam(const opinfo::OperatorInfo* opInfo, unsigned int n); int getAxis(const opinfo::OperatorInfo* opInfo); -__attribute__ ((unused)) void printTensor(const TensorVariant& lhs); -void assertTensorEq(const TensorVariant &lhs, const TensorVariant &rhs); +__attribute__ ((unused)) void printTensor(const nnc::mir::TensorVariant& lhs); +void assertTensorEq(const nnc::mir::TensorVariant &lhs, const nnc::mir::TensorVariant &rhs); #endif // NNC_INTERPRETER_OP_TEST_UTIL_H diff --git a/contrib/nnc/tests/interpreter/op_test.cpp 
b/contrib/nnc/tests/interpreter/op_test.cpp index f21b53e..6b87d1e 100644 --- a/contrib/nnc/tests/interpreter/op_test.cpp +++ b/contrib/nnc/tests/interpreter/op_test.cpp @@ -27,8 +27,8 @@ #include "graph_creator.h" using namespace opinfo; -using namespace nncc::contrib::core::IR::model; -using namespace nncc::contrib::backend::interpreter; +using namespace nnc; +using namespace nnc::mir; extern std::string opInfoBuf; extern const OperatorInfoList* list; @@ -40,7 +40,7 @@ TEST_P(InterpTestFixture, InterpTest) const OperatorInfo* opInfo = GetParam(); std::unique_ptr g = make_graph(opInfo); - core::NNInterpreter interpreter; + mir::NNInterpreter interpreter; for (unsigned int i = 0; i < opInfo->inputs()->size(); ++i) { diff --git a/contrib/nnc/tests/soft_backend/CompileCPP.cpp b/contrib/nnc/tests/soft_backend/CompileCPP.cpp index 8ca1906..e91950a 100644 --- a/contrib/nnc/tests/soft_backend/CompileCPP.cpp +++ b/contrib/nnc/tests/soft_backend/CompileCPP.cpp @@ -41,9 +41,9 @@ using namespace std; -using namespace nncc::contrib; -using namespace nncc::contrib::core::data; -using namespace nncc::contrib::core::IR::model; +using namespace nnc; +using namespace nnc::mir; + // Creates simple graph with input and output void fillGraph(Graph &g) @@ -87,14 +87,14 @@ static void createMain(const string &path, const string &headerPath) int main(int argc, const char *argv[]) { - clopt::CommandLine::getParser()->parseCommandLine(argc, argv, false); - std::string outputDir = clopt::artifactDir; - std::string artifactName = clopt::artifactName; + cli::CommandLine::getParser()->parseCommandLine(argc, argv, false); + std::string outputDir = cli::artifactDir; + std::string artifactName = cli::artifactName; Graph g; fillGraph(g); - nncc::contrib::backend::soft::CPPCodeGenerator::getInstance().run(&g); + nnc::CPPCodeGenerator::getInstance().run(&g); string basePath = outputDir + "/" + artifactName; diff --git a/contrib/nnc/unittests/core/ShapeInference.cpp 
b/contrib/nnc/unittests/core/ShapeInference.cpp index 02124d4..47f9dc8 100644 --- a/contrib/nnc/unittests/core/ShapeInference.cpp +++ b/contrib/nnc/unittests/core/ShapeInference.cpp @@ -21,8 +21,7 @@ #include "gtest/gtest.h" -using namespace nncc::contrib::core::IR::model; -using nncc::contrib::core::data::Shape; +using namespace nnc::mir; TEST(ShapeInferenceTest, ReshapeAutoDimension) { Graph g; diff --git a/contrib/nnc/unittests/core/ShapeRange.cpp b/contrib/nnc/unittests/core/ShapeRange.cpp index dfa858b..0593765 100644 --- a/contrib/nnc/unittests/core/ShapeRange.cpp +++ b/contrib/nnc/unittests/core/ShapeRange.cpp @@ -17,9 +17,9 @@ #include "gtest/gtest.h" #include "core/modelIR/ShapeRange.h" -namespace { +using namespace nnc::mir; -using namespace nncc::contrib::core::data; +namespace { struct ParamType { uint32_t actual_length; diff --git a/contrib/nnc/unittests/core/TensorVariant.cpp b/contrib/nnc/unittests/core/TensorVariant.cpp index 7f8c8bb..2a33428 100644 --- a/contrib/nnc/unittests/core/TensorVariant.cpp +++ b/contrib/nnc/unittests/core/TensorVariant.cpp @@ -18,8 +18,7 @@ #include -using namespace nncc::contrib::core::ADT; -using namespace nncc::contrib::core::data; +using namespace nnc::mir; TEST(TensorVariant, BasicTest) { Shape shape{2,2}; diff --git a/contrib/nnc/unittests/core/deserializer.cpp b/contrib/nnc/unittests/core/deserializer.cpp index 555b216..8e4cde6 100644 --- a/contrib/nnc/unittests/core/deserializer.cpp +++ b/contrib/nnc/unittests/core/deserializer.cpp @@ -20,9 +20,7 @@ #include "core/modelIR/ShapeRange.h" #include "core/modelIR/Tensor.h" -using namespace nncc::contrib::core; -using namespace nncc::contrib::core::data; -using namespace nncc::contrib::core::ADT; +using namespace nnc::mir; const double EPS = 0.0000001; diff --git a/contrib/nnc/unittests/core/ir_node.cpp b/contrib/nnc/unittests/core/ir_node.cpp index 9242764..00100fa 100644 --- a/contrib/nnc/unittests/core/ir_node.cpp +++ b/contrib/nnc/unittests/core/ir_node.cpp @@ -20,8 
+20,9 @@ #include +using namespace nnc::mir; + TEST(IRNode, ConnectionTest) { - using namespace nncc::contrib::core::IR::model; auto node1 = Node::createNode("node1", 0); auto node2 = Node::createNode("node2", 1); diff --git a/contrib/nnc/unittests/core/operation.cpp b/contrib/nnc/unittests/core/operation.cpp index 79e937a..84bfec5 100644 --- a/contrib/nnc/unittests/core/operation.cpp +++ b/contrib/nnc/unittests/core/operation.cpp @@ -20,8 +20,7 @@ #include -using namespace nncc::contrib::core::IR::model; -using namespace nncc::contrib::core::data; +using namespace nnc::mir; TEST(OpDescription, InputOutputShapeTest) { Shape inShape{1,2,3}; diff --git a/contrib/nnc/unittests/core/serializer.cpp b/contrib/nnc/unittests/core/serializer.cpp index da605de..bbba716 100644 --- a/contrib/nnc/unittests/core/serializer.cpp +++ b/contrib/nnc/unittests/core/serializer.cpp @@ -20,9 +20,7 @@ #include "core/serialize/Serializer.h" #include "core/modelIR/ShapeRange.h" -using namespace nncc::contrib::core; -using namespace nncc::contrib::core::data; -using namespace nncc::contrib::core::ADT; +using namespace nnc::mir; const double EPS = 0.0000001; diff --git a/contrib/nnc/unittests/pass/PassExceptionTest.cpp b/contrib/nnc/unittests/pass/PassExceptionTest.cpp index 4d2e6ed..ee7a73d 100644 --- a/contrib/nnc/unittests/pass/PassExceptionTest.cpp +++ b/contrib/nnc/unittests/pass/PassExceptionTest.cpp @@ -18,7 +18,7 @@ #include "gtest/gtest.h" -using namespace nncc::contrib::pass; +using namespace nnc; const char *ErrorMsg = "error constructor"; diff --git a/contrib/nnc/unittests/pass/PassManagerTest.cpp b/contrib/nnc/unittests/pass/PassManagerTest.cpp index 4d992e6..c898251 100644 --- a/contrib/nnc/unittests/pass/PassManagerTest.cpp +++ b/contrib/nnc/unittests/pass/PassManagerTest.cpp @@ -24,16 +24,15 @@ #include "gtest/gtest.h" -using namespace nncc::contrib; -using namespace nncc::contrib::pass; -using namespace nncc::contrib::core::IR::model; +using namespace nnc; + class DummyPass1 
: public Pass { public: PassData run(PassData data) override { - auto graph = static_cast(data); + auto graph = static_cast(data); if ( !graph ) { @@ -49,7 +48,7 @@ class DummyPass2 : public Pass public: PassData run(PassData data) override { - auto tv = static_cast(data); + auto tv = static_cast(data); if ( !tv ) { @@ -65,9 +64,9 @@ TEST(CONTRIB_PASS, PassManager) DummyPass1 pass1; DummyPass2 pass2; - Graph g; + mir::Graph g; auto res = pass1.run(&g); - ASSERT_NE(static_cast(res), nullptr); + ASSERT_NE(static_cast(res), nullptr); ASSERT_THROW(pass2.run(res), PassException); } diff --git a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp index 7b4db31..9b277d2 100644 --- a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp +++ b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp @@ -72,11 +72,10 @@ #include "gtest/gtest.h" using namespace std; -using namespace nncc::contrib::backend::soft; -using namespace nncc::contrib::core::IR::model; // ShapeInference and Graph -using namespace nncc::contrib::core; // data namespace for TensorVariant, Tensor, Index and Shape -using namespace nncc::contrib::backend::interpreter; -namespace irOps = IR::model::ops; + +using namespace nnc; + +namespace irOps = nnc::mir::ops; /* This test suite operates with both artifact and NNC tensors: @@ -85,22 +84,22 @@ namespace irOps = IR::model::ops; For example: nShape, aShape, nTensor, aTensor. 
Artifact data types are: Tensor, Shape - NNC data types are: TensorVariant, tensor::Shape, data::Tensor + NNC data types are: mir::TensorVariant, tensor::Shape, mir::Tensor */ /** Creates graph with one operation generated by opGen function and returns this operation node*/ -INode *fillGraph(Graph &g, function opGen, - const vector> &inputNTensors) +mir::INode *fillGraph(mir::Graph &g, function opGen, + const vector> &inputNTensors) { // Create operation node - INode *opNode = opGen(g); + mir::INode *opNode = opGen(g); int numInputs = opNode->getPrevNodes().size(); assert(inputNTensors.size() == static_cast(numInputs)); for (int i = 0; i < numInputs; ++i) { // Create i-th input node - auto inputNode = g.create("x" + std::to_string(i)); + auto inputNode = g.create("x" + std::to_string(i)); // Connect i-th operation input to i-th input node opNode->connectInputTo(i, inputNode->getOutput(0)); @@ -113,13 +112,13 @@ INode *fillGraph(Graph &g, function opGen, g.markOutput(opNode); // Run shape inference - ShapeInference shapeInferencer; + mir::ShapeInference shapeInferencer; g.accept(&shapeInferencer); return opNode; } /** Fills NNC Shape object with data from src container*/ -void fillNShape(data::Shape &nshape, const vector &rawShapeData) +void fillNShape(mir::Shape &nshape, const vector &rawShapeData) { int shapeRank = rawShapeData.size(); nshape.resize(shapeRank); @@ -130,7 +129,7 @@ void fillNShape(data::Shape &nshape, const vector &rawShapeData) } /** Converts NNC Shape to artifact Shape*/ -void copyAShapeFromNShape(Shape &ashape, const data::Shape &src) +void copyAShapeFromNShape(Shape &ashape, const mir::Shape &src) { int shapeRank = src.rank(); ashape.setDims(shapeRank); @@ -141,41 +140,41 @@ void copyAShapeFromNShape(Shape &ashape, const data::Shape &src) } /** Fills NNC and artifact Shape objects with data from rawShapeData*/ -void fillShapes(data::Shape &nshape, Shape &ashape, const vector &rawShapeData) +void fillShapes(mir::Shape &nshape, Shape &ashape, 
const vector &rawShapeData) { fillNShape(nshape, rawShapeData); copyAShapeFromNShape(ashape, nshape); } /** Fills NNC tensor with some determined data*/ -void fillNTensor(TensorVariant &dst, float start) +void fillNTensor(mir::TensorVariant &dst, float start) { float t = start; - data::Tensor wrapper(dst); - for (data::Index idx: data::ShapeRange(dst.getShape())) + mir::Tensor wrapper(dst); + for (mir::Index idx: mir::ShapeRange(dst.getShape())) { wrapper.at(idx) = sin(t) * 2.0f; t += 1.0f; } } -TensorVariant createNTensor(data::Shape &shape, float start) +mir::TensorVariant createNTensor(mir::Shape &shape, float start) { shared_ptr dataBuf( - new char[sizeof(float)*data::num_elements(shape)], default_delete()); - TensorVariant tensor(shape, dataBuf, TensorVariant::DTYPE::FLOAT, sizeof(float)); + new char[sizeof(float)*mir::num_elements(shape)], default_delete()); + mir::TensorVariant tensor(shape, dataBuf, mir::TensorVariant::DTYPE::FLOAT, sizeof(float)); fillNTensor(tensor, start); return tensor; } -/** Converts NNC TensorVariant to artifact Tensor object*/ -void copyATensorFromNTensor(Tensor &dst, TensorVariant &src) +/** Converts NNC mir::TensorVariant to artifact Tensor object*/ +void copyATensorFromNTensor(Tensor &dst, mir::TensorVariant &src) { - data::Tensor wrapper(src); + mir::Tensor wrapper(src); Index artIdx; int rank = src.getShape().rank(); artIdx.setDims(rank); - for (data::Index idx: data::ShapeRange(src.getShape())) + for (mir::Index idx: mir::ShapeRange(src.getShape())) { for (int i = 0; i < rank; ++i) { @@ -186,23 +185,23 @@ void copyATensorFromNTensor(Tensor &dst, TensorVariant &src) } /** Fills NNC and artifact tensor objects with some determined data*/ -void fillTensors(unique_ptr &nTensor, Tensor &aTensor, const vector &shape, float start) +void fillTensors(unique_ptr &nTensor, Tensor &aTensor, const vector &shape, float start) { Shape aShape; - data::Shape nShape; + mir::Shape nShape; fillShapes(nShape, aShape, shape); 
aTensor.reShape(aShape); shared_ptr dataBuf( - new char[sizeof(float)*data::num_elements(nShape)], default_delete()); - nTensor.reset(new TensorVariant(nShape, dataBuf, TensorVariant::DTYPE::FLOAT, sizeof(float))); + new char[sizeof(float)*mir::num_elements(nShape)], default_delete()); + nTensor.reset(new mir::TensorVariant(nShape, dataBuf, mir::TensorVariant::DTYPE::FLOAT, sizeof(float))); fillNTensor(*nTensor, start); copyATensorFromNTensor(aTensor, *nTensor); } /** Run interpreter to get reference output data*/ -TensorVariant getReferenceTensor(Graph &g, const vector> &inputNTensors) +mir::TensorVariant getReferenceTensor(mir::Graph &g, const vector> &inputNTensors) { - core::NNInterpreter interpreter; + mir::NNInterpreter interpreter; for (int i = 0; i < static_cast(inputNTensors.size()); ++i) { interpreter.setInput("x" + to_string(i), *inputNTensors[i]); @@ -220,10 +219,10 @@ Tensor run(Operation op, const Args &...args) return output; } -/** Compares nnc TensorVariant and artifact Tensor objects*/ -void compareResults(const TensorVariant &refNTensor, const Tensor &testATensor) +/** Compares nnc mir::TensorVariant and artifact Tensor objects*/ +void compareResults(const mir::TensorVariant &refNTensor, const Tensor &testATensor) { - const data::Shape &tvShape = refNTensor.getShape(); + const mir::Shape &tvShape = refNTensor.getShape(); const Shape &atShape = testATensor.getShape(); ASSERT_EQ(tvShape.rank(), atShape.getDims()); int rank = tvShape.rank(); @@ -233,15 +232,15 @@ void compareResults(const TensorVariant &refNTensor, const Tensor &testATensor) } Index artifactIdx; artifactIdx.setDims(rank); - for (data::Index tvIdx: data::ShapeRange(tvShape)) + for (mir::Index tvIdx: mir::ShapeRange(tvShape)) { for (int i = 0; i < rank; ++i) { artifactIdx[i] = tvIdx.at(i); } - assert(refNTensor.getElementSize() == 4L && refNTensor.getDataType() == TensorVariant::DTYPE::FLOAT); + assert(refNTensor.getElementSize() == 4L && refNTensor.getDataType() == 
mir::TensorVariant::DTYPE::FLOAT); // Input and output data lies in range of [-10, 10], chosen epsilon lies near the edge of float type computational precision - ASSERT_NEAR(data::Tensor(refNTensor).at(tvIdx), testATensor.at(artifactIdx), 0.00001); + ASSERT_NEAR(mir::Tensor(refNTensor).at(tvIdx), testATensor.at(artifactIdx), 0.00001); } } @@ -249,11 +248,11 @@ void compareResults(const TensorVariant &refNTensor, const Tensor &testATensor) * This function creates test graph, runs interpeter, specifies artifact operation and compares results */ template -void createAndRunTestGraph(function opGenerator, TestFunc artifactOperation, - const vector> &inputNTensors, const Args &...inputATensors) +void createAndRunTestGraph(function opGenerator, TestFunc artifactOperation, + const vector> &inputNTensors, const Args &...inputATensors) { - Graph g; - INode *actualOperation = fillGraph(g, opGenerator, inputNTensors); + mir::Graph g; + mir::INode *actualOperation = fillGraph(g, opGenerator, inputNTensors); // serialize data for soft backend operation list inferenceSequence; @@ -264,7 +263,7 @@ void createAndRunTestGraph(function opGenerator, TestFunc arti serializer.serialize(inferenceSequence); assert(inferenceSequence.front()._paramStartOffset == 0); - TensorVariant referenceOutput = getReferenceTensor(g, inputNTensors); + mir::TensorVariant referenceOutput = getReferenceTensor(g, inputNTensors); Tensor testOutput; artifactOperation(testOutput, serializer.getBuffer().data(), inputATensors...); @@ -275,13 +274,13 @@ void createAndRunTestGraph(function opGenerator, TestFunc arti TEST(cpp_operations_test, bias) { vector inputShapeData{2, 3, 4, 5}; - data::Shape weightsShape{5}; - vector> inputNTensors(1); + mir::Shape weightsShape{5}; + vector> inputNTensors(1); Tensor aInputTensor; fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f); - TensorVariant weights = createNTensor(weightsShape, 1.0f); - auto opGenerator = [weights](Graph &g){return g.create("y", 
weights);}; + mir::TensorVariant weights = createNTensor(weightsShape, 1.0f); + auto opGenerator = [weights](mir::Graph &g){return g.create("y", weights);}; createAndRunTestGraph(opGenerator, biasAdd, inputNTensors, aInputTensor); } @@ -289,13 +288,13 @@ TEST(cpp_operations_test, bias) TEST(cpp_operations_test, scale) { vector inputShapeData{2, 3, 4, 5}; - data::Shape weightsShape{5}; - vector> inputNTensors(1); + mir::Shape weightsShape{5}; + vector> inputNTensors(1); Tensor aInputTensor; fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f); - TensorVariant weights = createNTensor(weightsShape, 1.0f); - auto opGenerator = [weights](Graph &g){return g.create("y", weights);}; + mir::TensorVariant weights = createNTensor(weightsShape, 1.0f); + auto opGenerator = [weights](mir::Graph &g){return g.create("y", weights);}; createAndRunTestGraph(opGenerator, scale, inputNTensors, aInputTensor); } @@ -307,9 +306,9 @@ TEST(cpp_operations_test, capped_relu) float cap = 0.5f; vector shapeData{2, 3, 4, 5}; Tensor aInputTensor; - vector> inputNTensors(1); + vector> inputNTensors(1); fillTensors(inputNTensors[0], aInputTensor, shapeData, 1.0f); - auto opGenerator = [cap](Graph &g){return g.create("y", cap);}; + auto opGenerator = [cap](mir::Graph &g){return g.create("y", cap);}; createAndRunTestGraph(opGenerator, cappedRelu, inputNTensors, aInputTensor); } @@ -327,10 +326,10 @@ TEST(cpp_operations_test, concat) // set different size for concatenating axis shape2Data[axis] = 11; vector inputATensors(2); - vector> inputNTensors(2); + vector> inputNTensors(2); fillTensors(inputNTensors[0], inputATensors[0], shape1Data, 1.0f); fillTensors(inputNTensors[1], inputATensors[1], shape2Data, 2.0f); - auto opGenerator = [axis](Graph &g) { return g.create("y", 2, axis); }; + auto opGenerator = [axis](mir::Graph &g) { return g.create("y", 2, axis); }; createAndRunTestGraph(opGenerator, concat, inputNTensors, inputATensors[0], inputATensors[1]); } @@ -351,16 +350,16 @@ 
TEST(cpp_operations_test, conv2d) for (iT strideW = 1; strideW <= 3; ++strideW) { vector inputShapeData{5, 7, static_cast(inputC)}; // HWC - data::Shape kernelShape{kernelH, kernelW, inputC, outputC}; // HWCN - data::Shape strides{strideH, strideW, 1}; - vector> inputNTensors(1); + mir::Shape kernelShape{kernelH, kernelW, inputC, outputC}; // HWCN + mir::Shape strides{strideH, strideW, 1}; + vector> inputNTensors(1); Tensor aInputTensor; fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f); - auto padT = IR::model::ops::PaddingType::Same; - TensorVariant kernel = createNTensor(kernelShape, 1.0f); - auto opGenerator = [kernel, strides, padT](Graph &g) + auto padT = mir::ops::PaddingType::Same; + mir::TensorVariant kernel = createNTensor(kernelShape, 1.0f); + auto opGenerator = [kernel, strides, padT](mir::Graph &g) { - return g.create("y", kernel, strides, padT); + return g.create("y", kernel, strides, padT); }; createAndRunTestGraph(opGenerator, conv2d, inputNTensors, aInputTensor); @@ -383,15 +382,15 @@ TEST(cpp_operations_tests, depthwise_conv) for (iT multiplier = 1; multiplier <= 2; ++multiplier) { vector inputShapeData{5, 7, static_cast(channels)}; // HWC - data::Shape kernelShape{kernelH, kernelW, channels, multiplier}; // HWCN - data::Shape strides{strideH, strideW, 1}; - vector> inputNTensors(1); + mir::Shape kernelShape{kernelH, kernelW, channels, multiplier}; // HWCN + mir::Shape strides{strideH, strideW, 1}; + vector> inputNTensors(1); Tensor aInputTensor; fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f); - auto padT = IR::model::ops::PaddingType::Same; - TensorVariant kernel = createNTensor(kernelShape, 1.0f); - auto opGenerator = [kernel, strides, padT](Graph &g) { - return g.create("y", kernel, strides, padT); + auto padT = mir::ops::PaddingType::Same; + mir::TensorVariant kernel = createNTensor(kernelShape, 1.0f); + auto opGenerator = [kernel, strides, padT](mir::Graph &g) { + return g.create("y", kernel, strides, padT); 
}; createAndRunTestGraph(opGenerator, depthwiseConv2d, inputNTensors, aInputTensor); @@ -401,12 +400,12 @@ TEST(cpp_operations_tests, depthwise_conv) TEST(cpp_operations_test, fully_connected) { vector inputShapeData{1, 13}; - data::Shape weightsShape{13, 7}; - vector> inputNTensors(1); + mir::Shape weightsShape{13, 7}; + vector> inputNTensors(1); Tensor aInputTensor; fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f); - TensorVariant weights = createNTensor(weightsShape, 1.0f); - auto opGenerator = [weights](Graph &g){return g.create("y", weights);}; + mir::TensorVariant weights = createNTensor(weightsShape, 1.0f); + auto opGenerator = [weights](mir::Graph &g){return g.create("y", weights);}; createAndRunTestGraph(opGenerator, fullConnect, inputNTensors, aInputTensor); } @@ -426,17 +425,17 @@ static void genericPoolTest(Func testFunc, const vector shapeData{5, 7, static_cast(channels)}; - data::Shape windowShape{windowH, windowW, 1}; - data::Shape strides{strideH, strideW, 1}; + mir::Shape windowShape{windowH, windowW, 1}; + mir::Shape strides{strideH, strideW, 1}; auto padT = irOps::PaddingType::Valid; Tensor aInputTensor; - vector> inputNTensors(1); + vector> inputNTensors(1); fillTensors(inputNTensors[0], aInputTensor, shapeData, 1.0f); for (auto border: borders) { - auto opGenerator = [windowShape, strides, padT, border](Graph &g) { - return g.create("y", windowShape, strides, poolT, padT, border); + auto opGenerator = [windowShape, strides, padT, border](mir::Graph &g) { + return g.create("y", windowShape, strides, poolT, padT, border); }; createAndRunTestGraph(opGenerator, testFunc, inputNTensors, aInputTensor); @@ -449,7 +448,7 @@ TEST(cpp_operations_test, maxpool) vector borderTypes{ irOps::PoolOp::BorderType::EMPTY }; - genericPoolTest(maxPool, borderTypes); + genericPoolTest(maxPool, borderTypes); } TEST(cpp_operations_test, avgpool) @@ -458,7 +457,7 @@ TEST(cpp_operations_test, avgpool) irOps::PoolOp::BorderType::EMPTY, 
irOps::PoolOp::BorderType::ZEROFILLED }; - genericPoolTest(avgPool, borderTypes); + genericPoolTest(avgPool, borderTypes); } TEST(cpp_operations_test, relu) @@ -466,9 +465,9 @@ TEST(cpp_operations_test, relu) // test prerequisites vector shapeData{2,3,4,5}; Tensor aInputTensor; - vector> inputNTensors(1); + vector> inputNTensors(1); fillTensors(inputNTensors[0], aInputTensor, shapeData, 1.0f); - auto opGenerator = [](Graph &g){return g.create("y");}; + auto opGenerator = [](mir::Graph &g){return g.create("y");}; createAndRunTestGraph(opGenerator, relu, inputNTensors, aInputTensor); } @@ -483,9 +482,9 @@ TEST(cpp_operations_test, softmax) shapeData.resize(numDims); int axis = numDims - 1; Tensor aInputTensor; - vector> inputNTensors(1); + vector> inputNTensors(1); fillTensors(inputNTensors[0], aInputTensor, shapeData, 1.0f); - auto opGenerator = [axis](Graph &g) { return g.create("y", axis); }; + auto opGenerator = [axis](mir::Graph &g) { return g.create("y", axis); }; createAndRunTestGraph(opGenerator, softmax, inputNTensors, aInputTensor); } @@ -496,14 +495,14 @@ TEST(cpp_operations_test, reshape) // test prerequisites vector inputShapeData{2,3,4,5}; vector outputShapeData{1,120}; - data::Shape nOutputShape; + mir::Shape nOutputShape; fillNShape(nOutputShape, outputShapeData); Tensor aInputTensor; - vector> inputNTensors(1); + vector> inputNTensors(1); fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f); - auto opGenerator = [nOutputShape](Graph &g) + auto opGenerator = [nOutputShape](mir::Graph &g) { - auto op = g.create("y"); + auto op = g.create("y"); op->getOperation()->setOutputShape(0, nOutputShape); return op; }; diff --git a/contrib/nnc/unittests/soft_backend/Generator.cpp b/contrib/nnc/unittests/soft_backend/Generator.cpp index ccc4790..2c6a73f 100644 --- a/contrib/nnc/unittests/soft_backend/Generator.cpp +++ b/contrib/nnc/unittests/soft_backend/Generator.cpp @@ -29,10 +29,9 @@ #include "support/CommandLine.h" using namespace std; -using 
namespace nncc::contrib; -using namespace nncc::contrib::backend::soft; -using namespace nncc::contrib::core; -using namespace nncc::contrib::core::IR::model; + +using namespace nnc; +using namespace nnc::mir; static bool isFileExists(const string &path) { @@ -83,11 +82,11 @@ TEST(Generator, check_generator_call) nullptr}; int argc = (sizeof(argv) / sizeof(argv[0])) - 1; - clopt::CommandLine::getParser()->parseCommandLine(argc, argv, false); + cli::CommandLine::getParser()->parseCommandLine(argc, argv, false); - nncc::contrib::core::IR::model::Graph g; + nnc::mir::Graph g; INode *input = g.create("input"); - input->getOperation()->setOutputShape(0, data::Shape({1,2,3,4})); + input->getOperation()->setOutputShape(0, Shape({1,2,3,4})); INode *output = g.create("output"); output->connectInputTo(0, input->getOutput(0)); diff --git a/contrib/nnc/unittests/support/CommandLineTest.cpp b/contrib/nnc/unittests/support/CommandLineTest.cpp index 84dfb52..1022c78 100644 --- a/contrib/nnc/unittests/support/CommandLineTest.cpp +++ b/contrib/nnc/unittests/support/CommandLineTest.cpp @@ -18,7 +18,7 @@ #include "gtest/gtest.h" -using namespace nncc::contrib::clopt; +using namespace nnc::cli; void soption_checker1(const Option &opt) -- 2.7.4