#include <algorithm>
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace data
+namespace mir
{
Index::Index(std::initializer_list<uint32_t> &&l) : _indices{l}
return s;
}
-} // namespace data
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include <algorithm>
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace data
+namespace mir
{
Shape::Shape(std::initializer_list<uint32_t> &&l) : _dims{l}
return s;
}
-} // namespace data
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/operations/scale_op.h"
#include "core/modelIR/operations/dropout_op.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
-using nncc::contrib::core::data::Shape;
+using nnc::mir::Shape;
template<class Op>
void fillHWShapesForPaddedOperations(Op &op, const Shape &windowShape, Shape &outShape)
op.setPadding(inRank - 1, 0);
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::ConcatOp &op)
+void ShapeInference::visit(INode::Ref node, ops::ConcatOp &op)
{
fillInputShapes(node, op);
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::Conv2DOp &op)
+void ShapeInference::visit(INode::Ref node, ops::Conv2DOp &op)
{
fillInputShapes(node, op);
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::VariableOp &op)
+void ShapeInference::visit(INode::Ref node, ops::VariableOp &op)
{
(void)op;
(void)node;
// No need to do anything for inputs. These should be set by user
}
-void ShapeInference::fillInputShapes(ADT::INode::Ref node, OpDescription &op)
+void ShapeInference::fillInputShapes(INode::Ref node, OpDescription &op)
{
uint32_t i = 0;
for (auto &in : node->getPrevNodes())
}
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::ReluOp &op)
+void ShapeInference::visit(INode::Ref node, ops::ReluOp &op)
{
fillInputShapes(node, op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::SoftmaxOp &op)
+void ShapeInference::visit(INode::Ref node, ops::SoftmaxOp &op)
{
fillInputShapes(node, op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::PoolOp &op)
+void ShapeInference::visit(INode::Ref node, ops::PoolOp &op)
{
fillInputShapes(node, op);
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::FullyConnectedOp &op)
+void ShapeInference::visit(INode::Ref node, ops::FullyConnectedOp &op)
{
fillInputShapes(node, op);
const Shape &inShape = op.getInputShape(0);
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::CappedReluOp &op)
+void ShapeInference::visit(INode::Ref node, ops::CappedReluOp &op)
{
fillInputShapes(node, op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::DepthwiseConv2DOp &op)
+void ShapeInference::visit(INode::Ref node, ops::DepthwiseConv2DOp &op)
{
fillInputShapes(node, op);
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::BiasAddOp &op)
+void ShapeInference::visit(INode::Ref node, ops::BiasAddOp &op)
{
fillInputShapes(node, op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::ReshapeOp &op)
+void ShapeInference::visit(INode::Ref node, ops::ReshapeOp &op)
{
// Reshape should have it's output shape filled by importer/user
fillInputShapes(node, op);
op.setOutputShape(0, outShape);
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::ScaleOp &op)
+void ShapeInference::visit(INode::Ref node, ops::ScaleOp &op)
{
fillInputShapes(node, op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::DropoutOp &op)
+void ShapeInference::visit(INode::Ref node, ops::DropoutOp &op)
{
fillInputShapes(node, op);
op.setOutputShape(0, op.getInputShape(0));
}
-void ShapeInference::visit(ADT::INode::Ref node, ops::BatchNormOp &op)
+void ShapeInference::visit(INode::Ref node, ops::BatchNormOp &op)
{
fillInputShapes(node, op);
op.setOutputShape(0, op.getInputShape(0));
}
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/Tensor.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace data
+namespace mir
{
template class Tensor<float>;
template class Tensor<double>;
template class Tensor<int>;
-} // namespace data
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/TensorVariant.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace ADT
+namespace mir
{
TensorVariant::TensorVariant(const Shape& shape, const std::shared_ptr<char>& data, TensorVariant::DTYPE dtype, size_t element_size)
return offset;
}
-} // namespace ADT
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/ir_node.h"
#include "core/modelIR/operations/operation.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
+namespace mir
{
-namespace core
-{
-namespace IR
-{
-namespace model {
INode::Ref Graph::getInput(const std::string &name) {
auto it = _inputs.find(name);
_outputs[node->getName()] = node;
}
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/ir_dot_builder.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace dumper
+namespace mir
{
void IrDotBuilder::updateWithNode(INode *node, const DotIrNodeInfo &irNodeInfo)
dot << node1->getId() << " -> " << node2->getId() << ";" << std::endl;
}
-} // namespace dumper
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/ir_dot_node_info.h"
#include "core/modelIR/ir_dot_dumper.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
+namespace mir
{
-namespace core
-{
-namespace dumper
-{
-
-using namespace nncc::contrib::core::data;
static std::vector<Shape> getInputShapes(OpDescription &op)
{
dotBuilder.updateWithNode(node, nodeInfo);
}
-} // namespace dumper
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/ir_dot_node_info.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace dumper
+namespace mir
{
DotIrNodeInfo &DotIrNodeInfo::withType(const std::string &typeName, const std::string &nodeName)
}
}
-} // namespace dumper
-} // namespace core
-} // namespace contrib
-} // namespace nncc
\ No newline at end of file
+} // namespace mir
+} // namespace nnc
\ No newline at end of file
#include "core/modelIR/ir_node.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
-const std::vector<ADT::INode::Ref> &ADT::AbstractNode::getNextNodes() const { return _outputs; }
+const std::vector<INode::Ref> &AbstractNode::getNextNodes() const { return _outputs; }
-const std::vector<ADT::INode::IODescriptor> &ADT::AbstractNode::getPrevNodes() const
+const std::vector<INode::IODescriptor> &AbstractNode::getPrevNodes() const
{
return _inputs;
}
-void ADT::AbstractNode::connectInputTo(const int inputIndex, const IODescriptor &descriptor)
+void AbstractNode::connectInputTo(const int inputIndex, const IODescriptor &descriptor)
{
AbstractNode *buf_ptr = dynamic_cast<AbstractNode *>(descriptor.node);
assert(buf_ptr);
_inputs[inputIndex] = descriptor;
}
-void ADT::AbstractNode::addNextNode(ADT::INode::Ref const node) { _outputs.emplace_back(node); }
+void AbstractNode::addNextNode(INode::Ref const node) { _outputs.emplace_back(node); }
-const ADT::INode::IODescriptor ADT::AbstractNode::getOutput(size_t index)
+const INode::IODescriptor AbstractNode::getOutput(size_t index)
{
return IODescriptor{.node = this, .index = index};
}
-ADT::AbstractNode::AbstractNode(size_t num_inputs) {
+AbstractNode::AbstractNode(size_t num_inputs) {
_inputs.resize(num_inputs);
}
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/operations/operation.h"
-namespace nncc {
-namespace contrib {
-namespace core {
-namespace IR {
-namespace model {
-
-using namespace nncc::contrib::core::data;
+namespace nnc
+{
+namespace mir
+{
const Shape &OpDescription::getInputShape(const size_t index) const {
assert(index < getNumInputs());
size_t OpDescription::getNumOutputs() const { return _max_outputs; }
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/visitor.h"
-namespace nncc {
-namespace contrib {
-namespace core {
-namespace IR {
-namespace model {
+namespace nnc
+{
+namespace mir
+{
-void Visitor::visit(ADT::INode *node, ops::ConcatOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::Conv2DOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::SoftmaxOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::PoolOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::FullyConnectedOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::CappedReluOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::BiasAddOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::VariableOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::ReluOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::ReshapeOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::ScaleOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::BatchNormOp &op) {(void)node; (void)op;};
-void Visitor::visit(ADT::INode *node, ops::DropoutOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::ConcatOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::Conv2DOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::DepthwiseConv2DOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::SoftmaxOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::PoolOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::FullyConnectedOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::CappedReluOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::BiasAddOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::VariableOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::ReluOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::ReshapeOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::ScaleOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::BatchNormOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::DropoutOp &op) {(void)node; (void)op;};
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/ShapeRange.h"
-namespace nncc {
-namespace contrib {
-namespace core {
-
-using namespace nncc::contrib::core::data;
-using namespace nncc::contrib::core::ADT;
+namespace nnc
+{
+namespace mir
+{
//
// Shape Deserialization
return deserializeFromMessage(objectAsMessage);
}
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/ShapeRange.h"
-namespace nncc {
-namespace contrib {
-namespace core {
-
-using namespace nncc::contrib::core::data;
+namespace nnc
+{
+namespace mir
+{
template <class T>
void Serializer<T>::serializeToStream(const T& obj, std::ostream& stream)
return tensorProto.SerializeAsString();
}
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
// TODO: move to proto3
syntax = "proto2";
-package nncc.contrib.core.proto;
+package nnc.mir.proto;
enum DataType {
// Not a legal value for DataType. Used to indicate a DataType field
#include "option/Options.h"
#include "Driver.h"
-using namespace nncc::contrib;
-using namespace nncc::contrib::pass;
-using namespace nncc::contrib::frontend;
-using namespace nncc::contrib::backend;
-namespace nncc
-{
-namespace contrib
+namespace nnc
{
/**
{
Pass *pass;
- if ( clopt::caffeFrontend.isDisabled() && clopt::tflFrontend.isDisabled() )
+ if ( cli::caffeFrontend.isDisabled() && cli::tflFrontend.isDisabled() )
{
throw DriverException("frontends are not available");
}
- if ( clopt::caffeFrontend && clopt::tflFrontend )
+ if ( cli::caffeFrontend && cli::tflFrontend )
{
throw DriverException("only one of the following options are allowed"
" to be set in the same time: '"
- + clopt::caffeFrontend.getNames()[0] + "', '"
- + clopt::tflFrontend.getNames()[0] + "'");
+ + cli::caffeFrontend.getNames()[0] + "', '"
+ + cli::tflFrontend.getNames()[0] + "'");
}
- if ( clopt::caffeFrontend )
+ if ( cli::caffeFrontend )
{
#ifdef NNC_FRONTEND_CAFFE_ENABLED
pass = &caffe::CaffeFrontend::getInstance();
#endif // NNC_FRONTEND_CAFFE_ENABLED
}
- else if ( clopt::tflFrontend )
+ else if ( cli::tflFrontend )
{
#ifdef NNC_FRONTEND_TFLITE_ENABLED
pass = &tflite::TFLiteFrontend::getInstance();
else
{
throw DriverException("one of the following options must be defined: '"
- + clopt::caffeFrontend.getNames()[0] + "', '"
- + clopt::tflFrontend.getNames()[0] + "'");
+ + cli::caffeFrontend.getNames()[0] + "', '"
+ + cli::tflFrontend.getNames()[0] + "'");
}
PassManager::getPassManager()->registerPass(pass);
{
Pass *pass;
- if ( clopt::target == NNC_TARGET_X86_CPP )
+ if ( cli::target == NNC_TARGET_X86_CPP )
{
- pass = &soft::CPPCodeGenerator::getInstance();
+ pass = &CPPCodeGenerator::getInstance();
}
- else if (clopt::target == NNC_TARGET_ARM_GPU_CPP )
+ else if (cli::target == NNC_TARGET_ARM_GPU_CPP )
{
- pass = &soft::AclCPPCodeGenerator::getInstance();
+ pass = &AclCPPCodeGenerator::getInstance();
}
- else if ( clopt::target == NNC_TARGET_INTERPRETER )
+ else if ( cli::target == NNC_TARGET_INTERPRETER )
{
- pass = &interpreter::InterpreterPass::getInstance();
+ pass = &InterpreterPass::getInstance();
} else
{
} // runDriver
-} // contrib
-} // nncc
+} // namespace nnc
#include <string>
-namespace nncc
-{
-namespace contrib
+namespace nnc
{
/**
};
-} // contrib
-} // nncc
+} // namespace nnc
#endif //NNCC_DRIVER_H
#include "option/Options.h"
#include "Definitions.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace clopt
+namespace cli
{
/**
std::vector<std::string>{},
optional(true));
-} // namespace clopt
-} // namespace contrib
-} // namespace nncc
+} // namespace cli
+} // namespace nnc
#include "pass/PassException.h"
#include "Driver.h"
-using namespace nncc::contrib;
-using namespace nncc::contrib::pass;
+using namespace nnc;
int main(int argc, const char *argv[])
{
try
{
// Parse command line
- clopt::CommandLine::getParser()->parseCommandLine(argc, argv);
+ cli::CommandLine::getParser()->parseCommandLine(argc, argv);
//
// run compiler pipeline:
#include "core/modelIR/ShapeInference.h"
#include "pass/PassException.h"
-using namespace nncc::contrib;
-using namespace nncc::contrib::pass;
-using namespace nncc::contrib::clopt;
-using namespace nncc::contrib::core::dumper;
+using namespace nnc;
+using namespace nnc::mir;
+using namespace nnc::cli;
enum Format {FormatDot, FormatDump};
int main(int argc, const char **argv)
{
- clopt::CommandLine::getParser()->parseCommandLine(argc, argv, false);
- std::string model = clopt::inputFile;
+ cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
+ std::string model = cli::inputFile;
- nncc::contrib::frontend::caffe::CaffeImporter importer{model};
+ nnc::caffe::CaffeImporter importer{model};
if (!importer.import())
{
#include "core/modelIR/ir_dot_dumper.h"
#include "core/modelIR/ShapeInference.h"
-using namespace nncc::contrib;
-using namespace nncc::contrib::pass;
-using namespace nncc::contrib::clopt;
-using namespace nncc::contrib::core::dumper;
+using namespace nnc;
+using namespace nnc::mir;
+using namespace nnc::cli;
enum Format {FormatDot, FormatDump};
int main(int argc, const char **argv)
{
- clopt::CommandLine::getParser()->parseCommandLine(argc, argv, false);
- std::string model = clopt::inputFile;
+ cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
+ std::string model = cli::inputFile;
- nncc::contrib::frontend::tflite::v3::TfliteImporter importer{model};
+ nnc::tflite::v3::TfliteImporter importer{model};
if (!importer.import())
{
try
{
IrDotDumper dotDumper;
- ShapeInference inf;
- auto g = static_cast<Graph *>(importer.createIR());
+ mir::ShapeInference inf;
+ auto g = static_cast<mir::Graph *>(importer.createIR());
g->accept(&inf);
g->accept(&dotDumper);
#include "core/modelIR/Region.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace data
+namespace mir
{
template <typename T> class ExternalRegion final : public Region<T>
uint32_t const _size;
};
-} // namespace data
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif // _NNC_CORE_LINALG_EXTERNAL_REGION_H_
#include <cstdint>
#include <ostream>
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace data
+namespace mir
{
class Index
std::ostream &operator<<(std::ostream &s, const Index &sh);
-} // namespace data
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif // _NNC_CORE_LINALG_INDEX_H_
#include <cstdint>
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace data
+namespace mir
{
template<typename T>
virtual uint32_t size(void) const = 0;
};
-} // namespace data
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif // _NNC_CORE_LINALG_REGION_H_
#include <cstdint>
#include <ostream>
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace data
+namespace mir
{
class Shape
bool operator==(const Shape &, const Shape &);
std::ostream &operator<<(std::ostream &s, const Shape &sh);
-} // namespace data
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif // _NNC_CORE_LINALG_SHAPE_H_
#include "core/modelIR/visitor.h"
#include "core/modelIR/ir_node.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
+namespace mir
{
-namespace core
-{
-namespace IR
-{
-namespace model
-{
-
-using namespace nncc::contrib::core::IR::model;
class ShapeInference : public IVisitor {
public:
static const auto AUTO_DIM = std::numeric_limits<uint32_t>::max();
- void visit(ADT::INode::Ref node, ops::ConcatOp &op) override;
- void visit(ADT::INode::Ref node, ops::Conv2DOp &op) override;
- void visit(ADT::INode::Ref node, ops::DepthwiseConv2DOp &op) override;
- void visit(ADT::INode::Ref node, ops::ReluOp &op) override;
- void visit(ADT::INode::Ref node, ops::SoftmaxOp &op) override;
- void visit(ADT::INode::Ref node, ops::PoolOp &op) override;
- void visit(ADT::INode::Ref node, ops::FullyConnectedOp &op) override;
- void visit(ADT::INode::Ref node, ops::CappedReluOp &op) override;
- void visit(ADT::INode::Ref node, ops::BiasAddOp &op) override;
- void visit(ADT::INode::Ref node, ops::ReshapeOp &op) override;
- void visit(ADT::INode::Ref node, ops::VariableOp &op) override;
- void visit(ADT::INode *node, ops::ScaleOp &op) override;
- void visit(ADT::INode *node, ops::BatchNormOp &op) override;
- void visit(ADT::INode *node, ops::DropoutOp &op) override;
+ void visit(INode::Ref node, ops::ConcatOp &op) override;
+ void visit(INode::Ref node, ops::Conv2DOp &op) override;
+ void visit(INode::Ref node, ops::DepthwiseConv2DOp &op) override;
+ void visit(INode::Ref node, ops::ReluOp &op) override;
+ void visit(INode::Ref node, ops::SoftmaxOp &op) override;
+ void visit(INode::Ref node, ops::PoolOp &op) override;
+ void visit(INode::Ref node, ops::FullyConnectedOp &op) override;
+ void visit(INode::Ref node, ops::CappedReluOp &op) override;
+ void visit(INode::Ref node, ops::BiasAddOp &op) override;
+ void visit(INode::Ref node, ops::ReshapeOp &op) override;
+ void visit(INode::Ref node, ops::VariableOp &op) override;
+ void visit(INode *node, ops::ScaleOp &op) override;
+ void visit(INode *node, ops::BatchNormOp &op) override;
+ void visit(INode *node, ops::DropoutOp &op) override;
protected:
- void fillInputShapes(ADT::INode::Ref node, OpDescription &op);
+ void fillInputShapes(INode::Ref node, OpDescription &op);
};
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_SHAPE_INFERENCE_
#include "core/modelIR/Shape.h"
#include "core/modelIR/Index.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
+namespace mir
{
-namespace core
-{
-namespace data
-{
-
-using nncc::contrib::core::data::Shape;
-using nncc::contrib::core::data::Index;
class ShapeIter :
public std::iterator<std::forward_iterator_tag, Index, std::size_t, Index*, Index&> {
Shape& _shape;
};
-} // namespace data
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_LINALG_SHAPE_RANGE_H_
#include "core/modelIR/TensorVariant.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace data
+namespace mir
{
template<typename T>
public:
Tensor() = delete;
- explicit Tensor(const ADT::TensorVariant &t) : _proxy(t), _shape(t.getShape()) {
+ explicit Tensor(const TensorVariant &t) : _proxy(t), _shape(t.getShape()) {
}
T at(const Index &id) const {
virtual const Shape &getShape() const { return _proxy.getShape(); };
private:
- const ADT::TensorVariant& _proxy;
+ const TensorVariant& _proxy;
const Shape &_shape;
};
extern template
class Tensor<int>;
-} // namespace data
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/Index.h"
#include "core/modelIR/ShapeRange.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace data
-{
-namespace util
+namespace mir
{
-using nncc::contrib::core::data::Shape;
-using namespace nncc::contrib::core::ADT;
-using namespace nncc::contrib::core::data;
-
template<unsigned int... Ints>
static std::shared_ptr <TensorVariant>
transposeTensor(std::shared_ptr <TensorVariant> tensor)
{
- using nncc::contrib::core::data::Index;
-
const Shape &inShape = tensor->getShape();
Shape targetShape{inShape.dim(Ints)...};
return convertedTensor;
}
-} // namespace util
-} // namespace data
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif // _NNC_CORE_LINALG_TENSOR_UTIL_H_
#include "core/modelIR/Index.h"
#include "core/modelIR/Shape.h"
-namespace nncc {
-namespace contrib {
-namespace core {
-namespace ADT {
-
-using nncc::contrib::core::data::Shape;
-using nncc::contrib::core::data::Index;
+namespace nnc
+{
+namespace mir
+{
constexpr int MAX_DIMENSIONS = 32;
size_t _element_size;
};
-} // namespace ADT
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_LINALG_TENSOR_VARIANT_H_
#include "core/modelIR/operations/variable_op.h"
#include "core/modelIR/ir_node.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
+namespace mir
{
-namespace core
-{
-namespace IR
-{
-namespace model {
-using ADT::INode;
class IVisitor;
class Graph {
std::unordered_map<std::string, INode::Ref> _outputs;
};
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_GRAPH_H_
#include "core/modelIR/ir_node.h"
#include "core/modelIR/ir_dot_node_info.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
+namespace mir
{
-namespace core
-{
-namespace dumper
-{
-
-using nncc::contrib::core::IR::model::ADT::INode;
/**
* @brief Provides an API to add nodes and edges to the .dot Model IR representation
std::stringstream dot;
};
-} // namespace dumper
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //NNCC_IR_DOT_BUILDER_H
#include "core/modelIR/ir_dot_builder.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
+namespace mir
{
-namespace core
-{
-namespace dumper
-{
-
-using nncc::contrib::core::IR::model::ADT::INode;
-using namespace nncc::contrib::core::IR::model;
/**
* @breif Model IR visitor that can be used to output Model IR as a .dot graph.
IrDotBuilder dotBuilder;
};
-} // namespace dumper
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif // _NNC_BACKEND_INTERPRETER_CORE_DOTDUMPER_
#include "core/modelIR/operations/common.h"
#include "core/modelIR/operations/pool_op.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
+namespace mir
{
-namespace core
-{
-namespace dumper
-{
-
-using namespace nncc::contrib::core::IR::model;
-using namespace nncc::contrib::core::data;
/**
* @brief Can collect information about a NN operator, and then use it to output
PoolType poolType = PoolType::MAX;
};
-} // namespace dumper
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif // NNCC_IR_NODE_DOT_BUILDER_H
#include "core/modelIR/operations/operation.h"
#include "core/modelIR/visitor.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
-{
-
-namespace ADT
+namespace mir
{
class INode
struct IODescriptor
{
- ADT::INode* node; // Data source
+ INode* node; // Data source
size_t index; // Output id
};
virtual const std::vector<IODescriptor> &getPrevNodes() const = 0;
- virtual const std::vector<ADT::INode::Ref> &getNextNodes() const = 0;
+ virtual const std::vector<INode::Ref> &getNextNodes() const = 0;
virtual size_t getId() const = 0;
public:
explicit AbstractNode(size_t num_inputs);
const std::vector<IODescriptor> &getPrevNodes() const override;
- const std::vector<ADT::INode::Ref> &getNextNodes() const override;
+ const std::vector<INode::Ref> &getNextNodes() const override;
void connectInputTo(const int inputIndex, const IODescriptor &descriptor) override;
const IODescriptor getOutput(const size_t index) override;
protected:
- virtual void addNextNode(ADT::INode::Ref const node) override;
+ virtual void addNextNode(INode::Ref const node) override;
private:
std::vector<IODescriptor> _inputs;
- std::vector<ADT::INode::Ref> _outputs;
+ std::vector<INode::Ref> _outputs;
};
-} // namespace ADT
struct NodeProperties
{
};
template <typename OpType>
-class Node : public ADT::AbstractNode
+class Node : public AbstractNode
{
public:
OpType *getOperation() override { return static_cast<OpType*>(_props.op); }
NodeProperties _props;
};
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_NODE_H_
#include "core/modelIR/operations/operation.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_BATCH_NORM_H_
#include "core/modelIR/operations/operation.h"
#include "core/modelIR/TensorVariant.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_BIAS_ADD_H_
#include "core/modelIR/operations/operation.h"
-namespace nncc {
-namespace contrib {
-namespace core {
-namespace IR {
-namespace model {
-namespace ops {
+namespace nnc
+{
+namespace mir
+{
+namespace ops
+{
class CappedReluOp : public OpDescription {
public:
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_CAPPED_RELU_H_
#ifndef _NNC_CORE_IR_MODEL_COMMON_H_
#define _NNC_CORE_IR_MODEL_COMMON_H_
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_COMMOND_H_
#include "core/modelIR/operations/operation.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_CONCAT_OP_H_
#include "core/modelIR/Shape.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
-using nncc::contrib::core::data::Shape;
-
class Conv2DOp : public OpDescription
{
public:
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_CONV_2D_H_
#include "core/modelIR/Shape.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
-using nncc::contrib::core::data::Shape;
-
class DepthwiseConv2DOp : public OpDescription
{
public:
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_DEPTHWISE_CONV_2D_H_
#include "core/modelIR/operations/operation.h"
-namespace nncc {
-namespace contrib {
-namespace core {
-namespace IR {
-namespace model {
-namespace ops {
+namespace nnc
+{
+namespace mir
+{
+namespace ops
+{
class DropoutOp : public OpDescription {
public:
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_DROPOUT_H_
#include "core/modelIR/operations/operation.h"
#include "core/modelIR/TensorVariant.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
-using namespace nncc::contrib::core;
-
class FullyConnectedOp : public OpDescription
{
public:
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_FULLY_CONNECTED_OP_H_
#include "core/modelIR/Shape.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
+namespace mir
{
-namespace core
-{
-namespace IR
-{
-namespace model
-{
-
-using nncc::contrib::core::ADT::TensorVariant;
class OpDescription {
public:
size_t getNumInputs() const;
size_t getNumOutputs() const;
- const nncc::contrib::core::data::Shape &getInputShape(const size_t index) const;
- virtual void setInputShape(const size_t index, const nncc::contrib::core::data::Shape &shape);
+ const nnc::mir::Shape &getInputShape(const size_t index) const;
+ virtual void setInputShape(const size_t index, const nnc::mir::Shape &shape);
- virtual const nncc::contrib::core::data::Shape &getOutputShape(const size_t index) const;
- void setOutputShape(const size_t index, const nncc::contrib::core::data::Shape &shape);
+ virtual const nnc::mir::Shape &getOutputShape(const size_t index) const;
+ void setOutputShape(const size_t index, const nnc::mir::Shape &shape);
private:
size_t _max_inputs;
size_t _max_outputs;
- std::map<size_t, nncc::contrib::core::data::Shape> _inputShapes;
- std::map<size_t, nncc::contrib::core::data::Shape> _outputShapes;
+ std::map<size_t, nnc::mir::Shape> _inputShapes;
+ std::map<size_t, nnc::mir::Shape> _outputShapes;
};
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_OPERATION_H_
#include "core/modelIR/Shape.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
-using nncc::contrib::core::data::Shape;
-
class PoolOp : public OpDescription
{
public:
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_POOL_H_
#include "core/modelIR/operations/operation.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_RELU_H_
#include "core/modelIR/operations/operation.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "core/modelIR/operations/operation.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_SCALE_H_
#include "core/modelIR/operations/operation.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_SOFTMAX_H_
#include "core/modelIR/operations/operation.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace core
-{
-namespace IR
-{
-namespace model
+namespace mir
{
namespace ops
{
};
} // namespace ops
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_VARIABLE_H_
#ifndef _NNC_CORE_IR_MODEL_VISITOR_H_
#define _NNC_CORE_IR_MODEL_VISITOR_H_
-namespace nncc {
-namespace contrib {
-namespace core {
-namespace IR {
-namespace model {
+namespace nnc
+{
+namespace mir
+{
-//Forward declare INode due to circular dependecies with INode::accept(Visitor*);
-namespace ADT {
- class INode;
-}
+// Forward-declare INode due to circular dependency with INode::accept(IVisitor*).
+class INode;
//Forward declare operations as we don't need anything but references
namespace ops
*/
class IVisitor {
public:
- virtual void visit(ADT::INode *node, ops::ConcatOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::Conv2DOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::SoftmaxOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::PoolOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::FullyConnectedOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::CappedReluOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::BiasAddOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::VariableOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::ReluOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::ReshapeOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::ScaleOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::BatchNormOp &op) = 0;
- virtual void visit(ADT::INode *node, ops::DropoutOp &op) = 0;
+ virtual void visit(INode *node, ops::ConcatOp &op) = 0;
+ virtual void visit(INode *node, ops::Conv2DOp &op) = 0;
+ virtual void visit(INode *node, ops::DepthwiseConv2DOp &op) = 0;
+ virtual void visit(INode *node, ops::SoftmaxOp &op) = 0;
+ virtual void visit(INode *node, ops::PoolOp &op) = 0;
+ virtual void visit(INode *node, ops::FullyConnectedOp &op) = 0;
+ virtual void visit(INode *node, ops::CappedReluOp &op) = 0;
+ virtual void visit(INode *node, ops::BiasAddOp &op) = 0;
+ virtual void visit(INode *node, ops::VariableOp &op) = 0;
+ virtual void visit(INode *node, ops::ReluOp &op) = 0;
+ virtual void visit(INode *node, ops::ReshapeOp &op) = 0;
+ virtual void visit(INode *node, ops::ScaleOp &op) = 0;
+ virtual void visit(INode *node, ops::BatchNormOp &op) = 0;
+ virtual void visit(INode *node, ops::DropoutOp &op) = 0;
virtual ~IVisitor() = default;
};
*/
class Visitor: public IVisitor{
public:
- void visit(ADT::INode *node, ops::ConcatOp &op) override;
- void visit(ADT::INode *node, ops::Conv2DOp &op) override;
- void visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) override;
- void visit(ADT::INode *node, ops::SoftmaxOp &op) override;
- void visit(ADT::INode *node, ops::PoolOp &op) override;
- void visit(ADT::INode *node, ops::FullyConnectedOp &op) override;
- void visit(ADT::INode *node, ops::CappedReluOp &op) override;
- void visit(ADT::INode *node, ops::BiasAddOp &op) override;
- void visit(ADT::INode *node, ops::VariableOp &op) override;
- void visit(ADT::INode *node, ops::ReluOp &op) override;
- void visit(ADT::INode *node, ops::ReshapeOp &op) override;
- void visit(ADT::INode *node, ops::ScaleOp &op) override;
- void visit(ADT::INode *node, ops::BatchNormOp &op) override;
- void visit(ADT::INode *node, ops::DropoutOp &op) override;
+ void visit(INode *node, ops::ConcatOp &op) override;
+ void visit(INode *node, ops::Conv2DOp &op) override;
+ void visit(INode *node, ops::DepthwiseConv2DOp &op) override;
+ void visit(INode *node, ops::SoftmaxOp &op) override;
+ void visit(INode *node, ops::PoolOp &op) override;
+ void visit(INode *node, ops::FullyConnectedOp &op) override;
+ void visit(INode *node, ops::CappedReluOp &op) override;
+ void visit(INode *node, ops::BiasAddOp &op) override;
+ void visit(INode *node, ops::VariableOp &op) override;
+ void visit(INode *node, ops::ReluOp &op) override;
+ void visit(INode *node, ops::ReshapeOp &op) override;
+ void visit(INode *node, ops::ScaleOp &op) override;
+ void visit(INode *node, ops::BatchNormOp &op) override;
+ void visit(INode *node, ops::DropoutOp &op) override;
~Visitor() override = default;
};
-} // namespace model
-} // namespace IR
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_VISITOR_H_
#include "core/modelIR/Shape.h"
#include "core/modelIR/TensorVariant.h"
-namespace nncc {
-namespace contrib {
-namespace core {
-
-using nncc::contrib::core::data::Shape;
-using nncc::contrib::core::ADT::TensorVariant;
+namespace nnc
+{
+namespace mir
+{
/**
* @brief template class for deserialization
T deserializeFromString(const std::string&);
};
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_SERIALIZER_H
#include "core/modelIR/Shape.h"
#include "core/modelIR/Tensor.h"
-namespace nncc {
-namespace contrib {
-namespace core {
-
-using nncc::contrib::core::data::Shape;
-using nncc::contrib::core::data::Tensor;
+namespace nnc
+{
+namespace mir
+{
/**
* @brief template class for serialization
std::string getSerializedObject(const T&);
};
-} // namespace core
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_CORE_IR_MODEL_SERIALIZER_H
#include <string>
#include "support/CommandLine.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace clopt
+namespace cli
{
/**
extern Option<std::string> interInNode; // name of input node in computational graph
extern Option<std::vector<std::string>> interOutNode; // name of output nodes in computational graph
-} // namespace clopt
-} // namespace contrib
-} // namespace nncc
+} // namespace cli
+} // namespace nnc
#endif //NNCC_COMMANDLINEARGUMENTS_H
#include "pass/PassData.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace pass
+namespace nnc
{
/**
virtual ~Pass() = default;
};
-} // namespace pass
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //NNCC_PASS_H
#include "core/modelIR/graph.h"
#include "core/modelIR/TensorVariant.h"
-using namespace nncc::contrib::core::IR::model;
-namespace nncc
-{
-namespace contrib
-{
-namespace pass
+namespace nnc
{
/**
/**
* @brief Implicit conversion from Graph* to PassData
*/
- /* implicit */ PassData(Graph *graph) { _dataContainer.graph = graph; _dataType = PDT::GRAPH; }
+ /* implicit */ PassData(mir::Graph *graph) { _dataContainer.graph = graph; _dataType = PDT::GRAPH; }
/**
* @brief Implicit conversion from PassData to Graph*
*/
- /* implicit */ operator Graph*() const {
+ /* implicit */ operator mir::Graph*() const {
if ( _dataType != PDT::GRAPH )
return nullptr;
return _dataContainer.graph;
/**
* @brief Implicit conversion from Graph* to PassData
*/
- /* implicit */ PassData(TensorVariant *tv) { _dataContainer.tensorVariant = tv; _dataType = PDT::TENSOR_VARIANT; }
+ /* implicit */ PassData(mir::TensorVariant *tv) { _dataContainer.tensorVariant = tv; _dataType = PDT::TENSOR_VARIANT; }
/**
* @brief Implicit conversion from PassData to Graph*
*/
- /* implicit */ operator TensorVariant*() const {
+ /* implicit */ operator mir::TensorVariant*() const {
if ( _dataType != PDT::TENSOR_VARIANT )
return nullptr;
return _dataContainer.tensorVariant;
// union contains all pointers to objects that can be returned from passes
union
{
- Graph *graph;
- TensorVariant *tensorVariant;
+ mir::Graph *graph;
+ mir::TensorVariant *tensorVariant;
void *unknown;
} _dataContainer;
};
-} // namespace pass
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //NNCC_PASSDATA_H
#include <exception>
#include <string>
-namespace nncc
-{
-namespace contrib
-{
-namespace pass
+namespace nnc
{
/**
std::string _msg;
};
-} // namespace pass
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //NNCC_PASSEXCEPTION_H
#include <queue>
-namespace nncc
-{
-namespace contrib
-{
-namespace pass
+namespace nnc
{
// forward declaration
Passes _passes; // registered passes
};
-} // namespace pass
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // __PASS_MANAGER_H__
#include "pass/Pass.h"
-using namespace nncc::contrib::pass;
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
/**
PassData run(PassData data) override;
};
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_ACL_CPP_GENERATOR_H_
#include <fstream>
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
/**
std::ofstream _out;
};
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_ARTIFACT_GENERATOR_CPP_CODE_H_
#include <fstream>
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
/**
std::ofstream _out;
};
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_ARTIFACT_GENERATOR_CPP_DECL_H_
#include "IArtifactGenerator.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
/**
std::string _name;
};
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_ARTIFACT_MODEL_H_
#ifndef _NNC_ARTIFACT_GENERATOR_INTERFACE_H_
#define _NNC_ARTIFACT_GENERATOR_INTERFACE_H_
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
class ArtifactEntity;
virtual void visit(const ArtifactModule* node) = 0;
};
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_ARTIFACT_GENERATOR_INTERFACE_H_
#include "pass/Pass.h"
#include "pass/PassData.h"
-using namespace nncc::contrib::pass;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
};
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //NNCC_CAFFEFRONTEND_H
#include <string>
#include <cstdint>
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
-{
-namespace common
+namespace nnc
{
// Class that can be used to memory map a file with NN model
int fd = -1;
};
-} // namespace common
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // FRONTEND_COMMON_MODEL_ALLOCATION_H_
#ifndef FRONTEND_COMMON_INCLUDE_NN_IMPORTER_
#define FRONTEND_COMMON_INCLUDE_NN_IMPORTER_
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
-{
-namespace common
+namespace nnc
{
class NNImporter
virtual void dump() = 0;
};
-} // namespace common
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // FRONTEND_COMMON_INCLUDE_NN_IMPORTER_
#include "core/modelIR/Shape.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace frontend
-{
-namespace common
-{
-
-using nncc::contrib::core::data::Shape;
class ShapeHelper
{
public:
template<typename Iterable>
- static Shape createShape(const Iterable &iter, std::size_t);
+ static mir::Shape createShape(const Iterable &iter, std::size_t);
- static Shape &cutOffBatchDim(Shape &shape);
+ static mir::Shape &cutOffBatchDim(mir::Shape &shape);
};
template<typename Iterable>
-Shape ShapeHelper::createShape(const Iterable &iter, std::size_t size)
+mir::Shape ShapeHelper::createShape(const Iterable &iter, std::size_t size)
{
- Shape sh;
+ mir::Shape sh;
sh.resize(static_cast<uint32_t>(size));
unsigned int i = 0;
return sh;
}
-} // namespace common
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // NNCC_SHAPE_HELPER_H
#include "core/modelIR/Tensor.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
+namespace mir
{
-namespace backend
-{
-namespace interpreter
-{
-namespace core
-{
-
-using namespace nncc::contrib::core::IR::model;
-using nncc::contrib::core::data::Index;
-using nncc::contrib::core::data::Tensor;
class NNInterpreter : public IVisitor
{
public:
explicit NNInterpreter() = default;
- void visit(ADT::INode::Ref node, ops::ConcatOp &op) override;
- void visit(ADT::INode::Ref node, ops::Conv2DOp &op) override;
- void visit(ADT::INode::Ref node, ops::DepthwiseConv2DOp &op) override;
- void visit(ADT::INode::Ref node, ops::ReluOp &op) override;
- void visit(ADT::INode::Ref node, ops::SoftmaxOp &op) override;
- void visit(ADT::INode::Ref node, ops::PoolOp &op) override;
- void visit(ADT::INode::Ref node, ops::FullyConnectedOp &op) override;
- void visit(ADT::INode::Ref node, ops::CappedReluOp &op) override;
- void visit(ADT::INode::Ref node, ops::BiasAddOp &op) override;
- void visit(ADT::INode::Ref node, ops::VariableOp &op) override;
- void visit(ADT::INode::Ref node, ops::ReshapeOp &op) override;
- void visit(ADT::INode::Ref node, ops::ScaleOp &op) override;
- void visit(ADT::INode::Ref node, ops::BatchNormOp &op) override;
- void visit(ADT::INode::Ref node, ops::DropoutOp &op) override;
+ void visit(INode::Ref node, ops::ConcatOp &op) override;
+ void visit(INode::Ref node, ops::Conv2DOp &op) override;
+ void visit(INode::Ref node, ops::DepthwiseConv2DOp &op) override;
+ void visit(INode::Ref node, ops::ReluOp &op) override;
+ void visit(INode::Ref node, ops::SoftmaxOp &op) override;
+ void visit(INode::Ref node, ops::PoolOp &op) override;
+ void visit(INode::Ref node, ops::FullyConnectedOp &op) override;
+ void visit(INode::Ref node, ops::CappedReluOp &op) override;
+ void visit(INode::Ref node, ops::BiasAddOp &op) override;
+ void visit(INode::Ref node, ops::VariableOp &op) override;
+ void visit(INode::Ref node, ops::ReshapeOp &op) override;
+ void visit(INode::Ref node, ops::ScaleOp &op) override;
+ void visit(INode::Ref node, ops::BatchNormOp &op) override;
+ void visit(INode::Ref node, ops::DropoutOp &op) override;
void setInput(const std::string &name, const TensorVariant& data);
- std::vector<TensorVariant> &getResult(ADT::INode::Ref node);
+ std::vector<TensorVariant> &getResult(INode::Ref node);
/**
* @brief Intermediate interpreter results getter
* @param nodeName - name of node
* @brief Used to collect nodes data for getting intermediate interpreter results
* @param n - reference to node
*/
- void mapByName(ADT::INode::Ref n);
+ void mapByName(INode::Ref n);
private:
std::map<size_t, std::vector<TensorVariant>> vars;
std::unordered_map<std::string, TensorVariant> data;
- std::map<std::string, ADT::INode::Ref> nodeByName;
+ std::map<std::string, INode::Ref> nodeByName;
};
-} // namespace core
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#endif //_NNC_BACKEND_INTERPRETER_CORE_INTERPRETER_
#include "pass/Pass.h"
#include "pass/PassData.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
+namespace nnc
{
-using namespace nncc::contrib;
-using namespace nncc::contrib::pass;
class InterpreterPass : public Pass
{
virtual ~InterpreterPass();
private:
- nncc::contrib::core::ADT::TensorVariant loadInput(const nncc::contrib::core::data::Shape &);
- nncc::contrib::core::ADT::TensorVariant *_out;
+ nnc::mir::TensorVariant loadInput(const nnc::mir::Shape &);
+ nnc::mir::TensorVariant *_out;
};
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //NNCC_INTERPRETERPASS_H
#include <string>
#include <ostream>
-using namespace nncc::contrib::pass;
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
class ModelAnalyzer;
std::string _paramsPath;
};
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_SOFT_BACKEND_BASE_GENERATOR_H_
#include "passes/soft_backend/BaseGenerator.h"
#include "pass/Pass.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
/**
CCodeGenerator() = default;
};
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_SOFT_BACKEND_C_GENERATOR_H_
#include "passes/soft_backend/BaseGenerator.h"
#include "pass/Pass.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
class TensorDescription;
CPPCodeGenerator(): BaseCodeGenerator() {}
};
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_SOFT_BACKEND_CPP_GENERATOR_H_
#include "pass/Pass.h"
#include "pass/PassData.h"
-using namespace nncc::contrib::pass;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
};
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //NNCC_TFLITEFRONTEND_H
#include <limits>
#include <iostream>
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace clopt
+namespace cli
{
// forward declarations
void checkOutDir(const Option<std::string> &);
void checkDebugFile(const Option<std::string> &);
-} // namespace clopt
-} // namespace contrib
-} // namespace nncc
+} // namespace cli
+} // namespace nnc
#endif //NNCC_COMMANDLINE_H
#include "pass/PassManager.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace pass
+namespace nnc
{
PassManager *PassManager::getPassManager()
} // registerPass
-} // namespace pass
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "passes/acl_soft_backend/AclCPPGenerator.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
AclCPPCodeGenerator::AclCPPCodeGenerator()
return aclCPPCodeGenerator;
}
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "passes/acl_soft_backend/ArtifactModel.h"
using namespace std;
-using namespace nncc::contrib::backend::soft;
-ArtifactGeneratorCppCode::ArtifactGeneratorCppCode(const string& name)
+namespace nnc
+{
+
+ArtifactGeneratorCppCode::ArtifactGeneratorCppCode(const string &name)
{
}
-void ArtifactGeneratorCppCode::visit(const ArtifactLiteral* node)
+void ArtifactGeneratorCppCode::visit(const ArtifactLiteral *node)
{
_out << node->getValue();
}
-void ArtifactGeneratorCppCode::visit(const ArtifactFunctionCall* node)
+void ArtifactGeneratorCppCode::visit(const ArtifactFunctionCall *node)
{
_out << node->getFuncName();
_out << "(";
bool addComma = false;
- for (const auto* par : node->getParamList())
+ for (const auto *par : node->getParamList())
{
if (addComma)
_out << ", ";
_out << ")";
}
-void ArtifactGeneratorCppCode::visit(const ArtifactBlock* node)
+void ArtifactGeneratorCppCode::visit(const ArtifactBlock *node)
{
_out << "{" << endl;
- for (const auto* st : node->getStatements())
+ for (const auto *st : node->getStatements())
{
st->accept(this);
_out << ";";
_out << "}" << endl;
}
-void ArtifactGeneratorCppCode::visit(const ArtifactFunction* node)
+void ArtifactGeneratorCppCode::visit(const ArtifactFunction *node)
{
}
-void ArtifactGeneratorCppCode::visit(const ArtifactModule* node)
+void ArtifactGeneratorCppCode::visit(const ArtifactModule *node)
{
}
+
+} // namespace nnc
#include "passes/acl_soft_backend/ArtifactModel.h"
using namespace std;
-using namespace nncc::contrib::backend::soft;
+
+namespace nnc
+{
ArtifactGeneratorCppDecl::ArtifactGeneratorCppDecl(const string& name)
{
{
}
+
+} // namespace nnc
#include "passes/acl_soft_backend/IArtifactGenerator.h"
using namespace std;
-using namespace nncc::contrib::backend::soft;
#include "caffe_dump_visitor.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
}
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "caffe_visitor.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
};
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //NNCC_CAFFE_DUMP_VISITOR_H
#include "caffe_importer.h"
-using namespace nncc::contrib::pass;
-using namespace nncc::contrib::frontend::caffe;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
PassData CaffeFrontend::run(PassData data)
{
(void)data;
- nncc::contrib::frontend::caffe::CaffeImporter importer{clopt::inputFile};
+ nnc::caffe::CaffeImporter importer{cli::inputFile};
bool success = importer.import();
if (!success)
{
- throw PassException("Could not load model: " + clopt::inputFile + "\n");
+ throw PassException("Could not load model: " + cli::inputFile + "\n");
}
- return reinterpret_cast<Graph *>(importer.createIR());
+ return reinterpret_cast<mir::Graph *>(importer.createIR());
}
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "caffe_importer.h"
#include "proto_reader.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
net.reset(new NetParameter());
// import success flag is returned
- return util::readProtoFromBinaryFile(modelFilename.c_str(), net.get());
+ return readProtoFromBinaryFile(modelFilename.c_str(), net.get());
}
void *CaffeImporter::createIR()
}
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "passes/common_frontend/nn_importer.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
using namespace ::caffe;
-class CaffeImporter : public common::NNImporter
+class CaffeImporter : public NNImporter
{
public:
explicit CaffeImporter(std::string filename) : modelFilename(std::move(filename)) {};
};
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // NNCC_CAFFE_IMPORTER_H
#include "passes/common_frontend/shape_helper.h"
#include "caffe_model_visitor.h"
-using namespace nncc::contrib::pass;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
-using VariableOp = nncc::contrib::core::IR::model::ops::VariableOp;
-using nncc::contrib::core::data::Shape;
-using nncc::contrib::core::data::util::transposeTensor;
+using VariableOp = nnc::mir::ops::VariableOp;
+using nnc::mir::Shape;
+using nnc::mir::transposeTensor;
void ModelVisitor::visit(const NetParameter& np)
{
for (const auto &shape : lp.input_param().shape())
{
- Shape sh = common::ShapeHelper::createShape(shape.dim(), shape.dim_size());
- inputShapes.push_back(common::ShapeHelper::cutOffBatchDim(sh));
+ Shape sh = ShapeHelper::createShape(shape.dim(), shape.dim_size());
+ inputShapes.push_back(ShapeHelper::cutOffBatchDim(sh));
}
if (!inputShapes.empty())
char *dstData = tensorBufferCopy.get();
memcpy(dstData, srcData, bufferSize);
- Shape tensorShape = common::ShapeHelper::createShape(
+ Shape tensorShape = ShapeHelper::createShape(
bp.shape().dim(), static_cast<size_t>(bp.shape().dim_size()));
auto tensor = std::make_shared<IrTensor>(tensorShape, tensorBufferCopy, type, elementSize);
}
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "caffe_visitor.h"
#include "caffe_op_creator.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
-using namespace ::caffe;
-
-using IrTensor = nncc::contrib::core::ADT::TensorVariant;
-using nncc::contrib::core::IR::model::Graph;
-using nncc::contrib::core::IR::model::ADT::INode;
-using nncc::contrib::core::data::Shape;
-
class ModelVisitor : public Visitor
{
public:
- ModelVisitor() : graph(new Graph()), opCreator(graph) {};
+ ModelVisitor() : graph(new mir::Graph()), opCreator(graph) {};
- void visit(const NetParameter&) override;
- void visit(const LayerParameter&) override;
- void visit(const BlobProto&) override;
- void visit(const BlobShape&) override;
+ void visit(const ::caffe::NetParameter&) override;
+ void visit(const ::caffe::LayerParameter&) override;
+ void visit(const ::caffe::BlobProto&) override;
+ void visit(const ::caffe::BlobShape&) override;
- Graph* getGraph();
+ mir::Graph* getGraph();
void setGraphOutputs();
void setIrNodeNames();
private:
- Graph* graph = nullptr;
+ mir::Graph* graph = nullptr;
OpCreator opCreator;
- std::vector<Shape> inputShapes;
- std::map<std::string, INode::Ref> opsForBlobsTheyOutput;
- std::vector<INode::Ref> graphOutputs;
+ std::vector<mir::Shape> inputShapes;
+ std::map<std::string, mir::INode::Ref> opsForBlobsTheyOutput;
+ std::vector<mir::INode::Ref> graphOutputs;
- std::shared_ptr<IrTensor> createTensor(const BlobProto&);
- std::vector<INode::Ref> createOpInputs(const LayerParameter&);
- std::vector<std::shared_ptr<IrTensor>> createOpParams(const LayerParameter&);
+ std::shared_ptr<mir::TensorVariant> createTensor(const ::caffe::BlobProto&);
+ std::vector<mir::INode::Ref> createOpInputs(const ::caffe::LayerParameter&);
+ std::vector<std::shared_ptr<mir::TensorVariant>> createOpParams(const ::caffe::LayerParameter&);
void createGraphInputs(const std::vector<std::string> &names,
- const std::vector<Shape> &shapes);
- void processInputLayer(const LayerParameter&);
- void processDeprecatedInput(const NetParameter&);
+ const std::vector<mir::Shape> &shapes);
+ void processInputLayer(const ::caffe::LayerParameter&);
+ void processDeprecatedInput(const ::caffe::NetParameter&);
};
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //NNCC_CAFFE_IR_VISITOR_H
#include <cmath>
-using namespace nncc::contrib::pass;
-using namespace nncc::contrib::core::data;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
-namespace util
-{
+using namespace mir;
+
template <typename OptsType>
static inline bool has2DStride(const OptsType& opts)
assert(kernelOutChannels % groups == 0);
// Iterate over "unfolded" kernel Shape and insert appropriate values into result kernel
- for (const core::data::Index &idx: core::data::ShapeRange(unfoldKernelShape))
+ for (const mir::Index &idx: mir::ShapeRange(unfoldKernelShape))
{
auto inGroupNo = idx.at(kernelInChanNum) / inGroupSize;
auto outGroupNo = idx.at(kernelOutChanNum) / outGroupSize;
if (inGroupNo == outGroupNo)
{
// compute index in original kernel that corresponds output index
- core::data::Index foldedIdx(idx);
+ mir::Index foldedIdx(idx);
foldedIdx.at(kernelInChanNum) %= inGroupSize;
std::copy(foldedKernel->at(foldedIdx), foldedKernel->at(foldedIdx) + dataSize, unfoldKernel->at(idx));
return unfoldKernel;
}
-} // namespace util
std::vector<INode::Ref> OpCreator::createConv2D(InputOps inputs, InputParams params,
const caffe::ConvolutionParameter& opts)
assert(opts.stride_size() <= 2);
ops::PaddingType padType = ops::PaddingType::Custom;
- Shape strideShape = util::getConvStride(opts);
+ Shape strideShape = getConvStride(opts);
std::shared_ptr<IrTensor> unfoldedTensor = params[0];
if (opts.group() != 1)
{
// first we need to convert kernel of grouped convolution to appropriate ordinary kernel
- unfoldedTensor = util::fixGroupedKernel(opts.group(), params[0]);
+ unfoldedTensor = fixGroupedKernel(opts.group(), params[0]);
}
auto outputs = createOp<ops::Conv2DOp>(inputs, std::move(*unfoldedTensor),
strideShape, padType);
{
(void)params;
- return createOp<ops::ConcatOp>(inputs, inputs.size(), util::getAxisValue(opts));
+ return createOp<ops::ConcatOp>(inputs, inputs.size(), getAxisValue(opts));
}
std::vector<INode::Ref> OpCreator::createPool(InputOps inputs, InputParams params,
throw PassException("Pooling layer global_pooling param is not supported yet");
}
- Shape windowShape = util::getPoolWindowShape(opts);
- ops::PoolOp::PoolingType poolType = util::getPoolingType(opts);
+ Shape windowShape = getPoolWindowShape(opts);
+ ops::PoolOp::PoolingType poolType = getPoolingType(opts);
ops::PaddingType padType = ops::PaddingType::Custom;
- Shape stride = util::getPoolStride(opts);
+ Shape stride = getPoolStride(opts);
ops::PoolOp::BorderType borderType;
switch (poolType)
{
{
(void)params;
- return createOp<ops::SoftmaxOp>(inputs, util::getAxisValue(opts));
+ return createOp<ops::SoftmaxOp>(inputs, getAxisValue(opts));
}
/**
throw PassException("Reshape layer doesn't have shape parameter");
}
- Shape newShape = common::ShapeHelper::createShape(opts.shape().dim(), opts.shape().dim_size());
+ Shape newShape = ShapeHelper::createShape(opts.shape().dim(), opts.shape().dim_size());
for (unsigned int i = 0; i < newShape.rank(); ++i)
{
if (params[2]->getShape().rank() != 1 && params[2]->getShape().dim(0) != 1)
throw PassException("Unexpected shape of scale parameter in batch norm");
- float scaleFactor = *reinterpret_cast<float *>(params[2]->at(core::data::Index{0}));
+ float scaleFactor = *reinterpret_cast<float *>(params[2]->at(mir::Index{0}));
// Code below is taken from cpu caffe implementation:
// https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_norm_layer.cpp#L100
if (scaleFactor != 0.0f)
}
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "caffe/proto/caffe.pb.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
using namespace ::caffe;
-namespace ops = nncc::contrib::core::IR::model::ops;
-using nncc::contrib::core::IR::model::Graph;
-using nncc::contrib::core::IR::model::ADT::INode;
-using IrTensor = nncc::contrib::core::ADT::TensorVariant;
-using nncc::contrib::core::data::Shape;
+namespace ops = nnc::mir::ops;
+using nnc::mir::Graph;
+using nnc::mir::INode;
+using IrTensor = nnc::mir::TensorVariant;
+using nnc::mir::Shape;
class OpCreator
{
}
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //NNCC_CAFFE_OP_CREATOR_H
#include "caffe/proto/caffe.pb.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
};
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //NNCC_CAFFE_VISITOR_H
#include "caffe_walker.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
}
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "caffe_visitor.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
};
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // NNCC_CAFFE_WALKER_H
#include "proto_reader.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
-namespace util
-{
const int protoBytesLimit = INT_MAX;
const int protoBytesWarningLimit = 1024 * 1024 * 512;
return success;
}
-} // namespace util
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/text_format.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace caffe
{
-namespace util
-{
using google::protobuf::io::FileInputStream;
using google::protobuf::io::ZeroCopyInputStream;
bool readProtoFromTextFile(const char* filename, ::caffe::NetParameter* proto);
bool readProtoFromBinaryFile(const char* filename, ::caffe::NetParameter* proto);
-} // namespace util
} // namespace caffe
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // NNCC_PROTO_READER_H
#include "passes/common_frontend/model_allocation.h"
-using namespace nncc::contrib::frontend::common;
+namespace nnc
+{
ModelAllocation::ModelAllocation(std::string filename)
{
stat st{};
int flag = fstat(fd, &st);
if (flag == -1)
- {
+ {
return;
}
const void *ModelAllocation::getDataPnt() { return mmapState == MAPPED ? dataPnt : nullptr; }
size_t ModelAllocation::getNumBytes() { return mmapState == MAPPED ? numBytes : 0; }
+
+} // namespace nnc
#include "passes/common_frontend/shape_helper.h"
#include "pass/PassException.h"
-using namespace nncc::contrib::pass;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
-{
-namespace common
+namespace nnc
{
-Shape &ShapeHelper::cutOffBatchDim(Shape &shape)
+mir::Shape &ShapeHelper::cutOffBatchDim(mir::Shape &shape)
{
if (shape.dim(0) != 1)
{
return shape;
}
-} // namespace common
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "ops/Dropout.h"
#include "ops/BatchNorm.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace core
+namespace nnc
{
-using nncc::contrib::core::data::Tensor;
-namespace ops = nncc::contrib::core::IR::model::ops;
-namespace impl = nncc::contrib::backend::interpreter::impl;
+using namespace nnc::mir;
std::vector<TensorVariant> &NNInterpreter::var(size_t id) { return vars[id]; }
void NNInterpreter::setInput(const std::string &name, const TensorVariant& t) { data.emplace(name, t); }
-void NNInterpreter::visit(ADT::INode::Ref node, ops::VariableOp &op)
+void NNInterpreter::visit(INode::Ref node, ops::VariableOp &op)
{
mapByName(node);
(void)op;
var(node->getId()) = {it->second};
}
-std::vector<TensorVariant> &NNInterpreter::getResult(ADT::INode::Ref node)
+std::vector<TensorVariant> &NNInterpreter::getResult(INode::Ref node)
{
auto res = vars.find(node->getId());
if (res != vars.end())
return getResult(it->second);
}
-void NNInterpreter::visit(ADT::INode::Ref node, ops::ConcatOp &op)
+void NNInterpreter::visit(INode::Ref node, ops::ConcatOp &op)
{
mapByName(node);
auto &operands = node->getPrevNodes();
{
ins.push_back(var(in.node->getId())[in.index]);
}
- var(node->getId()) = impl::Concat<float>(ins, op.getOutputShape(0), op.getAxis())();
+ var(node->getId()) = Concat<float>(ins, op.getOutputShape(0), op.getAxis())();
}
-void NNInterpreter::visit(ADT::INode::Ref node, ops::Conv2DOp &op)
+void NNInterpreter::visit(INode::Ref node, ops::Conv2DOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
- var(node->getId()) = impl::Conv2D(var(operand.node->getId())[operand.index], op)();
+ var(node->getId()) = Conv2D(var(operand.node->getId())[operand.index], op)();
}
-void NNInterpreter::visit(ADT::INode::Ref node, ops::ReshapeOp &op)
+void NNInterpreter::visit(INode::Ref node, ops::ReshapeOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
auto input = var(operand.node->getId())[operand.index];
- var(node->getId()) = impl::Reshape<float>(input, op)();
+ var(node->getId()) = Reshape<float>(input, op)();
}
-void NNInterpreter::visit(ADT::INode::Ref node, ops::ReluOp &op)
+void NNInterpreter::visit(INode::Ref node, ops::ReluOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
Tensor<float> input(var(operand.node->getId())[operand.index]);
- var(node->getId()) = impl::Fill<float>(
+ var(node->getId()) = Fill<float>(
op.getOutputShape(0), [&input](const Index &id) { return std::max(input.at(id), 0.0f); })();
}
-void NNInterpreter::visit(ADT::INode::Ref node, ops::SoftmaxOp &op)
+void NNInterpreter::visit(INode::Ref node, ops::SoftmaxOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
auto input = var(operand.node->getId())[operand.index];
- var(node->getId()) = impl::Softmax(op.getInputShape(0), input, op.getAxis())();
+ var(node->getId()) = Softmax(op.getInputShape(0), input, op.getAxis())();
}
-void NNInterpreter::visit(ADT::INode::Ref node, ops::PoolOp &op)
+void NNInterpreter::visit(INode::Ref node, ops::PoolOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
auto input = var(operand.node->getId())[operand.index];
- var(node->getId()) = impl::Pool(input, op)();
+ var(node->getId()) = Pool(input, op)();
}
-void NNInterpreter::visit(ADT::INode::Ref node, ops::FullyConnectedOp &op)
+void NNInterpreter::visit(INode::Ref node, ops::FullyConnectedOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
TensorVariant input = var(operand.node->getId())[operand.index];
- var(node->getId()) = impl::FullyConnected<float>(input, op)();
+ var(node->getId()) = FullyConnected<float>(input, op)();
}
-void NNInterpreter::visit(ADT::INode *node, ops::CappedReluOp &op)
+void NNInterpreter::visit(INode *node, ops::CappedReluOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
Tensor<float> input(var(operand.node->getId())[operand.index]);
- var(node->getId()) = impl::Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
+ var(node->getId()) = Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
return std::min(std::max(input.at(id), 0.0f), op.getCap());
})();
}
-void NNInterpreter::visit(ADT::INode *node, ops::DepthwiseConv2DOp &op)
+void NNInterpreter::visit(INode *node, ops::DepthwiseConv2DOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
TensorVariant input(var(operand.node->getId())[operand.index]);
- var(node->getId()) = impl::DepthwiseConv2D(input, op)();
+ var(node->getId()) = DepthwiseConv2D(input, op)();
}
-void NNInterpreter::visit(ADT::INode *node, ops::BiasAddOp &op)
+void NNInterpreter::visit(INode *node, ops::BiasAddOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
auto input = var(operand.node->getId())[operand.index];
- var(node->getId()) = impl::BiasAdd(input, op.getWeights(), op.getOutputShape(0))();
+ var(node->getId()) = BiasAdd(input, op.getWeights(), op.getOutputShape(0))();
}
-void NNInterpreter::visit(ADT::INode *node, ops::BatchNormOp &op)
+void NNInterpreter::visit(INode *node, ops::BatchNormOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
TensorVariant input(var(operand.node->getId())[operand.index]);
// TODO implement this
- var(node->getId()) = impl::BatchNorm<float>(input, op)();
+ var(node->getId()) = BatchNorm<float>(input, op)();
}
-void NNInterpreter::visit(ADT::INode *node, ops::ScaleOp &op)
+void NNInterpreter::visit(INode *node, ops::ScaleOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
TensorVariant input(var(operand.node->getId())[operand.index]);
// TODO implement this
- var(node->getId()) = impl::Scale(input, op)();
+ var(node->getId()) = Scale(input, op)();
}
-void NNInterpreter::visit(ADT::INode *node, ops::DropoutOp &op)
+void NNInterpreter::visit(INode *node, ops::DropoutOp &op)
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
TensorVariant input(var(operand.node->getId())[operand.index]);
// TODO implement this
- var(node->getId()) = impl::Dropout<float>(input, op)();
+ var(node->getId()) = Dropout<float>(input, op)();
}
-void NNInterpreter::mapByName(ADT::INode::Ref n) {
+void NNInterpreter::mapByName(INode::Ref n) {
auto &nodeName = n->getName();
if (nodeByName.find(nodeName) != nodeByName.end())
{
nodeByName[nodeName] = n;
}
-} // namespace core
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "core/modelIR/Tensor.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
+namespace nnc
{
-using namespace nncc::contrib;
-using namespace nncc::contrib::pass;
-using namespace nncc::contrib::core::data;
-using namespace nncc::contrib::core::IR::model;
-using nncc::contrib::core::data::Shape;
-using nncc::contrib::backend::interpreter::core::NNInterpreter;
+using namespace mir;
Pass &InterpreterPass::getInstance() {
static InterpreterPass instance;
g->accept(&shapeInference);
// Check nodes
- auto inputNode = g->getInput(clopt::interInNode);
+ auto inputNode = g->getInput(cli::interInNode);
if (inputNode == nullptr) {
- throw PassException("input node <" + clopt::interInNode +"> not found" );
+ throw PassException("input node <" + cli::interInNode +"> not found" );
}
auto input = loadInput(inputNode->getOperation()->getOutputShape(0));
- interpreter.setInput(clopt::interInNode, input);
+ interpreter.setInput(cli::interInNode, input);
g->accept(&interpreter);
// Check nodes
- for (auto &tensorName : clopt::interOutNode) {
+ for (auto &tensorName : cli::interOutNode) {
auto outputNode = interpreter.getOperationResult(tensorName);
if (outputNode.empty()) {
throw PassException("output node <" + tensorName + "> not found");
}
}
- bool is_several_outs = (clopt::interOutNode.size() > 1);
+ bool is_several_outs = (cli::interOutNode.size() > 1);
- nncc::contrib::core::ADT::TensorVariant *out = nullptr;
- for (auto &tensorName : clopt::interOutNode) {
+ nnc::mir::TensorVariant *out = nullptr;
+ for (auto &tensorName : cli::interOutNode) {
out = new TensorVariant(interpreter.getOperationResult(tensorName)[0]);
#ifdef NNC_HDF5_SUPPORTED
- writeTensorToHDF5File(out, tensorName, clopt::artifactDir);
+ writeTensorToHDF5File(out, tensorName, cli::artifactDir);
#else
std::cout << "Result <" << tensorName << "> wasn't saved, due to lack of HDF5" << std::endl;
#endif // NNC_HDF5_SUPPORTED
TensorVariant InterpreterPass::loadInput(const Shape &shape)
{
- auto f = fopen(clopt::interInputData.c_str(), "rb");
+ auto f = fopen(cli::interInputData.c_str(), "rb");
assert(f && "Cannot open file");
int is_error = fseek(f, 0L, SEEK_END);
// Check size
if (len != tensorSize) {
std::stringstream info;
- info << "Wrong input file size <" << clopt::interInputData << "> = " << len << ". Should be :" << tensorSize;
+ info << "Wrong input file size <" << cli::interInputData << "> = " << len << ". Should be :" << tensorSize;
throw PassException(info.str());
}
delete _out;
}
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "core/modelIR/operations/batch_norm.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
-{
-
-using nncc::contrib::core::IR::model::ops::BatchNormOp;
/**
* @brief Implements BatchNormOp for interpreter backend
* @param in input data
* @param op batch normalization operation description
*/
- explicit BatchNorm(const TensorVariant& input, const BatchNormOp& op) : _input(input), _op(op) {}
+ explicit BatchNorm(const mir::TensorVariant& input, const mir::ops::BatchNormOp& op) : _input(input), _op(op) {}
/**
* @brief computes operation application result
* @return vector of all outputs from this node
*/
- std::vector<TensorVariant> operator()() override
+ std::vector<mir::TensorVariant> operator()() override
{
//For now BatchNorm just copies input to output
- return Fill<T>(_input.getShape(), [this](const Index& idx) {
+ return Fill<T>(_input.getShape(), [this](const mir::Index& idx) {
return _input.at(idx);
})();
}
private:
- const Tensor<float> _input;
- const BatchNormOp& _op;
+ const mir::Tensor<float> _input;
+ const mir::ops::BatchNormOp& _op;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // _NNC_CORE_BACKEND_INTERPRETER_BATCHNORM_IMPL_
#include "OperationImpl.h"
#include "Fill.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
class BiasAdd : public OperationImpl<float>
{
public:
- BiasAdd(const TensorVariant &input, const TensorVariant &weights, const Shape &outputShape)
+ BiasAdd(const mir::TensorVariant &input, const mir::TensorVariant &weights, const mir::Shape &outputShape)
: _weights(weights), _input(input), _outputShape(outputShape)
{
assert(_weights.getShape().rank() == 1);
assert(_outputShape.dim(_outputShape.rank() - 1) == _weights.getShape().dim(0));
}
- std::vector<TensorVariant> operator()() override
+ std::vector<mir::TensorVariant> operator()() override
{
- return Fill<float>(_outputShape, [this](const Index &idx) {
+ return Fill<float>(_outputShape, [this](const mir::Index &idx) {
return _input.at(idx) + _weights.at({idx.at(idx.rank() - 1)});
})();
}
private:
- const Tensor<float> _weights;
- const Tensor<float> _input;
- const Shape &_outputShape;
+ const mir::Tensor<float> _weights;
+ const mir::Tensor<float> _input;
+ const mir::Shape &_outputShape;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_BACKEND_INTERPRETER_BIAS_
#include "Fill.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
template <typename T> class Concat : public Fill<T>
{
public:
- explicit Concat(const std::vector<TensorVariant> &inputs, const Shape &outputShape,
+ explicit Concat(const std::vector<mir::TensorVariant> &inputs, const mir::Shape &outputShape,
unsigned int axis)
: Fill<T>(outputShape, getSingleFunction(inputs, axis))
{
}
private:
- const std::function<T(const Index &)> getSingleFunction(const std::vector<TensorVariant> &inputs,
+ const std::function<T(const mir::Index &)> getSingleFunction(const std::vector<mir::TensorVariant> &inputs,
unsigned int axis)
{
- std::vector<Tensor<T>> inputAccessors;
+ std::vector<mir::Tensor<T>> inputAccessors;
for (auto &in : inputs)
{
inputAccessors.emplace_back(in);
}
- return std::function<T(const Index &)>([inputAccessors, axis](const Index &id) -> T {
+ return std::function<T(const mir::Index &)>([inputAccessors, axis](const mir::Index &id) -> T {
unsigned int mi = 0;
uint32_t along_axis = id.at(axis);
mi++;
}
- Index local_id = id;
+ mir::Index local_id = id;
local_id.at(axis) = along_axis;
return inputAccessors[mi].at(local_id);
}
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_FILL_IMPL_
#include "Depthwise_conv_2D.h"
#include "common.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
+using namespace mir;
+using namespace mir::ops;
+
std::vector<TensorVariant> DepthwiseConv2D::operator()()
{
TensorVariant res = allocate_tensor(_out_shape);
assert(_kernel.getShape().dim(2) == _input.getShape().dim(2));
}
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "core/modelIR/operations/common.h"
#include "core/modelIR/operations/depthwise_conv2d_op.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
-{
-
-using nncc::contrib::core::IR::model::ops::DepthwiseConv2DOp;
-using nncc::contrib::core::IR::model::ops::PaddingType;
class DepthwiseConv2D : public OperationImpl<float>
{
public:
- explicit DepthwiseConv2D(const TensorVariant &input, const DepthwiseConv2DOp &op);
- virtual std::vector<TensorVariant> operator()() override;
+ explicit DepthwiseConv2D(const mir::TensorVariant &input, const mir::ops::DepthwiseConv2DOp &op);
+ virtual std::vector<mir::TensorVariant> operator()() override;
private:
- const Tensor<float> _input;
- const Tensor<float> _kernel;
- const Shape _strides;
- const PaddingType _padding;
- const Shape &_out_shape;
- const DepthwiseConv2DOp &_op;
+ const mir::Tensor<float> _input;
+ const mir::Tensor<float> _kernel;
+ const mir::Shape _strides;
+ const mir::ops::PaddingType _padding;
+ const mir::Shape &_out_shape;
+ const mir::ops::DepthwiseConv2DOp &_op;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_DEPTHWISE_CONV2D_IMPL_
#include "core/modelIR/operations/dropout_op.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
-{
-
-using nncc::contrib::core::IR::model::ops::DropoutOp;
/**
* @brief Implements DropoutOp for interpreter backend
* @param in input data
* @param op dropout operation description
*/
- explicit Dropout(const TensorVariant& in, const DropoutOp& op) : _input(in), _op(op) {}
+ explicit Dropout(const mir::TensorVariant& in, const mir::ops::DropoutOp& op) : _input(in), _op(op) {}
/**
* @brief computes operation application result
* @return vector of all outputs from this node
*/
- std::vector<TensorVariant> operator()() override;
+ std::vector<mir::TensorVariant> operator()() override;
private:
- const Tensor<float> _input;
- const DropoutOp& _op;
+ const mir::Tensor<float> _input;
+ const mir::ops::DropoutOp& _op;
};
template<typename T>
-std::vector<TensorVariant> Dropout<T>::operator()()
+std::vector<mir::TensorVariant> Dropout<T>::operator()()
{
//For now dropout just copies input to output
- return Fill<T>(_input.getShape(), [this](const Index& idx) {
+ return Fill<T>(_input.getShape(), [this](const mir::Index& idx) {
return _input.at(idx);
})();
}
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // _NNC_CORE_BACKEND_INTERPRETER_DROPOUT_IMPL_
#include "OperationImpl.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
-{
-
-using nncc::contrib::core::ADT::TensorVariant;
-using nncc::contrib::core::data::Tensor;
template <typename T> class Elementwise : public OperationImpl<T>
{
public:
- explicit Elementwise(const Shape &shape) : _shape(shape){};
+ explicit Elementwise(const mir::Shape &shape) : _shape(shape){};
- std::vector<TensorVariant> operator()() override
+ std::vector<mir::TensorVariant> operator()() override
{
auto res = OperationImpl<T>::allocate_tensor(_shape);
- Tensor<T> accessor(res);
+ mir::Tensor<T> accessor(res);
elemwise(accessor);
return {res};
}
- virtual T single(const Index &index) = 0;
+ virtual T single(const mir::Index &index) = 0;
protected:
- void elemwise(Tensor<T> &res)
+ void elemwise(mir::Tensor<T> &res)
{
- for (auto &idx : ShapeRange(_shape))
+ for (auto &idx : mir::ShapeRange(_shape))
{
res.at(idx) = single(idx);
}
}
- const Shape &_shape;
+ const mir::Shape &_shape;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_ELEMENTWISE_IMPL_
#include "Elementwise.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
template <typename T> class Fill : public Elementwise<T>
{
public:
- explicit Fill(const Shape &shape, std::function<T(const Index &)> f)
+ explicit Fill(const mir::Shape &shape, std::function<T(const mir::Index &)> f)
: Elementwise<T>(shape), _fval(f)
{
}
- T single(const Index &index) override { return _fval(index); }
+ T single(const mir::Index &index) override { return _fval(index); }
private:
- const std::function<T(const Index &)> _fval;
+ const std::function<T(const mir::Index &)> _fval;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_FILL_
#include "core/modelIR/operations/fully_connected_op.h"
#include "OperationImpl.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
-{
-
-using nncc::contrib::core::IR::model::ops::FullyConnectedOp;
template<typename T>
class FullyConnected : public OperationImpl<T>
{
public:
- FullyConnected(const TensorVariant &_input, const FullyConnectedOp &_op) : _op(_op), _input(_input) {}
+ FullyConnected(const mir::TensorVariant &_input, const mir::ops::FullyConnectedOp &_op) : _op(_op), _input(_input) {}
- std::vector<TensorVariant> operator()() override
+ std::vector<mir::TensorVariant> operator()() override
{
- TensorVariant res = OperationImpl<T>::allocate_tensor(_op.getOutputShape(0));
- Tensor<T> accessor(res);
+ mir::TensorVariant res = OperationImpl<T>::allocate_tensor(_op.getOutputShape(0));
+ mir::Tensor<T> accessor(res);
- ShapeRange outRange(res.getShape());
+ mir::ShapeRange outRange(res.getShape());
- Tensor<T> weights(_op.getWeights());
- const Shape &wShape = weights.getShape();
+ mir::Tensor<T> weights(_op.getWeights());
+ const mir::Shape &wShape = weights.getShape();
uint32_t wRank = wShape.rank();
- const Shape &inShape = _input.getShape();
+ const mir::Shape &inShape = _input.getShape();
uint32_t inRank = inShape.rank();
assert(inShape.dim(inRank - 1) == wShape.dim(wRank - 2));
uint32_t col;
for (auto &outIdx : outRange)
{
- Index tIdx = outIdx;
+ mir::Index tIdx = outIdx;
T& outputElement = accessor.at(outIdx);
col = tIdx.at(wRank - 1);
row = tIdx.at(wRank - 2);
}
private:
- const FullyConnectedOp &_op;
- const Tensor<T> _input;
+ const mir::ops::FullyConnectedOp &_op;
+ const mir::Tensor<T> _input;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_FULLYCONNECTED_
#include "core/modelIR/Shape.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
-{
-
-using namespace nncc::contrib::core::data;
-using nncc::contrib::core::ADT::TensorVariant;
-
-using nncc::contrib::core::data::Shape;
template <typename T> class OperationImpl
{
public:
- virtual std::vector<TensorVariant> operator()() = 0;
+ virtual std::vector<mir::TensorVariant> operator()() = 0;
protected:
- TensorVariant allocate_tensor(const Shape &shape)
+ mir::TensorVariant allocate_tensor(const mir::Shape &shape)
{
size_t data_size = 1;
for (uint32_t i = 0; i < shape.rank(); ++i)
std::shared_ptr<T> data(od, [](const T* d) { delete[] d; });
// Use hardcoded DTYPE for now, since theres no support for operations on types other than
// floats
- TensorVariant t(shape, data, TensorVariant::DTYPE::FLOAT);
+ mir::TensorVariant t(shape, data, mir::TensorVariant::DTYPE::FLOAT);
return t;
}
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_OPERATION_IMPL_
#include "Pool.h"
#include "common.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
+using namespace mir;
+using namespace mir::ops;
+
Pool::Pool(const TensorVariant &_input, const PoolOp &op) : _op(op), _input(_input)
{
assert(op.getWindowShape().rank() == _input.getShape().rank());
}
}
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "core/modelIR/operations/pool_op.h"
#include "core/modelIR/operations/common.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
-{
-
-using nncc::contrib::core::IR::model::ops::PaddingType;
-using nncc::contrib::core::IR::model::ops::PoolOp;
class Pool : public OperationImpl<float>
{
public:
- std::vector<TensorVariant> operator()() override;
+ std::vector<mir::TensorVariant> operator()() override;
- explicit Pool(const TensorVariant &_input, const PoolOp &op);
+ explicit Pool(const mir::TensorVariant &_input, const mir::ops::PoolOp &op);
float poolingFunc(float prev, float val);
private:
- const PoolOp&_op;
- const Tensor<float> _input;
+ const mir::ops::PoolOp&_op;
+ const mir::Tensor<float> _input;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_POOL_
#include "Fill.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
template <typename T> class Reduce : public OperationImpl<T>
{
public:
- Reduce(const Shape &inputShape, const Shape &outputShape, const TensorVariant &input, uint32_t axis,
+ Reduce(const mir::Shape &inputShape, const mir::Shape &outputShape, const mir::TensorVariant &input, uint32_t axis,
std::function<T(const T &, const T &)> reduceFunc)
: _inShape(inputShape), _outputShape(outputShape), _input(input), _axis(axis),
_reduceFunc(reduceFunc)
assert(outputShape.dim(axis) == 1);
}
- std::vector<TensorVariant> operator()() override
+ std::vector<mir::TensorVariant> operator()() override
{
- return Fill<T>(_outputShape, [this](const Index &id) {
+ return Fill<T>(_outputShape, [this](const mir::Index &id) {
T element = T();
- Index inputId = id;
+ mir::Index inputId = id;
uint32_t end = _inShape.dim(_axis);
for (uint32_t i = 0; i < end; ++i)
{
}
private:
- const Shape &_inShape;
- const Shape &_outputShape;
- const Tensor<T> _input;
+ const mir::Shape &_inShape;
+ const mir::Shape &_outputShape;
+ const mir::Tensor<T> _input;
const uint32_t _axis;
const std::function<T(T, T)> _reduceFunc;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_REDUCE_IMPL_
#include "OperationImpl.h"
#include "Fill.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
-{
-
-using nncc::contrib::core::IR::model::ops::ReshapeOp;
template <typename T> class Reshape : public OperationImpl<T>
{
public:
- Reshape(const TensorVariant &input, const ReshapeOp &op) : _input(input), _op(op)
+ Reshape(const mir::TensorVariant &input, const mir::ops::ReshapeOp &op) : _input(input), _op(op)
{
assert(num_elements(_op.getInputShape(0)) == num_elements(_op.getOutputShape(0)));
}
- std::vector<TensorVariant> operator()() override
+ std::vector<mir::TensorVariant> operator()() override
{
- const Shape &outShape = _op.getOutputShape(0);
- const Shape &inShape = _op.getInputShape(0);
+ const mir::Shape &outShape = _op.getOutputShape(0);
+ const mir::Shape &inShape = _op.getInputShape(0);
- ShapeRange inRange(inShape);
- ShapeRange outRange(outShape);
+ mir::ShapeRange inRange(inShape);
+ mir::ShapeRange outRange(outShape);
auto inIter = inRange.begin();
auto out = OperationImpl<T>::allocate_tensor(outShape);
- Tensor<float> outAccessor(out);
+ mir::Tensor<float> outAccessor(out);
// Shapes element count compared in Reshape ctor
- return Fill<T>(outShape, [this, &inIter](const Index &) -> float { return _input.at(*inIter++); })();
+ return Fill<T>(outShape, [this, &inIter](const mir::Index &) -> float { return _input.at(*inIter++); })();
}
private:
- Tensor<T> _input;
- const ReshapeOp &_op;
+ mir::Tensor<T> _input;
+ const mir::ops::ReshapeOp &_op;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_RESHAPE_IMPL_
#include "Fill.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
-std::vector<TensorVariant> Scale::operator()()
+std::vector<mir::TensorVariant> Scale::operator()()
{
//For now handles only most common case with scale applied by last dimension
- Tensor<float> weightsAccessor(_op.getWeights());
- return Fill<float>(_input.getShape(), [this, weightsAccessor](const Index &idx) {
+ mir::Tensor<float> weightsAccessor(_op.getWeights());
+ return Fill<float>(_input.getShape(), [this, weightsAccessor](const mir::Index &idx) {
return _input.at(idx) * weightsAccessor.at({idx.at(idx.rank() - 1)});
})();
}
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "core/modelIR/operations/scale_op.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
/**
* @brief Implements ScaleOp for interpreter backend
* @todo check if I need support for any datatypes other than DTYPE::FLOAT
*/
-using nncc::contrib::core::IR::model::ops::ScaleOp;
-
class Scale : public OperationImpl<float> {
public:
/**
* @param in input data
* @param op scale operation description
*/
- explicit Scale(const TensorVariant& in, const ScaleOp& op) : _input(in), _op(op) {}
+ explicit Scale(const mir::TensorVariant& in, const mir::ops::ScaleOp& op) : _input(in), _op(op) {}
/**
* @brief computes operation application result
* @return vector of all outputs from this node
*/
- std::vector<TensorVariant> operator()() override;
+ std::vector<mir::TensorVariant> operator()() override;
private:
- Tensor<float> _input;
- const ScaleOp& _op;
+ mir::Tensor<float> _input;
+ const mir::ops::ScaleOp& _op;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // _NNC_CORE_BACKEND_INTERPRETER_SCALE_IMPL_
#include "Elementwise.h"
#include "Reduce.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
class Softmax : public OperationImpl<float>
{
public:
- Softmax(const Shape &inputShape, const TensorVariant &input, uint32_t axis)
+ Softmax(const mir::Shape &inputShape, const mir::TensorVariant &input, uint32_t axis)
: _inShape(inputShape), _axis(axis), _input(input)
{
}
- std::vector<TensorVariant> operator()() override
+ std::vector<mir::TensorVariant> operator()() override
{
- Tensor<float> inputAccessor(_input);
+ mir::Tensor<float> inputAccessor(_input);
- Shape expsumShape = _inShape;
+ mir::Shape expsumShape = _inShape;
expsumShape.dim(_axis) = 1;
- TensorVariant expsum =
+ mir::TensorVariant expsum =
Reduce<float>(_inShape, expsumShape, _input, _axis,
[](float expsum, float item) { return expsum + std::exp(item); })()[0];
- Tensor<float> expsumAccessor(expsum);
+ mir::Tensor<float> expsumAccessor(expsum);
- return Fill<float>(_inShape, [&inputAccessor, &expsumAccessor, this](const Index &id) {
- Index expsumIndex = id;
+ return Fill<float>(_inShape, [&inputAccessor, &expsumAccessor, this](const mir::Index &id) {
+ mir::Index expsumIndex = id;
expsumIndex.at(_axis) = 0;
return std::exp(inputAccessor.at(id)) / expsumAccessor.at(expsumIndex);
})();
};
private:
- const Shape &_inShape;
+ const mir::Shape &_inShape;
const uint32_t _axis;
- const TensorVariant _input;
+ const mir::TensorVariant _input;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_SOFTMAX_IMPL_
#include "common.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
+using namespace mir;
+
void translate(Index &translatedIndex, const Index &sourceIndex, const Index &kernelIndex,
const Shape &strides, const Index &paddings)
{
}
}
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "core/modelIR/Index.h"
#include "core/modelIR/Shape.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
-{
-
-using nncc::contrib::core::data::Index;
-using nncc::contrib::core::data::Shape;
-
///
/// Get current input element index using output index, current kernel index, strides and paddings
/// \param[in] kernelIndex current kernel element
/// \param[in] strides
/// \param[in] paddings
-void translate(Index &translatedIndex, const Index &sourceIndex, const Index &kernelIndex,
- const Shape &strides, const Index &paddings);
+void translate(mir::Index &translatedIndex, const mir::Index &sourceIndex, const mir::Index &kernelIndex,
+ const mir::Shape &strides, const mir::Index &paddings);
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "conv_2D.h"
#include "common.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
-using namespace nncc::contrib::core::data;
+using namespace mir;
+using namespace mir::ops;
Index reduce(const Index &idx)
{
assert(_op.getPadding(2) == 0);
}
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "OperationImpl.h"
#include "core/modelIR/operations/conv_2d_op.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
-{
-using nncc::contrib::core::IR::model::ops::Conv2DOp;
-using nncc::contrib::core::IR::model::ops::PaddingType;
class Conv2D : public OperationImpl<float>
{
public:
- explicit Conv2D(const TensorVariant &input, const Conv2DOp &op);
- std::vector<TensorVariant> operator()() override;
+ explicit Conv2D(const mir::TensorVariant &input, const mir::ops::Conv2DOp &op);
+ std::vector<mir::TensorVariant> operator()() override;
private:
- const Tensor<float> _input;
- Tensor<float> _kernel;
- const Shape _strides;
- const PaddingType _padding;
- const Shape &_out_shape;
- const Conv2DOp &_op;
+ const mir::Tensor<float> _input;
+ mir::Tensor<float> _kernel;
+ const mir::Shape _strides;
+ const mir::ops::PaddingType _padding;
+ const mir::Shape &_out_shape;
+ const mir::ops::Conv2DOp &_op;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_CONV2D_IMPL
#include "conv_FFT.h"
#include "common.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
+namespace nnc
{
-using namespace nncc::contrib::core::data;
+using namespace mir;
+using namespace mir::ops;
// Mostly compatible with tensorflow implementation
// Assuming input is in NHWC format with batch omitted( [in_height, in_width, in_channels] )
}
}
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "OperationImpl.h"
#include "core/modelIR/operations/conv_2d_op.h"
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace interpreter
-{
-namespace impl
-{
-using nncc::contrib::core::IR::model::ops::Conv2DOp;
-using nncc::contrib::core::IR::model::ops::PaddingType;
-using nncc::contrib::core::data::Tensor;
typedef std::complex<float> FFT_complex;
class Conv2D_FFT : public OperationImpl<float>
{
public:
- explicit Conv2D_FFT(const TensorVariant &input, const Conv2DOp &op);
- std::vector<TensorVariant> operator()() override;
+ explicit Conv2D_FFT(const mir::TensorVariant &input, const mir::ops::Conv2DOp &op);
+ std::vector<mir::TensorVariant> operator()() override;
protected:
///
/// Pad input (with zeroes) according to selected padding type (paddings are calculated in ShapeInference)
///
- std::vector<FFT_complex> pad_input(const Index &pads);
+ std::vector<FFT_complex> pad_input(const mir::Index &pads);
///
/// Unpack kernels for each out_channel and pad them with zeroes to input size
///
- std::vector<std::vector<FFT_complex>> unpack_and_pad_kernels(const Shape &paddedInputShape, const uint64_t spectreSize);
+ std::vector<std::vector<FFT_complex>> unpack_and_pad_kernels(const mir::Shape &paddedInputShape, const uint64_t spectreSize);
///
/// This function performs elementwise product of input by each kernel
///
/// Perform Inverse Fast Fourier transform on elementwise products results. Return result of the convolution.
///
- TensorVariant ifft(std::vector<std::vector<FFT_complex>> &spectres,
- const Shape &inShape,
- const Shape &outShape,
- const Shape &strides,
- const Index &paddings);
+ mir::TensorVariant ifft(std::vector<std::vector<FFT_complex>> &spectres,
+ const mir::Shape &inShape,
+ const mir::Shape &outShape,
+ const mir::Shape &strides,
+ const mir::Index &paddings);
///
/// Separate even/odd elements to lower/upper halves of array respectively.
void ifft_CT(FFT_complex* array, const uint64_t elements);
private:
- const Tensor<float> _input;
- Tensor<float> _kernel;
- const Shape _strides;
- const PaddingType _padding;
- const Shape &_out_shape;
- const Conv2DOp &_op;
+ const mir::Tensor<float> _input;
+ mir::Tensor<float> _kernel;
+ const mir::Shape _strides;
+ const mir::ops::PaddingType _padding;
+ const mir::Shape &_out_shape;
+ const mir::ops::Conv2DOp &_op;
};
-} // namespace impl
-} // namespace interpreter
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_CORE_BACKEND_INTERPRETER_CONV2D_FFT_IMPL_
#include <fcntl.h>
using namespace std;
-using namespace nncc::contrib;
-using namespace nncc::contrib::pass;
-using namespace nncc::contrib::core::IR::model;
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
namespace
BaseCodeGenerator::BaseCodeGenerator()
{
- string basePath = clopt::artifactDir + "/" + clopt::artifactName;
+ string basePath = cli::artifactDir + "/" + cli::artifactName;
_headerPath = basePath + ".h";
_codePath = basePath + ".cpp";
_paramsPath = basePath + ".params";
PassData BaseCodeGenerator::run(PassData data)
{
- auto g = static_cast<Graph *>(data);
+ auto g = static_cast<mir::Graph *>(data);
assert(g);
// inference shapes
- core::IR::model::ShapeInference si;
+ mir::ShapeInference si;
g->accept(&si);
// visit and analyze graph
ModelAnalyzer ma;
// rename tensors for specific backend language
formatTensorNames(ma);
- createDir(clopt::artifactDir);
+ createDir(cli::artifactDir);
// Print header
auto headerStream = getStream(_headerPath);
return nullptr;
}
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "ModelAnalyzer.h"
using namespace std;
-using namespace nncc::contrib;
-using namespace nncc::contrib::core::IR::model;
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
void CCodeGenerator::formatTensorNames(const ModelAnalyzer &ma)
return instance;
}
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "option/Options.h"
using namespace std;
-using namespace nncc::contrib;
-using namespace nncc::contrib::core::IR::model;
#include "CommonData.def"
#include "cpp_dropout.generated.h"
#include "cpp_batchnorm.generated.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
using TensorType = TensorDescription::Type;
out << "bool " << className << "::set" << setterName << "(const Tensor& t)\n"
"{\n";
// need to insert input correctness check
- const core::data::Shape expected = td._shape;
+ const mir::Shape expected = td._shape;
int rank = expected.rank();
if (rank != 0)
{
{
string className = ma.getModelName() + "Model";
- out << "#include \"" << clopt::artifactName << ".h\"\n";
+ out << "#include \"" << cli::artifactName << ".h\"\n";
// put operations from tflite
out.write(eigen, sizeof(eigen));
return cppCodeGenerator;
}
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
using namespace std;
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
-using nncc::contrib::core::data::Shape;
-using nncc::contrib::core::data::Index;
-using nncc::contrib::core::data::ShapeRange;
-using nncc::contrib::core::ADT::TensorVariant;
+using namespace nnc::mir;
-void ModelAnalyzer::addOpDescr(ADT::INode *node, const string &opName)
+void ModelAnalyzer::addOpDescr(INode *node, const string &opName)
{
OpDescr::Type type = OpDescr::Type::ORDINARY;
vector<size_t> nodeOutputs;
}
// process node inputs
vector<size_t> nodeInputs;
- for (const ADT::INode::IODescriptor &d: node->getPrevNodes())
+ for (const INode::IODescriptor &d: node->getPrevNodes())
{
size_t idx = d.index;
- ADT::INode *node = d.node;
+ INode *node = d.node;
assert(_nodeToDescr.find(node) != _nodeToDescr.end());
const OpDescr &descr = *_nodeToDescr[node];
const size_t &inTid = descr._outputs[idx];
return id;
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::ConcatOp &op)
+void ModelAnalyzer::visit(INode *node, ops::ConcatOp &op)
{
addOpDescr(node, "concat");
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::Conv2DOp &op)
+void ModelAnalyzer::visit(INode *node, ops::Conv2DOp &op)
{
addOpDescr(node, "conv2d");
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::DepthwiseConv2DOp &op)
+void ModelAnalyzer::visit(INode *node, ops::DepthwiseConv2DOp &op)
{
addOpDescr(node, "depthwiseConv2d");
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::SoftmaxOp &op)
+void ModelAnalyzer::visit(INode *node, ops::SoftmaxOp &op)
{
addOpDescr(node, "softmax");
}
* Model Ir does not separate different types of pool operations, but for code generation
* it is easier to implement different types of pooling by different functions
*/
-void ModelAnalyzer::visit(ADT::INode *node, ops::PoolOp &op)
+void ModelAnalyzer::visit(INode *node, ops::PoolOp &op)
{
const char *funcName = nullptr;
switch (op.getPoolingType())
addOpDescr(node, funcName);
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::FullyConnectedOp &op)
+void ModelAnalyzer::visit(INode *node, ops::FullyConnectedOp &op)
{
addOpDescr(node, "fullConnect");
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::CappedReluOp &op)
+void ModelAnalyzer::visit(INode *node, ops::CappedReluOp &op)
{
addOpDescr(node, "cappedRelu");
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::BiasAddOp &op)
+void ModelAnalyzer::visit(INode *node, ops::BiasAddOp &op)
{
addOpDescr(node, "biasAdd");
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::VariableOp &op)
+void ModelAnalyzer::visit(INode *node, ops::VariableOp &op)
{
assert(node->getPrevNodes().empty());
addOpDescr(node, "in");
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::ReluOp &op)
+void ModelAnalyzer::visit(INode *node, ops::ReluOp &op)
{
addOpDescr(node, "relu");
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::ReshapeOp &op)
+void ModelAnalyzer::visit(INode *node, ops::ReshapeOp &op)
{
addOpDescr(node, "reshape");
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::DropoutOp &op)
+void ModelAnalyzer::visit(INode *node, ops::DropoutOp &op)
{
addOpDescr(node, "dropout");
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::ScaleOp &op)
+void ModelAnalyzer::visit(INode *node, ops::ScaleOp &op)
{
addOpDescr(node, "scale");
}
-void ModelAnalyzer::visit(ADT::INode *node, ops::BatchNormOp &op)
+void ModelAnalyzer::visit(INode *node, ops::BatchNormOp &op)
{
addOpDescr(node, "batchNorm");
}
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include <cassert>
#include <limits>
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
-{
-
-namespace model = nncc::contrib::core::IR::model;
-namespace ADT = model::ADT;
-namespace ops = model::ops;
const size_t INVALID_TENSOR_ID = std::numeric_limits<size_t>::max();
Type _type;
std::string _name;
// if _shape.rank() == 0 - assume shape is not known for this tensor on compilation
- core::data::Shape _shape;
+ mir::Shape _shape;
};
/**
};
Type _type;
- ADT::INode *_node;
+ mir::INode *_node;
std::string _opName;
// list of input tensors
std::vector<size_t> _inputs;
* @brief Constructs inference sequence for given computational graph,
* gathers list of variables used in artifact.
*/
-class ModelAnalyzer: public model::IVisitor
+class ModelAnalyzer: public mir::IVisitor
{
public:
- void visit(ADT::INode *node, ops::ConcatOp &op) override;
- void visit(ADT::INode *node, ops::Conv2DOp &op) override;
- void visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) override;
- void visit(ADT::INode *node, ops::SoftmaxOp &op) override;
- void visit(ADT::INode *node, ops::PoolOp &op) override;
- void visit(ADT::INode *node, ops::FullyConnectedOp &op) override;
- void visit(ADT::INode *node, ops::CappedReluOp &op) override;
- void visit(ADT::INode *node, ops::BiasAddOp &op) override;
- void visit(ADT::INode *node, ops::VariableOp &op) override;
- void visit(ADT::INode *node, ops::ReluOp &op) override;
- void visit(ADT::INode *node, ops::ReshapeOp &op) override;
- void visit(ADT::INode *node, ops::ScaleOp &op) override;
- void visit(ADT::INode *node, ops::BatchNormOp &op) override;
- void visit(ADT::INode *node, ops::DropoutOp &op) override;
+ void visit(mir::INode *node, mir::ops::ConcatOp &op) override;
+ void visit(mir::INode *node, mir::ops::Conv2DOp &op) override;
+ void visit(mir::INode *node, mir::ops::DepthwiseConv2DOp &op) override;
+ void visit(mir::INode *node, mir::ops::SoftmaxOp &op) override;
+ void visit(mir::INode *node, mir::ops::PoolOp &op) override;
+ void visit(mir::INode *node, mir::ops::FullyConnectedOp &op) override;
+ void visit(mir::INode *node, mir::ops::CappedReluOp &op) override;
+ void visit(mir::INode *node, mir::ops::BiasAddOp &op) override;
+ void visit(mir::INode *node, mir::ops::VariableOp &op) override;
+ void visit(mir::INode *node, mir::ops::ReluOp &op) override;
+ void visit(mir::INode *node, mir::ops::ReshapeOp &op) override;
+ void visit(mir::INode *node, mir::ops::ScaleOp &op) override;
+ void visit(mir::INode *node, mir::ops::BatchNormOp &op) override;
+ void visit(mir::INode *node, mir::ops::DropoutOp &op) override;
/**
* @return vector of id's of network input tensors
* Inserts information about CG operation into inference sequence: name of operation,
* creates tensors for operation outputs, binds operation inputs with tensors from previous operations
*/
- void addOpDescr(ADT::INode *node, const std::string &name);
+ void addOpDescr(mir::INode *node, const std::string &name);
enum class TensorType
{
*/
size_t allocateTensor(const std::string &name = std::string(),
TensorDescription::Type type = TensorDescription::Type::ORDINARY,
- core::data::Shape *shape = nullptr);
+ mir::Shape *shape = nullptr);
std::string _modelName = "NN";
std::list<OpDescr> _inferenceSequence;
std::vector<size_t> _named_tensors;
std::vector<size_t> _outputs;
std::vector<TensorDescription> _tensors;
- std::map<const ADT::INode *, OpDescr *> _nodeToDescr;
+ std::map<const mir::INode *, OpDescr *> _nodeToDescr;
};
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_SOFT_BACKEND_MODEL_ANALYZER_H_
#define UNUSED(x) ((void)(x))
-namespace nncc
-{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
+namespace nnc
{
static_assert(std::numeric_limits<float>::is_iec559, "Unsupported float type");
using namespace std;
-using nncc::contrib::core::data::Shape;
-using nncc::contrib::core::data::Index;
-using nncc::contrib::core::data::ShapeRange;
-using nncc::contrib::core::data::util::transposeTensor;
-using nncc::contrib::core::ADT::TensorVariant;
+using nnc::mir::Shape;
+using nnc::mir::Index;
+using nnc::mir::ShapeRange;
+using nnc::mir::transposeTensor;
+using nnc::mir::TensorVariant;
+using nnc::mir::INode;
+
+namespace ops = nnc::mir::ops;
namespace
{
}
}
-void Serializer::visit(ADT::INode *node, ops::ConcatOp &op)
+void Serializer::visit(INode *node, ops::ConcatOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
// axis number should fit into one byte
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(ADT::INode *node, ops::Conv2DOp &op)
+void Serializer::visit(INode *node, ops::Conv2DOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
// serialize kernel
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(ADT::INode *node, ops::DepthwiseConv2DOp &op)
+void Serializer::visit(INode *node, ops::DepthwiseConv2DOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
// serialize kernel
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(ADT::INode *node, ops::SoftmaxOp &op)
+void Serializer::visit(INode *node, ops::SoftmaxOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
// axis number should fit into one byte
serializeT<int32_t>(op.getAxis());
}
-void Serializer::visit(ADT::INode *node, ops::PoolOp &op)
+void Serializer::visit(INode *node, ops::PoolOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
// serialize window shape
borderType = PoolBorderType::ZEROFILLED;
break;
default:
- throw pass::PassException("Unsupported border type in pooling");
+ throw PassException("Unsupported border type in pooling");
}
serializeT<int32_t>(etoi(borderType));
// serialize output shape
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(ADT::INode *node, ops::FullyConnectedOp &op)
+void Serializer::visit(INode *node, ops::FullyConnectedOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
shared_ptr<TensorVariant> weights = make_shared<TensorVariant>(op.getWeights());
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(ADT::INode *node, ops::CappedReluOp &op)
+void Serializer::visit(INode *node, ops::CappedReluOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
serializeT<float>(op.getCap());
}
-void Serializer::visit(ADT::INode *node, ops::BiasAddOp &op)
+void Serializer::visit(INode *node, ops::BiasAddOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
serializeTensor(op.getWeights());
}
-void Serializer::visit(ADT::INode *node, ops::VariableOp &op)
+void Serializer::visit(INode *node, ops::VariableOp &op)
{
// no parameters to dump
}
-void Serializer::visit(ADT::INode *node, ops::ReluOp &op)
+void Serializer::visit(INode *node, ops::ReluOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
// no parameters to dump
}
-void Serializer::visit(ADT::INode *node, ops::ReshapeOp &op)
+void Serializer::visit(INode *node, ops::ReshapeOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
serializeShape(op.getOutputShape(0));
}
-void Serializer::visit(ADT::INode *node, ops::BatchNormOp &op)
+void Serializer::visit(INode *node, ops::BatchNormOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
serializeT<float>(op.getEps());
serializeT<int32_t>(op.getSpatial());
}
-void Serializer::visit(ADT::INode *node, ops::ScaleOp &op)
+void Serializer::visit(INode *node, ops::ScaleOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
serializeTensor(op.getWeights());
}
-void Serializer::visit(ADT::INode *node, ops::DropoutOp &op)
+void Serializer::visit(INode *node, ops::DropoutOp &op)
{
_curOp->_paramStartOffset = _buffer.size();
serializeT<float>(op.getRate());
{
for (OpDescr &descr: inferenceSequence)
{
- ADT::INode *node = descr._node;
+ INode *node = descr._node;
_curOp = &descr;
node->accept(this);
}
}
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include <vector>
#include <cstdint>
-namespace nncc
+namespace nnc
{
-namespace contrib
-{
-namespace backend
-{
-namespace soft
-{
-
-namespace model = nncc::contrib::core::IR::model;
-namespace ADT = model::ADT;
-namespace ops = model::ops;
/**
* @brief Serializer of network parameters for soft backend
* To gather this vector use `getBuffer` method.
* Objects of this class are one-off and not designed to serialize more than one IR
*/
-class Serializer: public model::IVisitor
+class Serializer: public mir::IVisitor
{
public:
- void visit(ADT::INode *node, ops::ConcatOp &op) override;
- void visit(ADT::INode *node, ops::Conv2DOp &op) override;
- void visit(ADT::INode *node, ops::DepthwiseConv2DOp &op) override;
- void visit(ADT::INode *node, ops::SoftmaxOp &op) override;
- void visit(ADT::INode *node, ops::PoolOp &op) override;
- void visit(ADT::INode *node, ops::FullyConnectedOp &op) override;
- void visit(ADT::INode *node, ops::CappedReluOp &op) override;
- void visit(ADT::INode *node, ops::BiasAddOp &op) override;
- void visit(ADT::INode *node, ops::VariableOp &op) override;
- void visit(ADT::INode *node, ops::ReluOp &op) override;
- void visit(ADT::INode *node, ops::ReshapeOp &op) override;
- void visit(ADT::INode *node, ops::ScaleOp &op) override;
- void visit(ADT::INode *node, ops::BatchNormOp &op) override;
- void visit(ADT::INode *node, ops::DropoutOp &op) override;
+ void visit(mir::INode *node, mir::ops::ConcatOp &op) override;
+ void visit(mir::INode *node, mir::ops::Conv2DOp &op) override;
+ void visit(mir::INode *node, mir::ops::DepthwiseConv2DOp &op) override;
+ void visit(mir::INode *node, mir::ops::SoftmaxOp &op) override;
+ void visit(mir::INode *node, mir::ops::PoolOp &op) override;
+ void visit(mir::INode *node, mir::ops::FullyConnectedOp &op) override;
+ void visit(mir::INode *node, mir::ops::CappedReluOp &op) override;
+ void visit(mir::INode *node, mir::ops::BiasAddOp &op) override;
+ void visit(mir::INode *node, mir::ops::VariableOp &op) override;
+ void visit(mir::INode *node, mir::ops::ReluOp &op) override;
+ void visit(mir::INode *node, mir::ops::ReshapeOp &op) override;
+ void visit(mir::INode *node, mir::ops::ScaleOp &op) override;
+ void visit(mir::INode *node, mir::ops::BatchNormOp &op) override;
+ void visit(mir::INode *node, mir::ops::DropoutOp &op) override;
void serialize(std::list<OpDescr> &inferenceSequence);
* @brief Serialize Tensor shape object
* @param s shape to serialize
*/
- void serializeShape(const nncc::contrib::core::data::Shape &s);
+ void serializeShape(const nnc::mir::Shape &s);
/**
* @brief Function serializes type of given tensor base data,
* it's shape and raw data in 'c' format(i.e. layout of multidimensional C array)
* @param t Tensor to serialize
*/
- void serializeTensor(const contrib::core::ADT::TensorVariant &t);
+ void serializeTensor(const mir::TensorVariant &t);
/**
* @brief Serialize pads for operations like Conv2D
* @tparam Op Operation type
std::vector<char> _buffer;
};
-} // namespace soft
-} // namespace backend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif //_NNC_SOFT_BACKEND_SERIALIZER_H_
static std::ostream &operator<<(std::ostream &os, Padding pad);
static std::ostream &operator<<(std::ostream &os, ActivationFunctionType act);
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
}
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
static std::ostream &operator<<(std::ostream &os, const flatbuffers::Vector<int32_t> *v)
{
using namespace v3_tflite;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
};
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
} // namespace nnc
#endif // NNCC_TFLITE_DUMP_VISITOR_H
#include "tflite_v3_importer.h"
-using namespace nncc::contrib;
-using namespace nncc::contrib::pass;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
PassData TFLiteFrontend::run(PassData data)
{
- nncc::contrib::frontend::tflite::v3::TfliteImporter importer{clopt::inputFile};
+ nnc::tflite::v3::TfliteImporter importer{cli::inputFile};
bool success = importer.import();
if (!success)
{
- throw PassException("Could not load model: " + clopt::inputFile + "\n");
+ throw PassException("Could not load model: " + cli::inputFile + "\n");
}
- return reinterpret_cast<Graph *>(importer.createIR());
+ return reinterpret_cast<mir::Graph *>(importer.createIR());
}
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
* limitations under the License.
*/
-using namespace nncc::contrib::frontend::common;
class TfliteImporter : NNImporter
{
#include "tflite_ir_visitor.h"
#include "tflite_op_creator.h"
-using namespace nncc::contrib::pass;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
-using nncc::contrib::core::data::Index;
-using VariableOp = nncc::contrib::core::IR::model::ops::VariableOp;
-using nncc::contrib::core::data::Shape;
-using nncc::contrib::core::data::util::transposeTensor;
+using nnc::mir::Index;
+using VariableOp = nnc::mir::ops::VariableOp;
+using nnc::mir::Shape;
+using nnc::mir::transposeTensor;
IrVisitor::IrVisitor()
{
auto node = graph->create<VariableOp>(t->name()->c_str());
opsForTensorsTheyOutput[i] = node;
- Shape inputShape = common::ShapeHelper::createShape(*t->shape(), t->shape()->size());
+ Shape inputShape = ShapeHelper::createShape(*t->shape(), t->shape()->size());
// So far we assume that if the first dimension is equal to 1,
// then it is the batch dimension and should be ignored
- common::ShapeHelper::cutOffBatchDim(inputShape);
+ ShapeHelper::cutOffBatchDim(inputShape);
node->getOperation()->setOutputShape(0, inputShape);
}
}
EnumNamesTensorType()[t->type()]);
}
- Shape tensorShape = common::ShapeHelper::createShape(*t->shape(), t->shape()->size());
+ Shape tensorShape = ShapeHelper::createShape(*t->shape(), t->shape()->size());
return std::make_shared<IrTensor>(tensorShape, tensorBufferCopy, type, elementSize);
}
}
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "tflite_visitor.h"
#include "tflite_op_creator.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
using namespace v3_tflite;
-using nncc::contrib::core::IR::model::Graph;
-using nncc::contrib::core::IR::model::ADT::INode;
-using IrTensor = nncc::contrib::core::ADT::TensorVariant;
+using nnc::mir::Graph;
+using nnc::mir::INode;
+using IrTensor = nnc::mir::TensorVariant;
class IrVisitor : public Visitor
{
};
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // NNCC_TFLITE_IR_VISITOR_H
#include "core/modelIR/operations/reshape_op.h"
#include "pass/PassException.h"
-using namespace nncc::contrib::pass;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
// TODO: we should also support "-1" values in new_shape, which means that correct
// shape values must be calculated. Better do it in the shape inference module.
- Shape newShape = common::ShapeHelper::createShape(*opts->new_shape(), opts->new_shape()->size());
+ Shape newShape = ShapeHelper::createShape(*opts->new_shape(), opts->new_shape()->size());
outputs[0]->getOperation()->setOutputShape(0, newShape);
return outputs;
}
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "schema_v3.h"
#include "passes/common_frontend/shape_helper.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
using namespace v3_tflite;
-namespace ops = nncc::contrib::core::IR::model::ops;
-using nncc::contrib::core::IR::model::Graph;
-using nncc::contrib::core::IR::model::ADT::INode;
-using IrTensor = nncc::contrib::core::ADT::TensorVariant;
-using nncc::contrib::core::data::Shape;
+namespace ops = nnc::mir::ops;
+using nnc::mir::Graph;
+using nnc::mir::INode;
+using IrTensor = nnc::mir::TensorVariant;
+using nnc::mir::Shape;
class OpCreator
{
}
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // NNCC_TFLITE_OP_CREATOR_H
#include "tflite_dump_visitor.h"
#include "tflite_walker.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
} // namespace v3
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#include "passes/common_frontend/nn_importer.h"
#include "passes/common_frontend/model_allocation.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
#include "tflite_importer.inline.h"
} // namespace v3
-} // namespace contrib
} // namespace tflite
-} // namespace frontend
-} // namespace nncc
+} // namespace nnc
#endif // NNCC_TFLITE_V3_IMPORTER_H
using namespace v3_tflite;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
};
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // NNCC_TFLITE_VISITOR_H
#include "tflite_walker.h"
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
void ModelWalker::walkContents(const Operator *) {}
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
using namespace v3_tflite;
-namespace nncc
-{
-namespace contrib
-{
-namespace frontend
+namespace nnc
{
namespace tflite
{
}
} // namespace tflite
-} // namespace frontend
-} // namespace contrib
-} // namespace nncc
+} // namespace nnc
#endif // NNCC_TFLITE_WALKER_H
#include <unistd.h>
#include <dirent.h>
-using namespace nncc::contrib::clopt;
-
-namespace nncc
-{
-namespace contrib
+namespace nnc
{
-namespace clopt
+namespace cli
{
void checkInFile(const Option<std::string> &in_file)
{
}
} // checkDebugFile
-} // clopt
-} // contirb
-} // nncc
+} // namespace cli
+} // namespace nnc
#include "support/CommandLine.h"
-using namespace nncc::contrib::clopt;
-
-namespace nncc
-{
-namespace contrib
+namespace nnc
{
-namespace clopt
+namespace cli
{
static std::vector<std::string> splitByComma(const char *str)
this->setRawValue(this->template convToNum<uint64_t>(val));
}
-} // namespace clopt
-} // namespace contrib
-} // namespace nncc
\ No newline at end of file
+} // namespace cli
+} // namespace nnc
\ No newline at end of file
#include "caffe_importer.h"
-using namespace nncc::contrib;
+using namespace nnc;
int main(int argc, const char **argv)
{
return 1;
}
- clopt::CommandLine::getParser()->parseCommandLine(argc, argv);
- std::string modelName = clopt::inputFile;
+ cli::CommandLine::getParser()->parseCommandLine(argc, argv);
+ std::string modelName = cli::inputFile;
- nncc::contrib::frontend::caffe::CaffeImporter importer{modelName};
+ nnc::caffe::CaffeImporter importer{modelName};
bool success = importer.import();
#include "tflite_v3_importer.h"
-using namespace nncc::contrib;
+using namespace nnc;
int main(int argc, const char **argv)
{
return 1;
}
- clopt::CommandLine::getParser()->parseCommandLine(argc, argv);
- std::string modelName = clopt::inputFile;
+ cli::CommandLine::getParser()->parseCommandLine(argc, argv);
+ std::string modelName = cli::inputFile;
- nncc::contrib::frontend::tflite::v3::TfliteImporter importer{modelName};
+ nnc::tflite::v3::TfliteImporter importer{modelName};
bool success = importer.import();
#include "graph_creator.h"
#include "op_info_util.h"
-using namespace nncc::contrib::frontend::common;
-using namespace nncc::contrib::core::IR::model;
+using namespace nnc;
+using namespace nnc::mir;
+
static INode::Ref createFullyConnected(std::unique_ptr<Graph>& g, const opinfo::OperatorInfo* opInfo)
{
#include "core/modelIR/graph.h"
-std::unique_ptr<nncc::contrib::core::IR::model::Graph> make_graph(const opinfo::OperatorInfo* opInfo);
+std::unique_ptr<nnc::mir::Graph> make_graph(const opinfo::OperatorInfo* opInfo);
#endif // NNC_INTERPRETER_OP_TEST_GRAPH_CREATOR_H
#include "core/modelIR/Shape.h"
#include "op_info_util.h"
+using namespace nnc;
+using namespace nnc::mir;
+
std::shared_ptr<TensorVariant> getTensor(const opinfo::Tensor* t)
{
std::shared_ptr<char> tensorBufferCopy(
*/
__attribute__ ((unused)) void printTensor(const TensorVariant& lhs)
{
- using nncc::contrib::core::data::ShapeRange;
- using nncc::contrib::core::data::Tensor;
+ using nnc::mir::ShapeRange;
+ using nnc::mir::Tensor;
Tensor<float> accessor(lhs);
void assertTensorEq(const TensorVariant &lhs, const TensorVariant &rhs)
{
- using nncc::contrib::core::data::ShapeRange;
- using nncc::contrib::core::data::Tensor;
+ using nnc::mir::ShapeRange;
+ using nnc::mir::Tensor;
const int GTEST_FLOAT_EQ_ULP = 4;
}
// Having to put print operator to the same namespace as Shape so that it can be found
-namespace nncc
-{
-namespace core
+namespace nnc
{
-namespace ADT
+namespace mir
{
namespace tensor
{
}
} // namespace tensor
-} // namespace ADT
-} // namespace core
-} // namespace nncc
+} // namespace mir
+} // namespace nnc
#include "passes/common_frontend/shape_helper.h"
#include "graph_creator.h"
-using namespace nncc::contrib::frontend::common;
-using namespace nncc::contrib::core::IR::model;
-
-std::shared_ptr<TensorVariant> getTensor(const opinfo::Tensor* t);
-std::shared_ptr<TensorVariant> getKernel(const opinfo::OperatorInfo* opInfo);
-ops::PaddingType getPaddingType(const opinfo::OperatorInfo* opInfo);
-ops::PoolOp::PoolingType getPoolingType(const opinfo::OperatorInfo* opInfo);
-Shape getShapeParam(const opinfo::OperatorInfo* opInfo, unsigned int n);
+
+std::shared_ptr<nnc::mir::TensorVariant> getTensor(const opinfo::Tensor* t);
+std::shared_ptr<nnc::mir::TensorVariant> getKernel(const opinfo::OperatorInfo* opInfo);
+nnc::mir::ops::PaddingType getPaddingType(const opinfo::OperatorInfo* opInfo);
+nnc::mir::ops::PoolOp::PoolingType getPoolingType(const opinfo::OperatorInfo* opInfo);
+nnc::mir::Shape getShapeParam(const opinfo::OperatorInfo* opInfo, unsigned int n);
int getAxis(const opinfo::OperatorInfo* opInfo);
-__attribute__ ((unused)) void printTensor(const TensorVariant& lhs);
-void assertTensorEq(const TensorVariant &lhs, const TensorVariant &rhs);
+__attribute__ ((unused)) void printTensor(const nnc::mir::TensorVariant& lhs);
+void assertTensorEq(const nnc::mir::TensorVariant &lhs, const nnc::mir::TensorVariant &rhs);
#endif // NNC_INTERPRETER_OP_TEST_UTIL_H
#include "graph_creator.h"
using namespace opinfo;
-using namespace nncc::contrib::core::IR::model;
-using namespace nncc::contrib::backend::interpreter;
+using namespace nnc;
+using namespace nnc::mir;
extern std::string opInfoBuf;
extern const OperatorInfoList* list;
const OperatorInfo* opInfo = GetParam();
std::unique_ptr<Graph> g = make_graph(opInfo);
- core::NNInterpreter interpreter;
+ mir::NNInterpreter interpreter;
for (unsigned int i = 0; i < opInfo->inputs()->size(); ++i)
{
using namespace std;
-using namespace nncc::contrib;
-using namespace nncc::contrib::core::data;
-using namespace nncc::contrib::core::IR::model;
+using namespace nnc;
+using namespace nnc::mir;
+
// Creates simple graph with input and output
void fillGraph(Graph &g)
int main(int argc, const char *argv[])
{
- clopt::CommandLine::getParser()->parseCommandLine(argc, argv, false);
- std::string outputDir = clopt::artifactDir;
- std::string artifactName = clopt::artifactName;
+ cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
+ std::string outputDir = cli::artifactDir;
+ std::string artifactName = cli::artifactName;
Graph g;
fillGraph(g);
- nncc::contrib::backend::soft::CPPCodeGenerator::getInstance().run(&g);
+ nnc::CPPCodeGenerator::getInstance().run(&g);
string basePath = outputDir + "/" + artifactName;
#include "gtest/gtest.h"
-using namespace nncc::contrib::core::IR::model;
-using nncc::contrib::core::data::Shape;
+using namespace nnc::mir;
TEST(ShapeInferenceTest, ReshapeAutoDimension) {
Graph g;
#include "gtest/gtest.h"
#include "core/modelIR/ShapeRange.h"
-namespace {
+using namespace nnc::mir;
-using namespace nncc::contrib::core::data;
+namespace {
struct ParamType {
uint32_t actual_length;
#include <gtest/gtest.h>
-using namespace nncc::contrib::core::ADT;
-using namespace nncc::contrib::core::data;
+using namespace nnc::mir;
TEST(TensorVariant, BasicTest) {
Shape shape{2,2};
#include "core/modelIR/ShapeRange.h"
#include "core/modelIR/Tensor.h"
-using namespace nncc::contrib::core;
-using namespace nncc::contrib::core::data;
-using namespace nncc::contrib::core::ADT;
+using namespace nnc::mir;
const double EPS = 0.0000001;
#include <gtest/gtest.h>
+using namespace nnc::mir;
+
TEST(IRNode, ConnectionTest) {
- using namespace nncc::contrib::core::IR::model;
auto node1 = Node<ops::ReshapeOp>::createNode("node1", 0);
auto node2 = Node<ops::ReshapeOp>::createNode("node2", 1);
#include <gtest/gtest.h>
-using namespace nncc::contrib::core::IR::model;
-using namespace nncc::contrib::core::data;
+using namespace nnc::mir;
TEST(OpDescription, InputOutputShapeTest) {
Shape inShape{1,2,3};
#include "core/serialize/Serializer.h"
#include "core/modelIR/ShapeRange.h"
-using namespace nncc::contrib::core;
-using namespace nncc::contrib::core::data;
-using namespace nncc::contrib::core::ADT;
+using namespace nnc::mir;
const double EPS = 0.0000001;
#include "gtest/gtest.h"
-using namespace nncc::contrib::pass;
+using namespace nnc;
const char *ErrorMsg = "error constructor";
#include "gtest/gtest.h"
-using namespace nncc::contrib;
-using namespace nncc::contrib::pass;
-using namespace nncc::contrib::core::IR::model;
+using namespace nnc;
+
class DummyPass1 : public Pass
{
public:
PassData run(PassData data) override
{
- auto graph = static_cast<Graph *>(data);
+ auto graph = static_cast<mir::Graph *>(data);
if ( !graph )
{
public:
PassData run(PassData data) override
{
- auto tv = static_cast<TensorVariant *>(data);
+ auto tv = static_cast<mir::TensorVariant *>(data);
if ( !tv )
{
DummyPass1 pass1;
DummyPass2 pass2;
- Graph g;
+ mir::Graph g;
auto res = pass1.run(&g);
- ASSERT_NE(static_cast<Graph *>(res), nullptr);
+ ASSERT_NE(static_cast<mir::Graph *>(res), nullptr);
ASSERT_THROW(pass2.run(res), PassException);
}
#include "gtest/gtest.h"
using namespace std;
-using namespace nncc::contrib::backend::soft;
-using namespace nncc::contrib::core::IR::model; // ShapeInference and Graph
-using namespace nncc::contrib::core; // data namespace for TensorVariant, Tensor, Index and Shape
-using namespace nncc::contrib::backend::interpreter;
-namespace irOps = IR::model::ops;
+
+using namespace nnc;
+
+namespace irOps = nnc::mir::ops;
/*
This test suite operates with both artifact and NNC tensors:
For example: nShape, aShape, nTensor, aTensor.
Artifact data types are: Tensor, Shape
- NNC data types are: TensorVariant, tensor::Shape, data::Tensor<float>
+ NNC data types are: mir::TensorVariant, mir::Shape, mir::Tensor<float>
*/
/** Creates graph with one operation generated by opGen function and returns this operation node*/
-INode *fillGraph(Graph &g, function<INode *(Graph &g)> opGen,
- const vector<unique_ptr<TensorVariant>> &inputNTensors)
+mir::INode *fillGraph(mir::Graph &g, function<mir::INode *(mir::Graph &g)> opGen,
+ const vector<unique_ptr<mir::TensorVariant>> &inputNTensors)
{
// Create operation node
- INode *opNode = opGen(g);
+ mir::INode *opNode = opGen(g);
int numInputs = opNode->getPrevNodes().size();
assert(inputNTensors.size() == static_cast<size_t>(numInputs));
for (int i = 0; i < numInputs; ++i)
{
// Create i-th input node
- auto inputNode = g.create<IR::model::ops::VariableOp>("x" + std::to_string(i));
+ auto inputNode = g.create<mir::ops::VariableOp>("x" + std::to_string(i));
// Connect i-th operation input to i-th input node
opNode->connectInputTo(i, inputNode->getOutput(0));
g.markOutput(opNode);
// Run shape inference
- ShapeInference shapeInferencer;
+ mir::ShapeInference shapeInferencer;
g.accept(&shapeInferencer);
return opNode;
}
/** Fills NNC Shape object with data from src container*/
-void fillNShape(data::Shape &nshape, const vector<int> &rawShapeData)
+void fillNShape(mir::Shape &nshape, const vector<int> &rawShapeData)
{
int shapeRank = rawShapeData.size();
nshape.resize(shapeRank);
}
/** Converts NNC Shape to artifact Shape*/
-void copyAShapeFromNShape(Shape &ashape, const data::Shape &src)
+void copyAShapeFromNShape(Shape &ashape, const mir::Shape &src)
{
int shapeRank = src.rank();
ashape.setDims(shapeRank);
}
/** Fills NNC and artifact Shape objects with data from rawShapeData*/
-void fillShapes(data::Shape &nshape, Shape &ashape, const vector<int> &rawShapeData)
+void fillShapes(mir::Shape &nshape, Shape &ashape, const vector<int> &rawShapeData)
{
fillNShape(nshape, rawShapeData);
copyAShapeFromNShape(ashape, nshape);
}
/** Fills NNC tensor with some determined data*/
-void fillNTensor(TensorVariant &dst, float start)
+void fillNTensor(mir::TensorVariant &dst, float start)
{
float t = start;
- data::Tensor<float> wrapper(dst);
- for (data::Index idx: data::ShapeRange(dst.getShape()))
+ mir::Tensor<float> wrapper(dst);
+ for (mir::Index idx: mir::ShapeRange(dst.getShape()))
{
wrapper.at(idx) = sin(t) * 2.0f;
t += 1.0f;
}
}
-TensorVariant createNTensor(data::Shape &shape, float start)
+mir::TensorVariant createNTensor(mir::Shape &shape, float start)
{
shared_ptr<char> dataBuf(
- new char[sizeof(float)*data::num_elements(shape)], default_delete<char[]>());
- TensorVariant tensor(shape, dataBuf, TensorVariant::DTYPE::FLOAT, sizeof(float));
+ new char[sizeof(float)*mir::num_elements(shape)], default_delete<char[]>());
+ mir::TensorVariant tensor(shape, dataBuf, mir::TensorVariant::DTYPE::FLOAT, sizeof(float));
fillNTensor(tensor, start);
return tensor;
}
-/** Converts NNC TensorVariant to artifact Tensor object*/
-void copyATensorFromNTensor(Tensor &dst, TensorVariant &src)
+/** Converts NNC mir::TensorVariant to artifact Tensor object*/
+void copyATensorFromNTensor(Tensor &dst, mir::TensorVariant &src)
{
- data::Tensor<float> wrapper(src);
+ mir::Tensor<float> wrapper(src);
Index artIdx;
int rank = src.getShape().rank();
artIdx.setDims(rank);
- for (data::Index idx: data::ShapeRange(src.getShape()))
+ for (mir::Index idx: mir::ShapeRange(src.getShape()))
{
for (int i = 0; i < rank; ++i)
{
}
/** Fills NNC and artifact tensor objects with some determined data*/
-void fillTensors(unique_ptr<TensorVariant> &nTensor, Tensor &aTensor, const vector<int> &shape, float start)
+void fillTensors(unique_ptr<mir::TensorVariant> &nTensor, Tensor &aTensor, const vector<int> &shape, float start)
{
Shape aShape;
- data::Shape nShape;
+ mir::Shape nShape;
fillShapes(nShape, aShape, shape);
aTensor.reShape(aShape);
shared_ptr<char> dataBuf(
- new char[sizeof(float)*data::num_elements(nShape)], default_delete<char[]>());
- nTensor.reset(new TensorVariant(nShape, dataBuf, TensorVariant::DTYPE::FLOAT, sizeof(float)));
+ new char[sizeof(float)*mir::num_elements(nShape)], default_delete<char[]>());
+ nTensor.reset(new mir::TensorVariant(nShape, dataBuf, mir::TensorVariant::DTYPE::FLOAT, sizeof(float)));
fillNTensor(*nTensor, start);
copyATensorFromNTensor(aTensor, *nTensor);
}
/** Run interpreter to get reference output data*/
-TensorVariant getReferenceTensor(Graph &g, const vector<unique_ptr<TensorVariant>> &inputNTensors)
+mir::TensorVariant getReferenceTensor(mir::Graph &g, const vector<unique_ptr<mir::TensorVariant>> &inputNTensors)
{
- core::NNInterpreter interpreter;
+ mir::NNInterpreter interpreter;
for (int i = 0; i < static_cast<int>(inputNTensors.size()); ++i)
{
interpreter.setInput("x" + to_string(i), *inputNTensors[i]);
return output;
}
-/** Compares nnc TensorVariant and artifact Tensor objects*/
-void compareResults(const TensorVariant &refNTensor, const Tensor &testATensor)
+/** Compares nnc mir::TensorVariant and artifact Tensor objects*/
+void compareResults(const mir::TensorVariant &refNTensor, const Tensor &testATensor)
{
- const data::Shape &tvShape = refNTensor.getShape();
+ const mir::Shape &tvShape = refNTensor.getShape();
const Shape &atShape = testATensor.getShape();
ASSERT_EQ(tvShape.rank(), atShape.getDims());
int rank = tvShape.rank();
}
Index artifactIdx;
artifactIdx.setDims(rank);
- for (data::Index tvIdx: data::ShapeRange(tvShape))
+ for (mir::Index tvIdx: mir::ShapeRange(tvShape))
{
for (int i = 0; i < rank; ++i)
{
artifactIdx[i] = tvIdx.at(i);
}
- assert(refNTensor.getElementSize() == 4L && refNTensor.getDataType() == TensorVariant::DTYPE::FLOAT);
+ assert(refNTensor.getElementSize() == 4L && refNTensor.getDataType() == mir::TensorVariant::DTYPE::FLOAT);
// Input and output data lies in range of [-10, 10], chosen epsilon lies near the edge of float type computational precision
- ASSERT_NEAR(data::Tensor<float>(refNTensor).at(tvIdx), testATensor.at(artifactIdx), 0.00001);
+ ASSERT_NEAR(mir::Tensor<float>(refNTensor).at(tvIdx), testATensor.at(artifactIdx), 0.00001);
}
}
* This function creates test graph, runs interpeter, specifies artifact operation and compares results
*/
template <class TestFunc, class ...Args>
-void createAndRunTestGraph(function<INode *(Graph &)> opGenerator, TestFunc artifactOperation,
- const vector<unique_ptr<TensorVariant>> &inputNTensors, const Args &...inputATensors)
+void createAndRunTestGraph(function<mir::INode *(mir::Graph &)> opGenerator, TestFunc artifactOperation,
+ const vector<unique_ptr<mir::TensorVariant>> &inputNTensors, const Args &...inputATensors)
{
- Graph g;
- INode *actualOperation = fillGraph(g, opGenerator, inputNTensors);
+ mir::Graph g;
+ mir::INode *actualOperation = fillGraph(g, opGenerator, inputNTensors);
// serialize data for soft backend operation
list<OpDescr> inferenceSequence;
serializer.serialize(inferenceSequence);
assert(inferenceSequence.front()._paramStartOffset == 0);
- TensorVariant referenceOutput = getReferenceTensor(g, inputNTensors);
+ mir::TensorVariant referenceOutput = getReferenceTensor(g, inputNTensors);
Tensor testOutput;
artifactOperation(testOutput, serializer.getBuffer().data(), inputATensors...);
TEST(cpp_operations_test, bias)
{
vector<int> inputShapeData{2, 3, 4, 5};
- data::Shape weightsShape{5};
- vector<unique_ptr<TensorVariant>> inputNTensors(1);
+ mir::Shape weightsShape{5};
+ vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
Tensor aInputTensor;
fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
- TensorVariant weights = createNTensor(weightsShape, 1.0f);
- auto opGenerator = [weights](Graph &g){return g.create<IR::model::ops::BiasAddOp>("y", weights);};
+ mir::TensorVariant weights = createNTensor(weightsShape, 1.0f);
+ auto opGenerator = [weights](mir::Graph &g){return g.create<mir::ops::BiasAddOp>("y", weights);};
createAndRunTestGraph(opGenerator, biasAdd, inputNTensors, aInputTensor);
}
TEST(cpp_operations_test, scale)
{
vector<int> inputShapeData{2, 3, 4, 5};
- data::Shape weightsShape{5};
- vector<unique_ptr<TensorVariant>> inputNTensors(1);
+ mir::Shape weightsShape{5};
+ vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
Tensor aInputTensor;
fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
- TensorVariant weights = createNTensor(weightsShape, 1.0f);
- auto opGenerator = [weights](Graph &g){return g.create<IR::model::ops::ScaleOp>("y", weights);};
+ mir::TensorVariant weights = createNTensor(weightsShape, 1.0f);
+ auto opGenerator = [weights](mir::Graph &g){return g.create<mir::ops::ScaleOp>("y", weights);};
createAndRunTestGraph(opGenerator, scale, inputNTensors, aInputTensor);
}
float cap = 0.5f;
vector<int> shapeData{2, 3, 4, 5};
Tensor aInputTensor;
- vector<unique_ptr<TensorVariant>> inputNTensors(1);
+ vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
fillTensors(inputNTensors[0], aInputTensor, shapeData, 1.0f);
- auto opGenerator = [cap](Graph &g){return g.create<IR::model::ops::CappedReluOp>("y", cap);};
+ auto opGenerator = [cap](mir::Graph &g){return g.create<mir::ops::CappedReluOp>("y", cap);};
createAndRunTestGraph(opGenerator, cappedRelu, inputNTensors, aInputTensor);
}
// set different size for concatenating axis
shape2Data[axis] = 11;
vector<Tensor> inputATensors(2);
- vector<unique_ptr<TensorVariant>> inputNTensors(2);
+ vector<unique_ptr<mir::TensorVariant>> inputNTensors(2);
fillTensors(inputNTensors[0], inputATensors[0], shape1Data, 1.0f);
fillTensors(inputNTensors[1], inputATensors[1], shape2Data, 2.0f);
- auto opGenerator = [axis](Graph &g) { return g.create<IR::model::ops::ConcatOp>("y", 2, axis); };
+ auto opGenerator = [axis](mir::Graph &g) { return g.create<mir::ops::ConcatOp>("y", 2, axis); };
createAndRunTestGraph(opGenerator, concat<Tensor, Tensor>, inputNTensors, inputATensors[0], inputATensors[1]);
}
for (iT strideW = 1; strideW <= 3; ++strideW)
{
vector<int> inputShapeData{5, 7, static_cast<int>(inputC)}; // HWC
- data::Shape kernelShape{kernelH, kernelW, inputC, outputC}; // HWCN
- data::Shape strides{strideH, strideW, 1};
- vector<unique_ptr<TensorVariant>> inputNTensors(1);
+ mir::Shape kernelShape{kernelH, kernelW, inputC, outputC}; // HWCN
+ mir::Shape strides{strideH, strideW, 1};
+ vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
Tensor aInputTensor;
fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
- auto padT = IR::model::ops::PaddingType::Same;
- TensorVariant kernel = createNTensor(kernelShape, 1.0f);
- auto opGenerator = [kernel, strides, padT](Graph &g)
+ auto padT = mir::ops::PaddingType::Same;
+ mir::TensorVariant kernel = createNTensor(kernelShape, 1.0f);
+ auto opGenerator = [kernel, strides, padT](mir::Graph &g)
{
- return g.create<IR::model::ops::Conv2DOp>("y", kernel, strides, padT);
+ return g.create<mir::ops::Conv2DOp>("y", kernel, strides, padT);
};
createAndRunTestGraph(opGenerator, conv2d, inputNTensors, aInputTensor);
for (iT multiplier = 1; multiplier <= 2; ++multiplier)
{
vector<int> inputShapeData{5, 7, static_cast<int>(channels)}; // HWC
- data::Shape kernelShape{kernelH, kernelW, channels, multiplier}; // HWCN
- data::Shape strides{strideH, strideW, 1};
- vector<unique_ptr<TensorVariant>> inputNTensors(1);
+ mir::Shape kernelShape{kernelH, kernelW, channels, multiplier}; // HWCN
+ mir::Shape strides{strideH, strideW, 1};
+ vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
Tensor aInputTensor;
fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
- auto padT = IR::model::ops::PaddingType::Same;
- TensorVariant kernel = createNTensor(kernelShape, 1.0f);
- auto opGenerator = [kernel, strides, padT](Graph &g) {
- return g.create<IR::model::ops::DepthwiseConv2DOp>("y", kernel, strides, padT);
+ auto padT = mir::ops::PaddingType::Same;
+ mir::TensorVariant kernel = createNTensor(kernelShape, 1.0f);
+ auto opGenerator = [kernel, strides, padT](mir::Graph &g) {
+ return g.create<mir::ops::DepthwiseConv2DOp>("y", kernel, strides, padT);
};
createAndRunTestGraph(opGenerator, depthwiseConv2d, inputNTensors, aInputTensor);
TEST(cpp_operations_test, fully_connected)
{
vector<int> inputShapeData{1, 13};
- data::Shape weightsShape{13, 7};
- vector<unique_ptr<TensorVariant>> inputNTensors(1);
+ mir::Shape weightsShape{13, 7};
+ vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
Tensor aInputTensor;
fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
- TensorVariant weights = createNTensor(weightsShape, 1.0f);
- auto opGenerator = [weights](Graph &g){return g.create<IR::model::ops::FullyConnectedOp>("y", weights);};
+ mir::TensorVariant weights = createNTensor(weightsShape, 1.0f);
+ auto opGenerator = [weights](mir::Graph &g){return g.create<mir::ops::FullyConnectedOp>("y", weights);};
createAndRunTestGraph(opGenerator, fullConnect, inputNTensors, aInputTensor);
}
for (iT strideW = 1; strideW <= 3; ++strideW)
{
vector<int> shapeData{5, 7, static_cast<int>(channels)};
- data::Shape windowShape{windowH, windowW, 1};
- data::Shape strides{strideH, strideW, 1};
+ mir::Shape windowShape{windowH, windowW, 1};
+ mir::Shape strides{strideH, strideW, 1};
auto padT = irOps::PaddingType::Valid;
Tensor aInputTensor;
- vector<unique_ptr<TensorVariant>> inputNTensors(1);
+ vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
fillTensors(inputNTensors[0], aInputTensor, shapeData, 1.0f);
for (auto border: borders)
{
- auto opGenerator = [windowShape, strides, padT, border](Graph &g) {
- return g.create<IR::model::ops::PoolOp>("y", windowShape, strides, poolT, padT, border);
+ auto opGenerator = [windowShape, strides, padT, border](mir::Graph &g) {
+ return g.create<mir::ops::PoolOp>("y", windowShape, strides, poolT, padT, border);
};
createAndRunTestGraph(opGenerator, testFunc, inputNTensors, aInputTensor);
vector<irOps::PoolOp::BorderType> borderTypes{
irOps::PoolOp::BorderType::EMPTY
};
- genericPoolTest<IR::model::ops::PoolOp::PoolingType::MAX>(maxPool, borderTypes);
+ genericPoolTest<mir::ops::PoolOp::PoolingType::MAX>(maxPool, borderTypes);
}
TEST(cpp_operations_test, avgpool)
irOps::PoolOp::BorderType::EMPTY,
irOps::PoolOp::BorderType::ZEROFILLED
};
- genericPoolTest<IR::model::ops::PoolOp::PoolingType::AVG>(avgPool, borderTypes);
+ genericPoolTest<mir::ops::PoolOp::PoolingType::AVG>(avgPool, borderTypes);
}
TEST(cpp_operations_test, relu)
// test prerequisites
vector<int> shapeData{2,3,4,5};
Tensor aInputTensor;
- vector<unique_ptr<TensorVariant>> inputNTensors(1);
+ vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
fillTensors(inputNTensors[0], aInputTensor, shapeData, 1.0f);
- auto opGenerator = [](Graph &g){return g.create<IR::model::ops::ReluOp>("y");};
+ auto opGenerator = [](mir::Graph &g){return g.create<mir::ops::ReluOp>("y");};
createAndRunTestGraph(opGenerator, relu, inputNTensors, aInputTensor);
}
shapeData.resize(numDims);
int axis = numDims - 1;
Tensor aInputTensor;
- vector<unique_ptr<TensorVariant>> inputNTensors(1);
+ vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
fillTensors(inputNTensors[0], aInputTensor, shapeData, 1.0f);
- auto opGenerator = [axis](Graph &g) { return g.create<IR::model::ops::SoftmaxOp>("y", axis); };
+ auto opGenerator = [axis](mir::Graph &g) { return g.create<mir::ops::SoftmaxOp>("y", axis); };
createAndRunTestGraph(opGenerator, softmax, inputNTensors, aInputTensor);
}
// test prerequisites
vector<int> inputShapeData{2,3,4,5};
vector<int> outputShapeData{1,120};
- data::Shape nOutputShape;
+ mir::Shape nOutputShape;
fillNShape(nOutputShape, outputShapeData);
Tensor aInputTensor;
- vector<unique_ptr<TensorVariant>> inputNTensors(1);
+ vector<unique_ptr<mir::TensorVariant>> inputNTensors(1);
fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f);
- auto opGenerator = [nOutputShape](Graph &g)
+ auto opGenerator = [nOutputShape](mir::Graph &g)
{
- auto op = g.create<IR::model::ops::ReshapeOp>("y");
+ auto op = g.create<mir::ops::ReshapeOp>("y");
op->getOperation()->setOutputShape(0, nOutputShape);
return op;
};
#include "support/CommandLine.h"
using namespace std;
-using namespace nncc::contrib;
-using namespace nncc::contrib::backend::soft;
-using namespace nncc::contrib::core;
-using namespace nncc::contrib::core::IR::model;
+
+using namespace nnc;
+using namespace nnc::mir;
static bool isFileExists(const string &path)
{
nullptr};
int argc = (sizeof(argv) / sizeof(argv[0])) - 1;
- clopt::CommandLine::getParser()->parseCommandLine(argc, argv, false);
+ cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
- nncc::contrib::core::IR::model::Graph g;
+ nnc::mir::Graph g;
INode *input = g.create<ops::VariableOp>("input");
- input->getOperation()->setOutputShape(0, data::Shape({1,2,3,4}));
+ input->getOperation()->setOutputShape(0, Shape({1,2,3,4}));
INode *output = g.create<ops::ReluOp>("output");
output->connectInputTo(0, input->getOutput(0));
#include "gtest/gtest.h"
-using namespace nncc::contrib::clopt;
+using namespace nnc::cli;
void soption_checker1(const Option<std::string> &opt)