Reformat code according to coding style.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
#include <stdexcept>
#include <vector>
-namespace nnc {
+namespace nnc
+{
using namespace ::caffe2;
using mir::Shape;
Caffe2Importer::Caffe2Importer(std::string predict_net, std::string init_net,
- std::vector<std::vector<int>> input_shapes) :
- _predictNet(std::move(predict_net)), _initNet(std::move(init_net)),
- _graph(new mir::Graph()), _opCreator(new Caffe2OpCreator(_graph)) {
- for (auto& shape : input_shapes)
+ std::vector<std::vector<int>> input_shapes)
+ : _predictNet(std::move(predict_net)), _initNet(std::move(init_net)), _graph(new mir::Graph()),
+ _opCreator(new Caffe2OpCreator(_graph))
+{
+ for (auto &shape : input_shapes)
_inputShapes.emplace_back(shape);
}
Caffe2Importer::~Caffe2Importer() = default;
-void Caffe2Importer::cleanup() {
- delete _graph;
-}
+void Caffe2Importer::cleanup() { delete _graph; }
static void loadModelFile(const std::string &filename, caffe2::NetDef *net)
{
throw std::runtime_error("File \"" + filename + "\" has not been consumed entirely.");
}
-void Caffe2Importer::import() {
+void Caffe2Importer::import()
+{
GOOGLE_PROTOBUF_VERIFY_VERSION;
_net.reset(new NetDef());
preloadAllTensors();
}
-mir::Graph* Caffe2Importer::createIR() {
- for (auto& op : _net->op())
+mir::Graph *Caffe2Importer::createIR()
+{
+ for (auto &op : _net->op())
createMIRNodesFromOp(op);
setGraphOutputs();
return _graph;
}
-void Caffe2Importer::collectUnsupportedOps() {
- for (auto& op : _net->op())
+void Caffe2Importer::collectUnsupportedOps()
+{
+ for (auto &op : _net->op())
collectUnsupportedOp(op);
- if (!_problemsOpSet.empty()) {
+ if (!_problemsOpSet.empty())
+ {
std::string msg("NNC can't load model. Detected problems:");
- for (const auto& problemStr : _problemsOpSet)
+ for (const auto &problemStr : _problemsOpSet)
msg.append("\n * " + problemStr);
throw std::runtime_error(msg);
}
}
-void Caffe2Importer::collectUnsupportedOp(const OperatorDef& op) {
- if (_operatorTypes.find(op.type()) == _operatorTypes.end()) {
+void Caffe2Importer::collectUnsupportedOp(const OperatorDef &op)
+{
+ if (_operatorTypes.find(op.type()) == _operatorTypes.end())
+ {
_problemsOpSet.insert(op.type() + ": unknown layer");
return;
}
SupportedCaffe2OpType opType = _operatorTypes.at(op.type());
- switch (opType) {
+ switch (opType)
+ {
case SupportedCaffe2OpType::FC:
_opCreator->checkFC(op, _problemsOpSet);
break;
}
}
-void Caffe2Importer::preloadAllTensors() {
- for (auto& op : _net->op()) {
+void Caffe2Importer::preloadAllTensors()
+{
+ for (auto &op : _net->op())
+ {
// All tensor values are stored in 'GivenTensorFill' and 'ConstantFill' operators, so skip rest
auto opType = _operatorTypes.at(op.type());
- if ((opType == SupportedCaffe2OpType::givenTensorFill
- || opType == SupportedCaffe2OpType::constantFill
- || opType == SupportedCaffe2OpType::givenTensorInt64Fill)
- && hasArgument(op.arg(), "values")) {
+ if ((opType == SupportedCaffe2OpType::givenTensorFill ||
+ opType == SupportedCaffe2OpType::constantFill ||
+ opType == SupportedCaffe2OpType::givenTensorInt64Fill) &&
+ hasArgument(op.arg(), "values"))
+ {
_MIRTensors.insert(std::make_pair(op.output(0), createTensor(op)));
}
}
}
-void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) {
- std::vector<mir::Operation::Output*> outputs;
+void Caffe2Importer::createMIRNodesFromOp(const OperatorDef &op)
+{
+ std::vector<mir::Operation::Output *> outputs;
// If op input not met yet - consider it as model input
- if (op.input_size() > 0
- && _blobNameToOutput.find(op.input(0)) == _blobNameToOutput.end()) {
+ if (op.input_size() > 0 && _blobNameToOutput.find(op.input(0)) == _blobNameToOutput.end())
+ {
outputs = _opCreator->createInput(op.input(0), _inputShapes.front());
_blobNameToOutput[op.input(0)] = outputs.at(0);
auto inputs = getInputMIROps(op);
SupportedCaffe2OpType opType = _operatorTypes.at(op.type());
- switch (opType) {
+ switch (opType)
+ {
case SupportedCaffe2OpType::constantFill:
case SupportedCaffe2OpType::givenTensorFill:
case SupportedCaffe2OpType::givenTensorInt64Fill:
assert(false && "All unsupported types should have been found before this pass.");
}
- for (size_t i = 0; i < outputs.size(); ++i) {
+ for (size_t i = 0; i < outputs.size(); ++i)
+ {
// caffe2 input blob name could be same as output blob name, and next line will overwrite
// '_blobNameToOpOutput' element, but in all networks that I saw it was not a problem
_blobNameToOutput[op.output(i)] = outputs.at(i);
_lastMIROp = outputs.at(0)->getNode();
}
-mir::TensorVariant Caffe2Importer::createTensor(const OperatorDef& op) {
+mir::TensorVariant Caffe2Importer::createTensor(const OperatorDef &op)
+{
assert(hasArgument(op.arg(), "shape") && hasArgument(op.arg(), "values"));
- const auto& shape = findArgumentByName(op.arg(), "shape");
- const auto& values = findArgumentByName(op.arg(), "values");
+ const auto &shape = findArgumentByName(op.arg(), "shape");
+ const auto &values = findArgumentByName(op.arg(), "values");
mir::DTYPE element_type;
const SupportedCaffe2OpType op_type = _operatorTypes.at(op.type());
- const void* src_data;
+ const void *src_data;
// if values on floats
- if (!values.floats().empty()) {
+ if (!values.floats().empty())
+ {
element_type = mir::DTYPE::FLOAT32;
src_data = values.floats().data();
- } else {
+ }
+ else
+ {
assert(!values.ints().empty());
- if (op_type == SupportedCaffe2OpType::givenTensorInt64Fill) {
+ if (op_type == SupportedCaffe2OpType::givenTensorInt64Fill)
+ {
element_type = mir::DTYPE::INT64;
- } else {
+ }
+ else
+ {
element_type = mir::DTYPE::INT32;
}
src_data = values.ints().data();
return mir::TensorVariant(element_type, tensor_shape, src_data);
}
-std::vector<mir::Operation::Output*> Caffe2Importer::getInputMIROps(const OperatorDef& op) {
+std::vector<mir::Operation::Output *> Caffe2Importer::getInputMIROps(const OperatorDef &op)
+{
// caffe2 operation inputs not same as MIR inputs (ex: in caffe2 conv kernel and bias also inputs)
// so choose caffe2 inputs, which are 'real' inputs
- std::vector<mir::Operation::Output*> inputs;
+ std::vector<mir::Operation::Output *> inputs;
SupportedCaffe2OpType opType = _operatorTypes.at(op.type());
if (opType != SupportedCaffe2OpType::givenTensorFill &&
opType != SupportedCaffe2OpType::constantFill &&
opType != SupportedCaffe2OpType::givenTensorInt64Fill)
{
- for (auto& i : op.input())
+ for (auto &i : op.input())
if (_blobNameToOutput.find(i) != _blobNameToOutput.end())
inputs.push_back(_blobNameToOutput[i]);
}
return inputs;
}
-void Caffe2Importer::setGraphOutputs() {
+void Caffe2Importer::setGraphOutputs()
+{
// For now, we assume that:
// - there is exactly one output;
// - the output is from the last layer.
}
const std::map<std::string, SupportedCaffe2OpType> Caffe2Importer::_operatorTypes = {
- {"Add", SupportedCaffe2OpType::add},
- {"AveragePool", SupportedCaffe2OpType::averagePool},
- {"Conv", SupportedCaffe2OpType::conv},
- {"Concat", SupportedCaffe2OpType::concat},
- {"ConstantFill", SupportedCaffe2OpType::constantFill},
- {"Dropout", SupportedCaffe2OpType::dropout},
- {"FC", SupportedCaffe2OpType::FC},
- {"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
- {"MaxPool", SupportedCaffe2OpType::maxPool},
- {"Mul", SupportedCaffe2OpType::mul},
- {"Relu", SupportedCaffe2OpType::relu},
- {"ResizeNearest", SupportedCaffe2OpType::resizeNearest},
- {"Sigmoid", SupportedCaffe2OpType::sigmoid},
- {"Softmax", SupportedCaffe2OpType::softmax},
- {"SpatialBN", SupportedCaffe2OpType::spatialBN},
- {"Sum", SupportedCaffe2OpType::sum},
- {"Clip", SupportedCaffe2OpType::clip},
- {"Reshape", SupportedCaffe2OpType::reshape},
- {"GivenTensorInt64Fill", SupportedCaffe2OpType::givenTensorInt64Fill},
+ {"Add", SupportedCaffe2OpType::add},
+ {"AveragePool", SupportedCaffe2OpType::averagePool},
+ {"Conv", SupportedCaffe2OpType::conv},
+ {"Concat", SupportedCaffe2OpType::concat},
+ {"ConstantFill", SupportedCaffe2OpType::constantFill},
+ {"Dropout", SupportedCaffe2OpType::dropout},
+ {"FC", SupportedCaffe2OpType::FC},
+ {"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
+ {"MaxPool", SupportedCaffe2OpType::maxPool},
+ {"Mul", SupportedCaffe2OpType::mul},
+ {"Relu", SupportedCaffe2OpType::relu},
+ {"ResizeNearest", SupportedCaffe2OpType::resizeNearest},
+ {"Sigmoid", SupportedCaffe2OpType::sigmoid},
+ {"Softmax", SupportedCaffe2OpType::softmax},
+ {"SpatialBN", SupportedCaffe2OpType::spatialBN},
+ {"Sum", SupportedCaffe2OpType::sum},
+ {"Clip", SupportedCaffe2OpType::clip},
+ {"Reshape", SupportedCaffe2OpType::reshape},
+ {"GivenTensorInt64Fill", SupportedCaffe2OpType::givenTensorInt64Fill},
};
} // namespace nnc
-
/*
* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
*
#include "caffe2_op_creator.h"
#include "caffe2_op_types.h"
-namespace nnc {
+namespace nnc
+{
-class Caffe2Importer {
+class Caffe2Importer
+{
public:
explicit Caffe2Importer(std::string predict_net, std::string init_net,
std::vector<std::vector<int>> input_shapes);
/**
- * @brief Import model from file, must be called before 'createIR' method
- * @throw PassException in case, if model couldn't be parsed or NNC doesn't support it
- */
+ * @brief Import model from file, must be called before 'createIR' method
+ * @throw PassException in case, if model couldn't be parsed or NNC doesn't support it
+ */
void import();
/**
- * @brief Create MIR graph from caffe model, must be called after 'import' method
- * @return MIR graph, corresponding to processed caffe model
- */
- mir::Graph* createIR();
+ * @brief Create MIR graph from caffe model, must be called after 'import' method
+ * @return MIR graph, corresponding to processed caffe model
+ */
+ mir::Graph *createIR();
void cleanup();
private:
std::string _predictNet;
std::string _initNet;
- mir::Graph* _graph;
+ mir::Graph *_graph;
std::unique_ptr<::caffe2::NetDef> _net;
std::unique_ptr<Caffe2OpCreator> _opCreator;
std::vector<mir::Shape> _inputShapes;
std::set<std::string> _problemsOpSet;
// Maps Caffe2 operator input names to corresponding MIR operation outputs.
- std::unordered_map<std::string, mir::Operation::Output*> _blobNameToOutput;
- mir::Operation* _lastMIROp = nullptr;
+ std::unordered_map<std::string, mir::Operation::Output *> _blobNameToOutput;
+ mir::Operation *_lastMIROp = nullptr;
std::map<std::string, mir::TensorVariant> _MIRTensors;
/**
- * @brief Pass through caffe2 graph and collect ops unsupported by NNC
- * @throw PassException with message, containing detected problems
- */
+ * @brief Pass through caffe2 graph and collect ops unsupported by NNC
+ * @throw PassException with message, containing detected problems
+ */
void collectUnsupportedOps();
/**
- * @brief Collecting unsupported parts of caffe2 operator
- */
- void collectUnsupportedOp(const ::caffe2::OperatorDef& op);
+ * @brief Collecting unsupported parts of caffe2 operator
+ */
+ void collectUnsupportedOp(const ::caffe2::OperatorDef &op);
/**
- * @brief Creating MIR node from single caffe2 operator
- */
- void createMIRNodesFromOp(const ::caffe2::OperatorDef& op);
+ * @brief Creating MIR node from single caffe2 operator
+ */
+ void createMIRNodesFromOp(const ::caffe2::OperatorDef &op);
/**
- * @brief Since caffe2 tensor values stored separately (in init_net) - preload them in _MIRTensors
- */
+ * @brief Since caffe2 tensor values stored separately (in init_net) - preload them in _MIRTensors
+ */
void preloadAllTensors();
/**
- * @brief Creates MIR tensor from caffe2 givenTensorFill op
- */
- mir::TensorVariant createTensor(const ::caffe2::OperatorDef& op);
+ * @brief Creates MIR tensor from caffe2 givenTensorFill op
+ */
+ mir::TensorVariant createTensor(const ::caffe2::OperatorDef &op);
/**
- * @brief Returns MIR operation outputs corresponding to the inputs of the given operator.
- */
- std::vector<mir::Operation::Output*> getInputMIROps(const ::caffe2::OperatorDef& op);
+ * @brief Returns MIR operation outputs corresponding to the inputs of the given operator.
+ */
+ std::vector<mir::Operation::Output *> getInputMIROps(const ::caffe2::OperatorDef &op);
/**
- * @brief Mark output MIR nodes
- */
+ * @brief Mark output MIR nodes
+ */
void setGraphOutputs();
};
#include <set>
#include <vector>
-namespace nnc {
+namespace nnc
+{
static mir::TensorVariant fixGroupedKernel(int groups, const mir::TensorVariant &folded_kernel)
{
//
static std::pair<std::vector<int32_t>, std::vector<int32_t>>
-getPadding(const ::caffe2::OperatorDef& op) {
+getPadding(const ::caffe2::OperatorDef &op)
+{
- if (hasArgument(op.arg(), "pads")) {
+ if (hasArgument(op.arg(), "pads"))
+ {
// pads order: t l b r
auto pads_arg = findArgumentByName(op.arg(), "pads");
std::vector<int32_t> paddings;
- for (const auto& pad : pads_arg.ints())
+ for (const auto &pad : pads_arg.ints())
paddings.push_back(static_cast<int32_t>(pad));
assert(paddings.size() == 4);
return {padding_before, padding_after};
}
- bool has_custom_pad = hasArgument(op.arg(), "pad_l") || hasArgument(op.arg(), "pad_r")
- || hasArgument(op.arg(), "pad_t") || hasArgument(op.arg(), "pad_b");
+ bool has_custom_pad = hasArgument(op.arg(), "pad_l") || hasArgument(op.arg(), "pad_r") ||
+ hasArgument(op.arg(), "pad_t") || hasArgument(op.arg(), "pad_b");
- if (has_custom_pad) {
+ if (has_custom_pad)
+ {
int32_t pad_l = getSingleArgument(op, "pad_l", 0);
int32_t pad_t = getSingleArgument(op, "pad_t", 0);
int32_t pad_r = getSingleArgument(op, "pad_r", 0);
return {{pad, pad}, {pad, pad}};
};
-static std::vector<int32_t>
-getStrides(const ::caffe2::OperatorDef& op) {
+static std::vector<int32_t> getStrides(const ::caffe2::OperatorDef &op)
+{
std::vector<int32_t> strides;
- if (hasArgument(op.arg(), "stride")) {
+ if (hasArgument(op.arg(), "stride"))
+ {
int stride = getSingleArgument(op, "stride", 1);
strides = {stride, stride};
}
- if (hasArgument(op.arg(), "strides")) {
+ if (hasArgument(op.arg(), "strides"))
+ {
// strides order: h w
auto strides_arg = findArgumentByName(op.arg(), "strides");
- for (const auto& s : strides_arg.ints())
+ for (const auto &s : strides_arg.ints())
strides.push_back(s);
}
return strides;
}
-static Shape getWindowShape(const ::caffe2::OperatorDef& op,
- const std::vector<mir::Operation::Output*>& inputs) {
+static Shape getWindowShape(const ::caffe2::OperatorDef &op,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
int is_global_pooling = getSingleArgument(op, "global_pooling", 0);
- bool has_custom_kernel_size = hasArgument(op.arg(), "kernel_h") ||
- hasArgument(op.arg(), "kernel_w");
+ bool has_custom_kernel_size =
+ hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
bool has_custom_kernels_size = hasArgument(op.arg(), "kernels");
int kernel_h(0), kernel_w(0);
- if (is_global_pooling) {
- const auto& input_shape = inputs[0]->getShape();
+ if (is_global_pooling)
+ {
+ const auto &input_shape = inputs[0]->getShape();
assert(input_shape.rank() == 4 && "getWindowShape() inputs must be of rank 4");
kernel_h = input_shape.dim(2);
kernel_w = input_shape.dim(3);
- } else {
- if (has_custom_kernel_size) {
+ }
+ else
+ {
+ if (has_custom_kernel_size)
+ {
kernel_h = getSingleArgument(op, "kernel_h", 0);
kernel_w = getSingleArgument(op, "kernel_w", 0);
- } else {
- if (has_custom_kernels_size) {
+ }
+ else
+ {
+ if (has_custom_kernels_size)
+ {
// kernels order: h w
std::vector<int32_t> kernels;
auto kernels_arg = findArgumentByName(op.arg(), "kernels");
- for (const auto& ker : kernels_arg.ints())
+ for (const auto &ker : kernels_arg.ints())
kernels.push_back(static_cast<int32_t>(ker));
assert(kernels.size() == 2);
kernel_h = kernels[0];
kernel_w = kernels[1];
- } else {
+ }
+ else
+ {
kernel_h = kernel_w = getSingleArgument(op, "kernel", 0);
}
}
return Shape{kernel_h, kernel_w};
}
-mir::Operation::Output* Caffe2OpCreator::convertCaffeToMIR(mir::Operation::Output* arg) {
+mir::Operation::Output *Caffe2OpCreator::convertCaffeToMIR(mir::Operation::Output *arg)
+{
// NCHW -> NHWC
- auto transpose = createOp<ops::TransposeOp>("CaffeToMIR", arg, std::vector<std::size_t>{0, 2, 3, 1});
+ auto transpose =
+ createOp<ops::TransposeOp>("CaffeToMIR", arg, std::vector<std::size_t>{0, 2, 3, 1});
return transpose->getOutput(0);
}
-mir::Operation::Output* Caffe2OpCreator::convertMIRToCaffe(mir::Operation::Output* arg) {
+mir::Operation::Output *Caffe2OpCreator::convertMIRToCaffe(mir::Operation::Output *arg)
+{
// NHWC -> NCHW
- auto transpose = createOp<ops::TransposeOp>("MIRToCaffe", arg, std::vector<std::size_t>{0, 3, 1, 2});
+ auto transpose =
+ createOp<ops::TransposeOp>("MIRToCaffe", arg, std::vector<std::size_t>{0, 3, 1, 2});
return transpose->getOutput(0);
}
// Check functions
//
-void Caffe2OpCreator::checkConvLikeOp(const ::caffe2::OperatorDef& op,
- std::set<std::string>& problems_ops_set) {
+void Caffe2OpCreator::checkConvLikeOp(const ::caffe2::OperatorDef &op,
+ std::set<std::string> &problems_ops_set)
+{
commonCheck(op, problems_ops_set);
// Padding
- bool has_custom_pad = hasArgument(op.arg(), "pad_l") || hasArgument(op.arg(), "pad_r")
- || hasArgument(op.arg(), "pad_t") || hasArgument(op.arg(), "pad_b");
+ bool has_custom_pad = hasArgument(op.arg(), "pad_l") || hasArgument(op.arg(), "pad_r") ||
+ hasArgument(op.arg(), "pad_t") || hasArgument(op.arg(), "pad_b");
if (has_custom_pad && hasArgument(op.arg(), "pad"))
problems_ops_set.insert("Custom pad can't be combined with overall pad");
- if (has_custom_pad && !(hasArgument(op.arg(), "pad_l") && hasArgument(op.arg(), "pad_r")
- && hasArgument(op.arg(), "pad_t") && hasArgument(op.arg(), "pad_b")))
+ if (has_custom_pad &&
+ !(hasArgument(op.arg(), "pad_l") && hasArgument(op.arg(), "pad_r") &&
+ hasArgument(op.arg(), "pad_t") && hasArgument(op.arg(), "pad_b")))
problems_ops_set.insert("If one custom pad specified - all custom pads must be specified");
// Kernel size
- bool has_custom_kernel_size = hasArgument(op.arg(), "kernel_h")
- || hasArgument(op.arg(), "kernel_w");
+ bool has_custom_kernel_size =
+ hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
if (has_custom_kernel_size && hasArgument(op.arg(), "kernel"))
problems_ops_set.insert("Custom kernel size can't be combined with overall kernel size");
- if (has_custom_kernel_size && !(hasArgument(op.arg(), "kernel_h")
- && hasArgument(op.arg(), "kernel_w")))
- problems_ops_set.insert("If one custom kernel size specified - all custom kernel sizes must be specified");
+ if (has_custom_kernel_size &&
+ !(hasArgument(op.arg(), "kernel_h") && hasArgument(op.arg(), "kernel_w")))
+ problems_ops_set.insert(
+ "If one custom kernel size specified - all custom kernel sizes must be specified");
}
-void Caffe2OpCreator::checkFC(const ::caffe2::OperatorDef& op,
- std::set<std::string>& problems_ops_set) {
+void Caffe2OpCreator::checkFC(const ::caffe2::OperatorDef &op,
+ std::set<std::string> &problems_ops_set)
+{
commonCheck(op, problems_ops_set);
- for (auto& s : {"axis", "axis_w", "float16_compute"})
+ for (auto &s : {"axis", "axis_w", "float16_compute"})
if (hasArgument(op.arg(), s))
problems_ops_set.insert(std::string("FC: only default '") + s + "' value is supported");
}
-void Caffe2OpCreator::checkSpatialBN(const ::caffe2::OperatorDef& op,
- std::set<std::string>& problems_ops_set) {
+void Caffe2OpCreator::checkSpatialBN(const ::caffe2::OperatorDef &op,
+ std::set<std::string> &problems_ops_set)
+{
commonCheck(op, problems_ops_set);
if (op.input_size() != 5)
problems_ops_set.insert(
- "SpatialBN must have exactly 5 inputs ('sums' and 'sumsq' are not supported yet)");
+ "SpatialBN must have exactly 5 inputs ('sums' and 'sumsq' are not supported yet)");
if (getSingleArgument(op, "is_test", 1) != 1)
problems_ops_set.insert(std::string("SpatialBN: only test mode supported"));
}
-void Caffe2OpCreator::commonCheck(const ::caffe2::OperatorDef& op,
- std::set<std::string>& problems_ops_set) {
+void Caffe2OpCreator::commonCheck(const ::caffe2::OperatorDef &op,
+ std::set<std::string> &problems_ops_set)
+{
if (getSingleArgument(op, "order", "NCHW") != "NCHW")
problems_ops_set.insert("Only 'NCHW' axis order is supported");
}
// Convert functions
//
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertAdd(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertAdd(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op, const MIRTensors &mir_tensors)
+{
- std::vector<mir::Operation::Output*> add_input;
+ std::vector<mir::Operation::Output *> add_input;
add_input.reserve(inputs.size() + 1);
- for (const auto& i : inputs)
+ for (const auto &i : inputs)
add_input.emplace_back(convertCaffeToMIR(i));
// check mir tensors contain operand
- if (mir_tensors.find(op.input(1)) != mir_tensors.end()) {
+ if (mir_tensors.find(op.input(1)) != mir_tensors.end())
+ {
auto next_input = createOp<ops::ConstantOp>("Constant", mir_tensors.at(op.input(1)));
add_input.emplace_back(next_input->getOutput(0));
}
- auto add = createOp<ops::ElementwiseOp>("Elementwise_Add", add_input, ops::ElementwiseOp::OpType::add);
+ auto add =
+ createOp<ops::ElementwiseOp>("Elementwise_Add", add_input, ops::ElementwiseOp::OpType::add);
return {convertMIRToCaffe(add->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertAveragePool(const std::vector<mir::Operation::Output*>& inputs,
- const OperatorDef& op) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertAveragePool(const std::vector<mir::Operation::Output *> &inputs,
+ const OperatorDef &op)
+{
Shape window_shape = getWindowShape(op, inputs);
Shape strides(getStrides(op));
return {convertMIRToCaffe(pooling->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertConv(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertConv(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op, const MIRTensors &mir_tensors)
+{
// dilation order: h w (not used)
Shape stride_shape(getStrides(op));
int num_groups = getSingleArgument(op, "group", 1);
bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
- mir::Operation* result;
- if (is_depthwise) {
+ mir::Operation *result;
+ if (is_depthwise)
+ {
// TODO handle properly kernel with layer multiplier
auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
auto kernel = createOp<ops::ConstantOp>("Constant", transposed_tensor)->getOutput(0);
result = createOp<ops::DepthwiseConv2DOp>("Depthwise_Conv2D", convertCaffeToMIR(inputs[0]),
kernel, stride_shape, pad_before, pad_after);
- } else {
+ }
+ else
+ {
// first we need to convert kernel of grouped convolution to appropriate ordinary kernel
if (num_groups != 1)
kernel_tensor = fixGroupedKernel(num_groups, kernel_tensor);
kernel_tensor = transposeTensor<3, 0, 1, 2>(kernel_tensor);
auto kernel = createOp<ops::ConstantOp>("Constant", kernel_tensor)->getOutput(0);
- result = createOp<ops::Conv2DOp>("Conv2D", convertCaffeToMIR(inputs[0]), kernel,
- stride_shape, pad_before, pad_after);
+ result = createOp<ops::Conv2DOp>("Conv2D", convertCaffeToMIR(inputs[0]), kernel, stride_shape,
+ pad_before, pad_after);
}
- if (op.input_size() > 2) { // Bias is optional
+ if (op.input_size() > 2)
+ { // Bias is optional
auto bias = createOp<ops::ConstantOp>("Constant", mir_tensors.at(op.input(2)))->getOutput(0);
result = createOp<ops::BiasAddOp>("Bias_Add", result->getOutput(0), bias);
}
return {convertMIRToCaffe(result->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertConcat(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertConcat(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
int axis = getSingleArgument(op, "axis", 1);
auto result = createOp<ops::ConcatOp>("Concat", inputs, axis);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertDropout(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertDropout(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
int is_test = getSingleArgument(op, "is_test", 0);
if (is_test)
return {inputs[0]};
return {dropout->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertFullyConnected(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertFullyConnected(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op,
+ const MIRTensors &mir_tensors)
+{
auto weights_tensor = transposeTensor<1, 0>(mir_tensors.at(op.input(1)));
- const auto& input_shape = inputs[0]->getShape();
+ const auto &input_shape = inputs[0]->getShape();
// Transform input into 2-D tensor by flattening axes
Shape shape{input_shape.dim(0), input_shape.numElements() / input_shape.dim(0)};
result = createOp<ops::BiasAddOp>("Bias_Add", result->getOutput(0), bias);
return {result->getOutput(0)};
-
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertMaxPool(const std::vector<mir::Operation::Output*>& inputs,
- const OperatorDef& op) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertMaxPool(const std::vector<mir::Operation::Output *> &inputs,
+ const OperatorDef &op)
+{
Shape window_shape = getWindowShape(op, inputs);
Shape strides(getStrides(op));
std::vector<int32_t> pad_before, pad_after;
std::tie(pad_before, pad_after) = getPadding(op);
- auto pooling = createOp<ops::PoolOp>("Pool", convertCaffeToMIR(inputs[0]), pool_type, window_shape,
- strides, pad_before, pad_after, border_type);
+ auto pooling = createOp<ops::PoolOp>("Pool", convertCaffeToMIR(inputs[0]), pool_type,
+ window_shape, strides, pad_before, pad_after, border_type);
return {convertMIRToCaffe(pooling->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertMul(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertMul(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op, const MIRTensors &mir_tensors)
+{
- std::vector<mir::Operation::Output*> input_descriptors;
+ std::vector<mir::Operation::Output *> input_descriptors;
input_descriptors.reserve(inputs.size() + 1);
- for (const auto& i: inputs)
+ for (const auto &i : inputs)
input_descriptors.emplace_back(convertCaffeToMIR(i));
// TODO: replace ConstantOp on inputs
- if (mir_tensors.find(op.input(1)) != mir_tensors.end()) {
+ if (mir_tensors.find(op.input(1)) != mir_tensors.end())
+ {
auto const_tensor = createOp<ops::ConstantOp>("Constant", mir_tensors.at(op.input(1)));
input_descriptors.emplace_back(const_tensor->getOutput(0));
}
- auto mul = createOp<ops::ElementwiseOp>("Elementwise_Mul", input_descriptors, ops::ElementwiseOp::OpType::mul);
+ auto mul = createOp<ops::ElementwiseOp>("Elementwise_Mul", input_descriptors,
+ ops::ElementwiseOp::OpType::mul);
return {convertMIRToCaffe(mul->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertRelu(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertRelu(const std::vector<mir::Operation::Output *> &inputs)
+{
auto relu = createOp<ops::ReluOp>("Relu", inputs[0]);
return {relu->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertResizeNearest(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertResizeNearest(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
// assume NCHW and convert to MIR (NHWC)
std::vector<float> scales(4);
assert(inputs[0]->getShape().rank() == 4 && "only 4d tensors is supported");
return {convertMIRToCaffe(resize->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertSigmoid(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertSigmoid(const std::vector<mir::Operation::Output *> &inputs)
+{
auto result = createOp<ops::SigmoidOp>("Sigmoid", inputs[0]);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertSoftmax(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertSoftmax(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
int axis = getSingleArgument(op, "axis", 1);
auto softmax = createOp<ops::SoftmaxOp>("Softmax", inputs[0], axis);
return {softmax->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertSpatialBN(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertSpatialBN(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op, const MIRTensors &mir_tensors)
+{
// overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias
- const auto& scale_tensor = mir_tensors.at(op.input(1));
- const auto& bias_tensor = mir_tensors.at(op.input(2));
- const auto& mean_tensor = mir_tensors.at(op.input(3));
- const auto& var_tensor = mir_tensors.at(op.input(4));
+ const auto &scale_tensor = mir_tensors.at(op.input(1));
+ const auto &bias_tensor = mir_tensors.at(op.input(2));
+ const auto &mean_tensor = mir_tensors.at(op.input(3));
+ const auto &var_tensor = mir_tensors.at(op.input(4));
float eps = getSingleArgument(op, "epsilon", 1e-5f);
// res1 = X - mean
Tensor<float> bias_data(mean_tensor);
- for (auto& idx: ShapeRange(bias_data.getShape()))
+ for (auto &idx : ShapeRange(bias_data.getShape()))
bias_data.at(idx) *= -1;
auto mean = createOp<ops::ConstantOp>("Constant", mean_tensor)->getOutput(0);
// res2 = res1 * scale / (var + epsilon)
Tensor<float> multiplier(scale_tensor);
- for (auto& idx: ShapeRange(scale_tensor.getShape()))
- multiplier.at(idx) /= std::sqrt(*reinterpret_cast<float*>(var_tensor.at(idx)) + eps);
+ for (auto &idx : ShapeRange(scale_tensor.getShape()))
+ multiplier.at(idx) /= std::sqrt(*reinterpret_cast<float *>(var_tensor.at(idx)) + eps);
auto scale = createOp<ops::ConstantOp>("Constant", scale_tensor)->getOutput(0);
result = createOp<ops::ScaleOp>("Scale", result->getOutput(0), scale);
return {convertMIRToCaffe(result->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertSum(const std::vector<mir::Operation::Output*>& inputs) {
- const auto& input_shape = inputs[0]->getShape();
- for (auto& in : inputs)
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertSum(const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &input_shape = inputs[0]->getShape();
+ for (auto &in : inputs)
assert(input_shape == in->getShape() && "All Sum inputs must have same shape");
- auto op = createOp<ops::ElementwiseOp>("Elementwise_Add", inputs, ops::ElementwiseOp::OpType::add);
+ auto op =
+ createOp<ops::ElementwiseOp>("Elementwise_Add", inputs, ops::ElementwiseOp::OpType::add);
return {op->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertClip(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertClip(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op)
+{
float max = getSingleArgument(op, "max", float(0));
float min = getSingleArgument(op, "min", float(0));
return {cap_relu->getOutput(0)};
}
-
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::convertReshape(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors) {
+std::vector<mir::Operation::Output *>
+Caffe2OpCreator::convertReshape(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op, const MIRTensors &mir_tensors)
+{
// Check new shape input
assert(mir_tensors.find(op.input(1)) != mir_tensors.end());
- const auto& shape_tensor = mir_tensors.at(op.input(1));
+ const auto &shape_tensor = mir_tensors.at(op.input(1));
Tensor<int64_t> out_shape_tensor(shape_tensor);
ShapeRange range(out_shape_tensor.getShape());
std::vector<int32_t> shape_vec;
- for (const auto& index: range) {
+ for (const auto &index : range)
+ {
shape_vec.push_back(static_cast<int32_t>(out_shape_tensor.at(index)));
}
Shape out_shape(shape_vec);
return {reshape->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-Caffe2OpCreator::createInput(const std::string& name, const mir::Shape& shape) {
+std::vector<mir::Operation::Output *> Caffe2OpCreator::createInput(const std::string &name,
+ const mir::Shape &shape)
+{
auto variable = _graph->create<ops::InputOp>(name, shape);
return {variable->getOutput(0)};
}
#include "caffe2/proto/caffe2.pb.h"
-namespace nnc {
+namespace nnc
+{
using mir::Graph;
using mir::Operation;
using mir::Shape;
using MIRTensors = const std::map<std::string, mir::TensorVariant>;
-class Caffe2OpCreator {
+class Caffe2OpCreator
+{
public:
- explicit Caffe2OpCreator(Graph* g) : _graph(g) {};
+ explicit Caffe2OpCreator(Graph *g) : _graph(g){};
- void checkConvLikeOp(const ::caffe2::OperatorDef& op, std::set<std::string>& problems_ops_set);
+ void checkConvLikeOp(const ::caffe2::OperatorDef &op, std::set<std::string> &problems_ops_set);
- void checkFC(const ::caffe2::OperatorDef& op, std::set<std::string>& problems_ops_set);
+ void checkFC(const ::caffe2::OperatorDef &op, std::set<std::string> &problems_ops_set);
- void checkSpatialBN(const ::caffe2::OperatorDef& op, std::set<std::string>& problems_ops_set);
+ void checkSpatialBN(const ::caffe2::OperatorDef &op, std::set<std::string> &problems_ops_set);
- void commonCheck(const ::caffe2::OperatorDef& op, std::set<std::string>& problems_ops_set);
+ void commonCheck(const ::caffe2::OperatorDef &op, std::set<std::string> &problems_ops_set);
- std::vector<mir::Operation::Output*>
- createInput(const std::string& name, const mir::Shape& shape);
+ std::vector<mir::Operation::Output *> createInput(const std::string &name,
+ const mir::Shape &shape);
- std::vector<mir::Operation::Output*>
- convertAdd(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors&mir_tensors);
+ std::vector<mir::Operation::Output *>
+ convertAdd(const std::vector<mir::Operation::Output *> &inputs, const ::caffe2::OperatorDef &op,
+ const MIRTensors &mir_tensors);
- std::vector<mir::Operation::Output*>
- convertAveragePool(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op);
+ std::vector<mir::Operation::Output *>
+ convertAveragePool(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op);
- std::vector<mir::Operation::Output*>
- convertConv(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors);
+ std::vector<mir::Operation::Output *>
+ convertConv(const std::vector<mir::Operation::Output *> &inputs, const ::caffe2::OperatorDef &op,
+ const MIRTensors &mir_tensors);
- std::vector<mir::Operation::Output*>
- convertConcat(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op);
+ std::vector<mir::Operation::Output *>
+ convertConcat(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op);
- std::vector<mir::Operation::Output*>
- convertDropout(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op);
+ std::vector<mir::Operation::Output *>
+ convertDropout(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op);
- std::vector<mir::Operation::Output*>
- convertFullyConnected(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors);
+ std::vector<mir::Operation::Output *>
+ convertFullyConnected(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op, const MIRTensors &mir_tensors);
- std::vector<mir::Operation::Output*>
- convertMaxPool(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op);
+ std::vector<mir::Operation::Output *>
+ convertMaxPool(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op);
- std::vector<mir::Operation::Output*>
- convertMul(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors);
+ std::vector<mir::Operation::Output *>
+ convertMul(const std::vector<mir::Operation::Output *> &inputs, const ::caffe2::OperatorDef &op,
+ const MIRTensors &mir_tensors);
- std::vector<mir::Operation::Output*>
- convertRelu(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertRelu(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertResizeNearest(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op);
+ std::vector<mir::Operation::Output *>
+ convertResizeNearest(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op);
- std::vector<mir::Operation::Output*>
- convertSigmoid(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSigmoid(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertSoftmax(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op);
+ std::vector<mir::Operation::Output *>
+ convertSoftmax(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op);
- std::vector<mir::Operation::Output*>
- convertSpatialBN(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors);
+ std::vector<mir::Operation::Output *>
+ convertSpatialBN(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op, const MIRTensors &mir_tensors);
- std::vector<mir::Operation::Output*>
- convertSum(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSum(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertClip(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op);
-
- std::vector<mir::Operation::Output*>
- convertReshape(const std::vector<mir::Operation::Output*>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors);
+ std::vector<mir::Operation::Output *>
+ convertClip(const std::vector<mir::Operation::Output *> &inputs, const ::caffe2::OperatorDef &op);
+ std::vector<mir::Operation::Output *>
+ convertReshape(const std::vector<mir::Operation::Output *> &inputs,
+ const ::caffe2::OperatorDef &op, const MIRTensors &mir_tensors);
private:
- Graph* _graph = nullptr;
+ Graph *_graph = nullptr;
- mir::Operation::Output* convertCaffeToMIR(mir::Operation::Output* arg);
+ mir::Operation::Output *convertCaffeToMIR(mir::Operation::Output *arg);
- mir::Operation::Output* convertMIRToCaffe(mir::Operation::Output* arg);
+ mir::Operation::Output *convertMIRToCaffe(mir::Operation::Output *arg);
- template <typename OpType, typename ...Types>
- mir::Operation* createOp(const std::string& name, Types&& ... args);
+ template <typename OpType, typename... Types>
+ mir::Operation *createOp(const std::string &name, Types &&... args);
};
-template <typename OpType, typename ...Types>
-mir::Operation* Caffe2OpCreator::createOp(const std::string& name, Types&& ... args) {
- mir::Operation* new_op = _graph->create<OpType>("", std::forward<Types>(args)...);
+template <typename OpType, typename... Types>
+mir::Operation *Caffe2OpCreator::createOp(const std::string &name, Types &&... args)
+{
+ mir::Operation *new_op = _graph->create<OpType>("", std::forward<Types>(args)...);
std::string op_name = name + "_" + std::to_string(new_op->getId());
new_op->setName(op_name);
return new_op;
} // namespace nnc
-#endif //NNCC_CAFFE2_OP_CREATOR_H
+#endif // NNCC_CAFFE2_OP_CREATOR_H
#ifndef NNCC_CAFFE2_OP_TYPES_H
#define NNCC_CAFFE2_OP_TYPES_H
-namespace nnc {
+namespace nnc
+{
-enum class SupportedCaffe2OpType {
+enum class SupportedCaffe2OpType
+{
add,
averagePool,
clip,
sum,
};
-} // namespace nnc
+} // namespace nnc
-#endif // NNCC_CAFFE2_OP_TYPES_H
+#endif // NNCC_CAFFE2_OP_TYPES_H
#include "caffe2_proto_helper.h"
-namespace nnc {
+namespace nnc
+{
-const ::caffe2::Argument& findArgumentByName(RepArgument args, const std::string& name) {
- for (auto& arg : args)
+const ::caffe2::Argument &findArgumentByName(RepArgument args, const std::string &name)
+{
+ for (auto &arg : args)
if (arg.name() == name)
return arg;
throw std::runtime_error("Can't find argument with name: " + name);
}
-const bool hasArgument(RepArgument args, const std::string& name) {
- for (auto& arg : args)
+const bool hasArgument(RepArgument args, const std::string &name)
+{
+ for (auto &arg : args)
if (arg.name() == name)
return true;
return false;
}
-int getSingleArgument(const ::caffe2::OperatorDef& op, const std::string& argument_name,
- const int default_value) {
+int getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
+ const int default_value)
+{
if (hasArgument(op.arg(), argument_name))
return static_cast<int>(findArgumentByName(op.arg(), argument_name).i());
return default_value;
}
-float getSingleArgument(const ::caffe2::OperatorDef& op, const std::string& argument_name,
- const float default_value) {
+float getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
+ const float default_value)
+{
if (hasArgument(op.arg(), argument_name))
return findArgumentByName(op.arg(), argument_name).f();
return default_value;
}
-std::string getSingleArgument(const ::caffe2::OperatorDef& op, const std::string& argument_name,
- const std::string& default_value) {
+std::string getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
+ const std::string &default_value)
+{
if (hasArgument(op.arg(), argument_name))
return findArgumentByName(op.arg(), argument_name).s();
return default_value;
#include "caffe2/proto/caffe2.pb.h"
-namespace nnc {
+namespace nnc
+{
-using RepArgument = const ::google::protobuf::RepeatedPtrField<::caffe2::Argument>&;
+using RepArgument = const ::google::protobuf::RepeatedPtrField<::caffe2::Argument> &;
-const ::caffe2::Argument& findArgumentByName(RepArgument args, const std::string& name);
+const ::caffe2::Argument &findArgumentByName(RepArgument args, const std::string &name);
-const bool hasArgument(RepArgument args, const std::string& name);
+const bool hasArgument(RepArgument args, const std::string &name);
-int getSingleArgument(const ::caffe2::OperatorDef& op, const std::string& argument_name,
+int getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
int default_value);
-float getSingleArgument(const ::caffe2::OperatorDef& op, const std::string& argument_name,
+float getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
float default_value);
-std::string getSingleArgument(const ::caffe2::OperatorDef& op, const std::string& argument_name,
- const std::string& default_value);
+std::string getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name,
+ const std::string &default_value);
} // namespace nnc
#include <stdexcept>
#include <vector>
-namespace nnc {
+namespace nnc
+{
using namespace ::caffe;
using namespace mir;
-CaffeImporter::CaffeImporter(std::string filename) : _modelFilename(std::move(filename)),
- _graph(new mir::Graph()),
- _opCreator(new CaffeOpCreator(_graph)) {}
+CaffeImporter::CaffeImporter(std::string filename)
+ : _modelFilename(std::move(filename)), _graph(new mir::Graph()),
+ _opCreator(new CaffeOpCreator(_graph))
+{
+}
CaffeImporter::~CaffeImporter() = default;
throw std::runtime_error("File \"" + filename + "\" has not been consumed entirely.");
}
-void CaffeImporter::import() {
+void CaffeImporter::import()
+{
GOOGLE_PROTOBUF_VERIFY_VERSION;
_net.reset(new NetParameter());
collectUnsupportedLayers();
}
-Graph* CaffeImporter::createIR() {
+Graph *CaffeImporter::createIR()
+{
for (int i = 0; i < _net->layer_size(); ++i)
createMIRNodesFromLayer(_net->layer(i));
return _graph;
}
-void CaffeImporter::collectUnsupportedLayers() {
+void CaffeImporter::collectUnsupportedLayers()
+{
processDeprecatedInput();
for (int i = 0; i < _net->layer_size(); ++i)
collectUnsupportedOp(_net->layer(i));
- if (!_problemsOpSet.empty()) {
+ if (!_problemsOpSet.empty())
+ {
std::string msg("NNC can't load model. Detected problems:");
- for (const auto& problemStr : _problemsOpSet)
+ for (const auto &problemStr : _problemsOpSet)
msg.append("\n * " + problemStr);
throw std::runtime_error(msg);
}
}
-void CaffeImporter::createMIRNodesFromLayer(const LayerParameter& layer) {
- std::vector<mir::Operation::Output*> inputs = getMIRInputsForLayer(layer);
- std::vector<mir::Operation::Output*> outputs;
+void CaffeImporter::createMIRNodesFromLayer(const LayerParameter &layer)
+{
+ std::vector<mir::Operation::Output *> inputs = getMIRInputsForLayer(layer);
+ std::vector<mir::Operation::Output *> outputs;
- switch (_operatorTypes.at(layer.type())) {
+ switch (_operatorTypes.at(layer.type()))
+ {
case CaffeOpType::input:
outputs = _opCreator->convertInput(layer);
break;
_blobNameToOpOutput[layer.top(i)] = outputs.at(i);
}
-void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) {
+void CaffeImporter::collectUnsupportedOp(const LayerParameter &lp)
+{
auto it = _operatorTypes.find(lp.type());
- if (it == _operatorTypes.end()) {
+ if (it == _operatorTypes.end())
+ {
_problemsOpSet.insert(lp.type() + ": unknown layer");
return;
}
CaffeOpType op_type = it->second;
- switch (op_type) {
+ switch (op_type)
+ {
case CaffeOpType::concat:
case CaffeOpType::input:
case CaffeOpType::softmax:
}
}
-void CaffeImporter::processDeprecatedInput() {
+void CaffeImporter::processDeprecatedInput()
+{
if (_net->input_dim_size() != 0 || _net->input_shape_size() != 0)
throw std::runtime_error("Deprecated Caffe input types are not supported");
}
-std::vector<mir::Operation::Output*>
-CaffeImporter::getMIRInputsForLayer(const LayerParameter& layer) {
- std::vector<mir::Operation::Output*> inputs;
+std::vector<mir::Operation::Output *>
+CaffeImporter::getMIRInputsForLayer(const LayerParameter &layer)
+{
+ std::vector<mir::Operation::Output *> inputs;
- for (const auto& input_name : layer.bottom())
+ for (const auto &input_name : layer.bottom())
inputs.push_back(_blobNameToOpOutput.at(input_name));
return inputs;
}
-void CaffeImporter::setGraphOutputs() {
- const auto& last_layer = _net->layer(_net->layer_size() - 1);
+void CaffeImporter::setGraphOutputs()
+{
+ const auto &last_layer = _net->layer(_net->layer_size() - 1);
// For now, we assume that:
// - there is exactly one output;
// - the output is from the last layer.
output->getNode()->setName("");
}
-void CaffeImporter::cleanup() {
- delete _graph;
-}
+void CaffeImporter::cleanup() { delete _graph; }
const std::map<std::string, CaffeOpType> CaffeImporter::_operatorTypes = {
- {"AbsVal", CaffeOpType::absVal},
- {"Accuracy", CaffeOpType::accuracy},
- {"ArgMax", CaffeOpType::argMax},
- {"BatchNorm", CaffeOpType::batchNorm},
- {"BatchReindex", CaffeOpType::batchReindex},
- {"Bias", CaffeOpType::bias},
- {"BNLL", CaffeOpType::BNLL},
- {"Clip", CaffeOpType::clip},
- {"Concat", CaffeOpType::concat},
- {"ContrastiveLoss", CaffeOpType::contrastiveLoss},
- {"Convolution", CaffeOpType::convolution},
- {"Crop", CaffeOpType::crop},
- {"Data", CaffeOpType::data},
- {"Deconvolution", CaffeOpType::deconvolution},
- {"Dropout", CaffeOpType::dropout},
- {"DummyData", CaffeOpType::dummyData},
- {"Eltwise", CaffeOpType::eltwise},
- {"ELU", CaffeOpType::ELU},
- {"Embed", CaffeOpType::embed},
- {"EuclidianLoss", CaffeOpType::euclidianLoss},
- {"Exp", CaffeOpType::exp},
- {"Filter", CaffeOpType::filter},
- {"Flatten", CaffeOpType::flatten},
- {"HDF5Data", CaffeOpType::HDF5Data},
- {"HDF5Output", CaffeOpType::HDF5Output},
- {"HingeLoss", CaffeOpType::hingeLoss},
- {"Im2Col", CaffeOpType::im2Col},
- {"ImageData", CaffeOpType::imageData},
- {"InfogainLoss", CaffeOpType::infogainLoss},
- {"InnerProduct", CaffeOpType::innerProduct},
- {"Input", CaffeOpType::input},
- {"Log", CaffeOpType::log},
- {"LRN", CaffeOpType::LRN},
- {"LSTM", CaffeOpType::LSTM},
- {"MemoryData", CaffeOpType::memoryData},
- {"MultinomialLogisticLoss", CaffeOpType::multinomialLogisticLoss},
- {"MVN", CaffeOpType::MVN},
- {"Parameter", CaffeOpType::parameter},
- {"Pooling", CaffeOpType::pooling},
- {"Power", CaffeOpType::power},
- {"PReLU", CaffeOpType::PReLU},
- {"Python", CaffeOpType::python},
- {"Recurrent", CaffeOpType::recurrent},
- {"Reduction", CaffeOpType::reduction},
- {"ReLU", CaffeOpType::ReLU},
- {"Reshape", CaffeOpType::reshape},
- {"RNN", CaffeOpType::RNN},
- {"Scale", CaffeOpType::scale},
- {"SigmoidCrossEntropyLoss", CaffeOpType::sigmoidCrossEntropyLoss},
- {"Sigmoid", CaffeOpType::sigmoid},
- {"Silence", CaffeOpType::silence},
- {"Softmax", CaffeOpType::softmax},
- {"SoftmaxWithLoss", CaffeOpType::softmaxWithLoss},
- {"SPP", CaffeOpType::SPP},
- {"Split", CaffeOpType::split},
- {"Slice", CaffeOpType::slice},
- {"TanH", CaffeOpType::tanh},
- {"Threshold", CaffeOpType::threshold},
- {"Tile", CaffeOpType::tile},
- {"WindowData", CaffeOpType::windowData}
-};
+ {"AbsVal", CaffeOpType::absVal},
+ {"Accuracy", CaffeOpType::accuracy},
+ {"ArgMax", CaffeOpType::argMax},
+ {"BatchNorm", CaffeOpType::batchNorm},
+ {"BatchReindex", CaffeOpType::batchReindex},
+ {"Bias", CaffeOpType::bias},
+ {"BNLL", CaffeOpType::BNLL},
+ {"Clip", CaffeOpType::clip},
+ {"Concat", CaffeOpType::concat},
+ {"ContrastiveLoss", CaffeOpType::contrastiveLoss},
+ {"Convolution", CaffeOpType::convolution},
+ {"Crop", CaffeOpType::crop},
+ {"Data", CaffeOpType::data},
+ {"Deconvolution", CaffeOpType::deconvolution},
+ {"Dropout", CaffeOpType::dropout},
+ {"DummyData", CaffeOpType::dummyData},
+ {"Eltwise", CaffeOpType::eltwise},
+ {"ELU", CaffeOpType::ELU},
+ {"Embed", CaffeOpType::embed},
+ {"EuclidianLoss", CaffeOpType::euclidianLoss},
+ {"Exp", CaffeOpType::exp},
+ {"Filter", CaffeOpType::filter},
+ {"Flatten", CaffeOpType::flatten},
+ {"HDF5Data", CaffeOpType::HDF5Data},
+ {"HDF5Output", CaffeOpType::HDF5Output},
+ {"HingeLoss", CaffeOpType::hingeLoss},
+ {"Im2Col", CaffeOpType::im2Col},
+ {"ImageData", CaffeOpType::imageData},
+ {"InfogainLoss", CaffeOpType::infogainLoss},
+ {"InnerProduct", CaffeOpType::innerProduct},
+ {"Input", CaffeOpType::input},
+ {"Log", CaffeOpType::log},
+ {"LRN", CaffeOpType::LRN},
+ {"LSTM", CaffeOpType::LSTM},
+ {"MemoryData", CaffeOpType::memoryData},
+ {"MultinomialLogisticLoss", CaffeOpType::multinomialLogisticLoss},
+ {"MVN", CaffeOpType::MVN},
+ {"Parameter", CaffeOpType::parameter},
+ {"Pooling", CaffeOpType::pooling},
+ {"Power", CaffeOpType::power},
+ {"PReLU", CaffeOpType::PReLU},
+ {"Python", CaffeOpType::python},
+ {"Recurrent", CaffeOpType::recurrent},
+ {"Reduction", CaffeOpType::reduction},
+ {"ReLU", CaffeOpType::ReLU},
+ {"Reshape", CaffeOpType::reshape},
+ {"RNN", CaffeOpType::RNN},
+ {"Scale", CaffeOpType::scale},
+ {"SigmoidCrossEntropyLoss", CaffeOpType::sigmoidCrossEntropyLoss},
+ {"Sigmoid", CaffeOpType::sigmoid},
+ {"Silence", CaffeOpType::silence},
+ {"Softmax", CaffeOpType::softmax},
+ {"SoftmaxWithLoss", CaffeOpType::softmaxWithLoss},
+ {"SPP", CaffeOpType::SPP},
+ {"Split", CaffeOpType::split},
+ {"Slice", CaffeOpType::slice},
+ {"TanH", CaffeOpType::tanh},
+ {"Threshold", CaffeOpType::threshold},
+ {"Tile", CaffeOpType::tile},
+ {"WindowData", CaffeOpType::windowData}};
} // namespace nnc
-
/*
* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
*
#include "caffe_op_creator.h"
#include "caffe_op_types.h"
-namespace nnc {
+namespace nnc
+{
-class CaffeImporter {
+class CaffeImporter
+{
public:
explicit CaffeImporter(std::string filename);
void import();
- mir::Graph* createIR();
+ mir::Graph *createIR();
void cleanup();
private:
std::string _modelFilename;
std::unique_ptr<::caffe::NetParameter> _net;
- mir::Graph* _graph;
+ mir::Graph *_graph;
std::unique_ptr<CaffeOpCreator> _opCreator;
// Maps Caffe blob names to corresponding MIR operation outputs.
- std::map<std::string, mir::Operation::Output*> _blobNameToOpOutput;
+ std::map<std::string, mir::Operation::Output *> _blobNameToOpOutput;
static const std::map<std::string, CaffeOpType> _operatorTypes;
// set of strings describing incorrect parts of network and parts of network unsupported by NNC
std::set<std::string> _problemsOpSet;
/**
- * @brief Mark output MIR nodes
- */
+ * @brief Mark output MIR nodes
+ */
void setGraphOutputs();
/**
- * @brief Pass through caffe graph and collect unsupported by NNC layers
- * @throw PassException with message, containing detected problems
- */
+ * @brief Pass through caffe graph and collect unsupported by NNC layers
+ * @throw PassException with message, containing detected problems
+ */
void collectUnsupportedLayers();
/**
- * @brief Create MIR node from single caffe layer
- */
- void createMIRNodesFromLayer(const ::caffe::LayerParameter& layer);
+ * @brief Create MIR node from single caffe layer
+ */
+ void createMIRNodesFromLayer(const ::caffe::LayerParameter &layer);
/**
- * @brief Collect unsupported parts of caffe layer
- */
- void collectUnsupportedOp(const ::caffe::LayerParameter& lp);
+ * @brief Collect unsupported parts of caffe layer
+ */
+ void collectUnsupportedOp(const ::caffe::LayerParameter &lp);
/**
- * @brief Returns MIR operation outputs corresponding to the inputs of the given layer.
- */
- std::vector<mir::Operation::Output*> getMIRInputsForLayer(const ::caffe::LayerParameter& layer);
+ * @brief Returns MIR operation outputs corresponding to the inputs of the given layer.
+ */
+ std::vector<mir::Operation::Output *> getMIRInputsForLayer(const ::caffe::LayerParameter &layer);
void processDeprecatedInput();
};
#include <iostream>
#include <set>
-namespace nnc {
+namespace nnc
+{
static TensorVariant fixGroupedKernel(int groups, const TensorVariant &folded_kernel)
{
using namespace mir;
-mir::Operation::Output* CaffeOpCreator::convertCaffeToMIR(mir::Operation::Output* arg) {
+mir::Operation::Output *CaffeOpCreator::convertCaffeToMIR(mir::Operation::Output *arg)
+{
// NCHW -> NHWC
auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 2, 3, 1});
return transpose->getOutput(0);
}
-mir::Operation::Output* CaffeOpCreator::convertMIRToCaffe(mir::Operation::Output* arg) {
+mir::Operation::Output *CaffeOpCreator::convertMIRToCaffe(mir::Operation::Output *arg)
+{
// NHWC -> NCHW
auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 3, 1, 2});
return transpose->getOutput(0);
}
-mir::Operation::Output*
-CaffeOpCreator::createAdd(mir::Operation::Output* arg1, mir::Operation::Output* arg2) {
- std::vector<mir::Operation::Output*> inputs{arg1, arg2};
+mir::Operation::Output *CaffeOpCreator::createAdd(mir::Operation::Output *arg1,
+ mir::Operation::Output *arg2)
+{
+ std::vector<mir::Operation::Output *> inputs{arg1, arg2};
auto op = createOp<ops::ElementwiseOp>("", inputs, ops::ElementwiseOp::OpType::add);
return op->getOutput(0);
}
-mir::Operation::Output*
-CaffeOpCreator::createMul(mir::Operation::Output* arg1, mir::Operation::Output* arg2) {
- std::vector<mir::Operation::Output*> inputs{arg1, arg2};
+mir::Operation::Output *CaffeOpCreator::createMul(mir::Operation::Output *arg1,
+ mir::Operation::Output *arg2)
+{
+ std::vector<mir::Operation::Output *> inputs{arg1, arg2};
auto op = createOp<ops::ElementwiseOp>("", inputs, ops::ElementwiseOp::OpType::mul);
return op->getOutput(0);
}
/// @brief Split arg into @p num_parts equal parts along @p axis axis.
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::createSplit(mir::Operation::Output* arg, int32_t num_parts, int32_t axis) {
- const auto& arg_shape = arg->getShape();
+std::vector<mir::Operation::Output *> CaffeOpCreator::createSplit(mir::Operation::Output *arg,
+ int32_t num_parts, int32_t axis)
+{
+ const auto &arg_shape = arg->getShape();
assert(axis >= 0 && axis < arg_shape.rank());
int32_t part_size = arg_shape.dim(axis) / num_parts;
Shape sizes(arg_shape);
sizes.dim(axis) = part_size;
- std::vector<mir::Operation::Output*> outputs(num_parts);
- for (int32_t i = 0; i < num_parts; ++i) {
+ std::vector<mir::Operation::Output *> outputs(num_parts);
+ for (int32_t i = 0; i < num_parts; ++i)
+ {
outputs[i] = createOp<ops::SliceOp>("", arg, starts, sizes)->getOutput(0);
starts.dim(axis) += part_size;
}
}
/// @brief Helper function for creating FullyConnected operation with non-square input.
-mir::Operation::Output*
-CaffeOpCreator::createFullyConnected(mir::Operation::Output* input,
- mir::Operation::Output* weights,
- int32_t axis) {
- const auto& input_shape = input->getShape();
- const auto& weights_shape = weights->getShape();
+mir::Operation::Output *CaffeOpCreator::createFullyConnected(mir::Operation::Output *input,
+ mir::Operation::Output *weights,
+ int32_t axis)
+{
+ const auto &input_shape = input->getShape();
+ const auto &weights_shape = weights->getShape();
assert(axis >= 0 && axis < input_shape.rank());
assert(weights_shape.rank() == 2);
return createOp<ops::ReshapeOp>("", fc, result_shape)->getOutput(0);
}
-TensorVariant CaffeOpCreator::convertBlob(const BlobProto& blob) {
- const void* src_data;
+TensorVariant CaffeOpCreator::convertBlob(const BlobProto &blob)
+{
+ const void *src_data;
DTYPE dtype;
- if (blob.data_size() != 0) {
+ if (blob.data_size() != 0)
+ {
assert(blob.double_data_size() == 0);
dtype = DTYPE::FLOAT32;
src_data = blob.data().data();
- } else if (blob.double_data_size() != 0) {
+ }
+ else if (blob.double_data_size() != 0)
+ {
dtype = DTYPE::FLOAT64;
src_data = blob.double_data().data();
- } else {
+ }
+ else
+ {
throw std::runtime_error("No data in Caffe BlobProto, investigate");
}
return TensorVariant(dtype, shape, src_data);
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertInput(const LayerParameter& layer) {
- const auto& params = layer.input_param();
+std::vector<mir::Operation::Output *> CaffeOpCreator::convertInput(const LayerParameter &layer)
+{
+ const auto &params = layer.input_param();
const auto num_inputs = layer.top_size();
const auto num_shapes = params.shape_size();
- std::vector<mir::Operation::Output*> outputs;
+ std::vector<mir::Operation::Output *> outputs;
assert((num_shapes == 1 || num_shapes == num_inputs) && "Unsupported number of shapes.");
- for (int i = 0; i < num_inputs; ++i) {
- const auto& blob_name = layer.top(i);
- const auto& blob_shape = params.shape(num_shapes == 1 ? 0 : i);
+ for (int i = 0; i < num_inputs; ++i)
+ {
+ const auto &blob_name = layer.top(i);
+ const auto &blob_shape = params.shape(num_shapes == 1 ? 0 : i);
const mir::Shape shape = convertBlobShape(blob_shape);
auto variable = createOp<ops::InputOp>(blob_name, shape);
outputs.push_back(variable->getOutput(0));
return outputs;
}
-static void convertConvolutionParam(const ConvolutionParameter& conv_param, Shape& strides,
- std::vector<int32_t>& padding) {
+static void convertConvolutionParam(const ConvolutionParameter &conv_param, Shape &strides,
+ std::vector<int32_t> &padding)
+{
int32_t stride_h, stride_w;
- if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
+ if (conv_param.has_stride_h() || conv_param.has_stride_w())
+ {
// If stride_h or stride_w are set, they take precedence.
stride_h = conv_param.stride_h();
stride_w = conv_param.stride_w();
- } else if (conv_param.stride_size() == 0) {
+ }
+ else if (conv_param.stride_size() == 0)
+ {
// If no strides specified, they defaults to 1.
stride_h = stride_w = 1;
- } else if (conv_param.stride_size() == 1) {
+ }
+ else if (conv_param.stride_size() == 1)
+ {
// If only one stride specified, all strides take the same value.
stride_h = stride_w = conv_param.stride(0);
- } else {
+ }
+ else
+ {
// Otherwise, there must be a stride for each dimension.
assert(conv_param.stride_size() == 2);
stride_h = conv_param.stride(0);
strides = {stride_h, stride_w};
int32_t pad_h, pad_w;
- if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
+ if (conv_param.has_pad_h() || conv_param.has_pad_w())
+ {
// If pad_h or pad_w are set, they take precedence.
pad_h = conv_param.pad_h();
pad_w = conv_param.pad_w();
- } else if (conv_param.pad_size() == 0) {
+ }
+ else if (conv_param.pad_size() == 0)
+ {
// If no pads specified, they defaults to 0.
pad_h = pad_w = 0;
- } else if (conv_param.pad_size() == 1) {
+ }
+ else if (conv_param.pad_size() == 1)
+ {
// If only one pad specified, all pads take the same value.
pad_h = pad_w = conv_param.pad(0);
- } else {
+ }
+ else
+ {
// Otherwise, there must be a pad for each dimension.
assert(conv_param.pad_size() == 2);
pad_h = conv_param.pad(0);
padding = {pad_h, pad_w};
}
-void CaffeOpCreator::checkConvolution(const ConvolutionParameter& opts,
- std::set<std::string>& problems_ops_set) {
+void CaffeOpCreator::checkConvolution(const ConvolutionParameter &opts,
+ std::set<std::string> &problems_ops_set)
+{
assert(opts.stride_size() <= 2);
if (opts.axis() != 1)
problems_ops_set.insert("Conv2D: Unsupported number of pads");
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertConvolution(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- const auto& params = layer.convolution_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertConvolution(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.convolution_param();
Shape strides;
std::vector<int32_t> padding;
auto kernel_weights = convertBlob(layer.blobs(0));
kernel_weights = transposeTensor<2, 3, 1, 0>(kernel_weights);
- Operation* result;
+ Operation *result;
auto in_group_size = kernel_weights.getShape().dim(2);
auto out_channels = kernel_weights.getShape().dim(3);
int32_t num_groups = params.group();
bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
- if (is_depthwise) {
+ if (is_depthwise)
+ {
// This is depthwise convolution
// TODO handle properly kernel with layer multiplier
auto transposed_tensor = transposeTensor<0, 1, 3, 2>(kernel_weights);
auto kernel = createOp<ops::ConstantOp>("", transposed_tensor)->getOutput(0);
result = createOp<ops::DepthwiseConv2DOp>(layer.name(), convertCaffeToMIR(inputs[0]), kernel,
strides, padding, padding);
- } else {
- if (num_groups != 1) {
+ }
+ else
+ {
+ if (num_groups != 1)
+ {
// first we need to convert kernel of grouped convolution to appropriate ordinary kernel
kernel_weights = fixGroupedKernel(params.group(), kernel_weights);
}
kernel_weights = transposeTensor<3, 0, 1, 2>(kernel_weights);
auto kernel = createOp<ops::ConstantOp>("", kernel_weights)->getOutput(0);
- result = createOp<ops::Conv2DOp>(layer.name(), convertCaffeToMIR(inputs[0]), kernel,
- strides, padding, padding);
+ result = createOp<ops::Conv2DOp>(layer.name(), convertCaffeToMIR(inputs[0]), kernel, strides,
+ padding, padding);
}
// Add the bias, if any.
- if (params.bias_term()) {
+ if (params.bias_term())
+ {
auto bias = createOp<ops::ConstantOp>("", convertBlob(layer.blobs(1)))->getOutput(0);
result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), bias);
}
return {convertMIRToCaffe(result->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertDeconvolution(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- auto& opts = layer.convolution_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertDeconvolution(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto &opts = layer.convolution_param();
Shape strides;
std::vector<int32_t> padding;
auto kernel_weights = convertBlob(layer.blobs(0));
kernel_weights = transposeTensor<2, 3, 1, 0>(kernel_weights);
- if (opts.group() != 1) {
+ if (opts.group() != 1)
+ {
// first we need to convert kernel of grouped convolution to appropriate ordinary kernel
kernel_weights = fixGroupedKernel(opts.group(), kernel_weights);
}
strides, padding);
// bias_term is optional (so might not be present) and defaults to true
- if (opts.bias_term()) {
+ if (opts.bias_term())
+ {
auto bias = createOp<ops::ConstantOp>("", convertBlob(layer.blobs(1)))->getOutput(0);
result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), bias);
}
return {convertMIRToCaffe(result->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertInnerProduct(const LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- const auto& params = layer.inner_product_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertInnerProduct(const LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.inner_product_param();
auto weights_tensor = convertBlob(layer.blobs(0));
if (!params.transpose())
auto result = createFullyConnected(inputs[0], weights, params.axis());
// Add the bias, if any.
- if (params.bias_term()) {
+ if (params.bias_term())
+ {
auto bias = createOp<ops::ConstantOp>("", convertBlob(layer.blobs(1)))->getOutput(0);
result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result, bias)->getOutput(0);
}
return {result};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertConcat(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- const auto& params = layer.concat_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertConcat(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.concat_param();
auto concat = createOp<ops::ConcatOp>(layer.name(), inputs, params.axis());
return {concat->getOutput(0)};
}
-static ops::PoolOp::PoolingType getPoolingType(const PoolingParameter& pool_param) {
+static ops::PoolOp::PoolingType getPoolingType(const PoolingParameter &pool_param)
+{
using PoolingType = ops::PoolOp::PoolingType;
if (pool_param.pool() == PoolingParameter::MAX)
PoolingParameter::PoolMethod_Name(pool_param.pool()));
}
-static void convertPoolingParam(const caffe::PoolingParameter& pool_param,
- const mir::Shape& input_shape,
- Shape& window_shape, Shape& strides,
- std::vector<int32_t>& padding_before,
- std::vector<int32_t>& padding_after) {
+static void convertPoolingParam(const caffe::PoolingParameter &pool_param,
+ const mir::Shape &input_shape, Shape &window_shape, Shape &strides,
+ std::vector<int32_t> &padding_before,
+ std::vector<int32_t> &padding_after)
+{
int32_t kernel_h, kernel_w;
assert(!pool_param.global_pooling());
- if (pool_param.has_kernel_size()) {
+ if (pool_param.has_kernel_size())
+ {
kernel_h = kernel_w = pool_param.kernel_size();
- } else {
+ }
+ else
+ {
kernel_h = pool_param.kernel_h();
kernel_w = pool_param.kernel_w();
}
window_shape = {kernel_h, kernel_w};
int32_t stride_h, stride_w;
- if (pool_param.has_stride_h() || pool_param.has_stride_w()) {
+ if (pool_param.has_stride_h() || pool_param.has_stride_w())
+ {
stride_h = pool_param.stride_h();
stride_w = pool_param.stride_w();
- } else {
+ }
+ else
+ {
stride_h = stride_w = pool_param.stride();
}
strides = {stride_h, stride_w};
int32_t pad_h, pad_w;
- if (pool_param.has_pad_h() || pool_param.has_pad_w()) {
+ if (pool_param.has_pad_h() || pool_param.has_pad_w())
+ {
pad_h = pool_param.pad_h();
pad_w = pool_param.pad_w();
- } else {
+ }
+ else
+ {
pad_h = pad_w = pool_param.pad();
}
++padding_after[1];
}
-void CaffeOpCreator::checkPooling(const PoolingParameter& opts,
- std::set<std::string>& problems_ops_set) {
+void CaffeOpCreator::checkPooling(const PoolingParameter &opts,
+ std::set<std::string> &problems_ops_set)
+{
if (opts.has_global_pooling() && opts.global_pooling())
problems_ops_set.insert("Pooling: pooling layer global_pooling param is not supported yet");
problems_ops_set.insert("Pooling: conflicting padding properties in pooling");
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertPooling(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- auto& opts = layer.pooling_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertPooling(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto &opts = layer.pooling_param();
Shape window_shape;
Shape strides;
std::vector<int32_t> padding_before, padding_after;
- const auto& input_shape = inputs[0]->getShape();
+ const auto &input_shape = inputs[0]->getShape();
convertPoolingParam(opts, input_shape, window_shape, strides, padding_before, padding_after);
ops::PoolOp::PoolingType pool_type = getPoolingType(opts);
ops::PoolOp::BorderType border_type;
- switch (pool_type) {
+ switch (pool_type)
+ {
case ops::PoolOp::PoolingType::AVG:
border_type = ops::PoolOp::BorderType::ZEROFILLED;
break;
assert(false);
}
- auto pooling = createOp<ops::PoolOp>(layer.name(), convertCaffeToMIR(inputs[0]), pool_type,
- window_shape, strides, padding_before, padding_after,
- border_type);
+ auto pooling =
+ createOp<ops::PoolOp>(layer.name(), convertCaffeToMIR(inputs[0]), pool_type, window_shape,
+ strides, padding_before, padding_after, border_type);
return {convertMIRToCaffe(pooling->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertSoftmax(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- const auto& params = layer.softmax_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertSoftmax(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.softmax_param();
// CPP and ACL backends are able to perform Softmax only along the last axis.
// FIXME Do it in backends.
- if (inputs[0]->getShape().rank() == 4) {
+ if (inputs[0]->getShape().rank() == 4)
+ {
// For now, we only account for the most common case.
if (params.axis() != 1)
throw std::runtime_error("Softmax: unsupported axis");
return {softmax->getOutput(0)};
}
-void CaffeOpCreator::checkReshape(const ReshapeParameter& opts,
- std::set<std::string>& problems_ops_set) {
+void CaffeOpCreator::checkReshape(const ReshapeParameter &opts,
+ std::set<std::string> &problems_ops_set)
+{
if (opts.has_axis() || opts.has_num_axes())
problems_ops_set.insert("Reshape layer axis and num_axes params are not supported yet");
* @todo Decide how to react to the absence of "shape" parameter.
* @todo Support zero values in "shape" parameter.
*/
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertReshape(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- auto& opts = layer.reshape_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertReshape(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto &opts = layer.reshape_param();
const mir::Shape new_shape = convertBlobShape(opts.shape());
auto reshape = createOp<ops::ReshapeOp>(layer.name(), inputs[0], new_shape);
return {reshape->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertReLU(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- mir::Operation* relu;
- if (layer.relu_param().has_negative_slope()) {
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertReLU(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ mir::Operation *relu;
+ if (layer.relu_param().has_negative_slope())
+ {
float alpha = layer.relu_param().negative_slope();
relu = createOp<ops::LeakyReluOp>(layer.name(), inputs[0], alpha);
- } else {
+ }
+ else
+ {
relu = createOp<ops::ReluOp>(layer.name(), inputs[0]);
}
return {relu->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertScale(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- const auto& params = layer.scale_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertScale(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.scale_param();
auto scale = createOp<ops::ConstantOp>("", convertBlob(layer.blobs(0)))->getOutput(0);
auto result = createOp<ops::ScaleOp>(layer.name(), convertCaffeToMIR(inputs[0]), scale);
// Add the bias, if any.
- if (params.bias_term()) {
+ if (params.bias_term())
+ {
auto bias = createOp<ops::ConstantOp>("", convertBlob(layer.blobs(1)))->getOutput(0);
result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), bias);
}
return {convertMIRToCaffe(result->getOutput(0))};
}
-void CaffeOpCreator::checkBatchNorm(const caffe::LayerParameter& layer,
- std::set<std::string>& problems_ops_set) {
- const auto& scale_shape = layer.blobs(2).shape();
+void CaffeOpCreator::checkBatchNorm(const caffe::LayerParameter &layer,
+ std::set<std::string> &problems_ops_set)
+{
+ const auto &scale_shape = layer.blobs(2).shape();
// Check that last blob(with scaleFactor) containing only one number
if (scale_shape.dim_size() != 1 || scale_shape.dim(0) != 1)
problems_ops_set.insert("Unexpected shape of scale parameter in batch norm");
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertBatchNorm(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- auto& opts = layer.batch_norm_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertBatchNorm(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto &opts = layer.batch_norm_param();
float eps = opts.eps();
auto scale_weight = convertBlob(layer.blobs(2));
- float scale_factor = *reinterpret_cast<float*>(scale_weight.at(mir::Index{0}));
+ float scale_factor = *reinterpret_cast<float *>(scale_weight.at(mir::Index{0}));
// Code below is taken from cpu caffe implementation:
// https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_norm_layer.cpp#L100
if (scale_factor != 0.0f)
for (Index idx : ShapeRange(bias_data.getShape()))
bias_data.at(idx) *= -scale_factor;
auto mean = createOp<ops::ConstantOp>("", mean_weights)->getOutput(0);
- auto result = createOp<ops::BiasAddOp>(layer.name() + ".bias", convertCaffeToMIR(inputs[0]),
- mean);
+ auto result =
+ createOp<ops::BiasAddOp>(layer.name() + ".bias", convertCaffeToMIR(inputs[0]), mean);
// create scale argument from variance:
// multiply elements of variance by scaleFactor and
return {convertMIRToCaffe(result->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertDropout(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- auto& opts = layer.dropout_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertDropout(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto &opts = layer.dropout_param();
auto dropout = createOp<ops::DropoutOp>(layer.name(), inputs[0], opts.dropout_ratio());
return {dropout->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertELU(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- auto& opts = layer.elu_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertELU(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto &opts = layer.elu_param();
auto elu = createOp<ops::EluOp>(layer.name(), inputs[0], opts.alpha());
return {elu->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertEmbed(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- const auto& params = layer.embed_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertEmbed(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.embed_param();
auto data = createOp<ops::ConstantOp>(layer.name() + ".weights", convertBlob(layer.blobs(0)));
auto result = createOp<ops::GatherOp>(layer.name(), data->getOutput(0), inputs[0], 0);
// Add the bias, if any.
- if (params.bias_term()) {
+ if (params.bias_term())
+ {
auto bias = createOp<ops::ConstantOp>("", convertBlob(layer.blobs(1)))->getOutput(0);
result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), bias);
}
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertSigmoid(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertSigmoid(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto result = createOp<ops::SigmoidOp>(layer.name(), inputs[0]);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertTanH(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertTanH(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto tanh = createOp<ops::TanhOp>(layer.name(), inputs[0]);
return {tanh->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertEltwise(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- auto& opts = layer.eltwise_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertEltwise(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ auto &opts = layer.eltwise_param();
ops::ElementwiseOp::OpType optype;
- std::vector<mir::Operation::Output*> input_tensors;
- switch (opts.operation()){
+ std::vector<mir::Operation::Output *> input_tensors;
+ switch (opts.operation())
+ {
case EltwiseParameter_EltwiseOp_PROD:
optype = ops::ElementwiseOp::OpType::mul;
- for (auto& i: inputs)
+ for (auto &i : inputs)
input_tensors.push_back(i);
break;
case EltwiseParameter_EltwiseOp_SUM:
optype = ops::ElementwiseOp::OpType::add;
- if (!opts.coeff().empty()) {
+ if (!opts.coeff().empty())
+ {
assert(opts.coeff().size() == static_cast<int>(inputs.size()));
- for (int i = 0; i < opts.coeff().size(); i++) {
- if (opts.coeff().Get(i) != 1.0f) {
+ for (int i = 0; i < opts.coeff().size(); i++)
+ {
+ if (opts.coeff().Get(i) != 1.0f)
+ {
TensorVariant coeff_tensor(DTYPE::FLOAT32, Shape{1}, &opts.coeff().Get(i));
auto coeff_const = createOp<ops::ConstantOp>(layer.name() + "_const", coeff_tensor);
- std::vector<mir::Operation::Output*> mul_inputs;
+ std::vector<mir::Operation::Output *> mul_inputs;
mul_inputs.push_back(coeff_const->getOutput(0));
mul_inputs.push_back(inputs[i]);
- auto mul = createOp<ops::ElementwiseOp>(layer.name() + "_mul",
- mul_inputs, ops::ElementwiseOp::OpType::mul);
+ auto mul = createOp<ops::ElementwiseOp>(layer.name() + "_mul", mul_inputs,
+ ops::ElementwiseOp::OpType::mul);
input_tensors.push_back(mul->getOutput(0));
- } else {
+ }
+ else
+ {
input_tensors.push_back(inputs[i]);
}
}
- } else {
- for (auto& i: inputs)
+ }
+ else
+ {
+ for (auto &i : inputs)
input_tensors.push_back(i);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
optype = ops::ElementwiseOp::OpType::max;
- for (auto& i: inputs)
+ for (auto &i : inputs)
input_tensors.push_back(i);
break;
}
return {elementwise->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertSplit(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- std::vector<mir::Operation::Output*> outputs(layer.top_size(), inputs.at(0));
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertSplit(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ std::vector<mir::Operation::Output *> outputs(layer.top_size(), inputs.at(0));
return outputs;
}
-void CaffeOpCreator::checkLSTM(const caffe::LayerParameter& layer,
- std::set<std::string>& problems_ops_set) {
- const auto& params = layer.recurrent_param();
+void CaffeOpCreator::checkLSTM(const caffe::LayerParameter &layer,
+ std::set<std::string> &problems_ops_set)
+{
+ const auto &params = layer.recurrent_param();
if (params.expose_hidden())
problems_ops_set.insert("LSTM: parameter 'expose_hidden' has unsupported value: " +
std::to_string(params.expose_hidden()));
}
-static TensorVariant createZeroedTensor(const mir::Shape& shape) {
+static TensorVariant createZeroedTensor(const mir::Shape &shape)
+{
// TODO For now it is hardcoded float32.
auto elem_type = mir::DTYPE::FLOAT32;
std::vector<float> zeros(static_cast<std::size_t>(shape.numElements()), 0.0f);
* In this implementation the inner products for all gates are performed as single inner product for
* efficiency.
*/
-std::vector<mir::Operation::Output*>
-CaffeOpCreator::convertLSTM(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs) {
- const auto& params = layer.recurrent_param();
+std::vector<mir::Operation::Output *>
+CaffeOpCreator::convertLSTM(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &params = layer.recurrent_param();
// Inputs to the layer.
auto x = inputs[0];
auto cont = inputs[1];
assert(inputs.size() == 2);
- const auto& x_shape = x->getShape();
+ const auto &x_shape = x->getShape();
const int32_t seq_length = x_shape.dim(0);
const int32_t batch_size = x_shape.dim(1);
const int32_t hidden_size = params.num_output();
auto x_xw_b = createOp<ops::BiasAddOp>("", x_xw, xb)->getOutput(0);
// Split input and continuation tensors into seq_length slices.
- std::vector<mir::Operation::Output*> x_xw_b_slices = createSplit(x_xw_b, seq_length, 0);
- std::vector<mir::Operation::Output*> cont_slices = createSplit(cont, seq_length, 0);
- std::vector<mir::Operation::Output*> h_slices(seq_length);
+ std::vector<mir::Operation::Output *> x_xw_b_slices = createSplit(x_xw_b, seq_length, 0);
+ std::vector<mir::Operation::Output *> cont_slices = createSplit(cont, seq_length, 0);
+ std::vector<mir::Operation::Output *> h_slices(seq_length);
- for (int32_t t = 0; t < seq_length; t++) {
+ for (int32_t t = 0; t < seq_length; t++)
+ {
auto c_cont_t = createMul(c_t, cont_slices[t]);
auto h_cont_t = createMul(h_t, cont_slices[t]);
#include "caffe/proto/caffe.pb.h"
-namespace nnc {
+namespace nnc
+{
using mir::TensorVariant;
-class CaffeOpCreator {
+class CaffeOpCreator
+{
public:
- explicit CaffeOpCreator(mir::Graph* g) : _graph(g) {};
+ explicit CaffeOpCreator(mir::Graph *g) : _graph(g){};
- std::vector<mir::Operation::Output*>
- convertInput(const caffe::LayerParameter& layer);
+ std::vector<mir::Operation::Output *> convertInput(const caffe::LayerParameter &layer);
- std::vector<mir::Operation::Output*>
- convertConvolution(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertConvolution(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertInnerProduct(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertInnerProduct(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertConcat(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertConcat(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertPooling(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertPooling(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertSoftmax(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSoftmax(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertReshape(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertReshape(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertReLU(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertReLU(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertScale(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertScale(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertBatchNorm(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertBatchNorm(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertDropout(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertDropout(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertDeconvolution(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertDeconvolution(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertELU(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertELU(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertEmbed(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertEmbed(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertSigmoid(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSigmoid(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertTanH(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertTanH(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertEltwise(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertEltwise(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertSplit(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSplit(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertLSTM(const caffe::LayerParameter& layer,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertLSTM(const caffe::LayerParameter &layer,
+ const std::vector<mir::Operation::Output *> &inputs);
- void checkConvolution(const caffe::ConvolutionParameter& opts,
- std::set<std::string>& problems_ops_set);
+ void checkConvolution(const caffe::ConvolutionParameter &opts,
+ std::set<std::string> &problems_ops_set);
- void checkPooling(const caffe::PoolingParameter& opts,
- std::set<std::string>& problems_ops_set);
+ void checkPooling(const caffe::PoolingParameter &opts, std::set<std::string> &problems_ops_set);
- void checkReshape(const caffe::ReshapeParameter& opts,
- std::set<std::string>& problems_ops_set);
+ void checkReshape(const caffe::ReshapeParameter &opts, std::set<std::string> &problems_ops_set);
- void checkBatchNorm(const caffe::LayerParameter& layer,
- std::set<std::string>& problems_ops_set);
+ void checkBatchNorm(const caffe::LayerParameter &layer, std::set<std::string> &problems_ops_set);
- void checkLSTM(const caffe::LayerParameter& layer, std::set<std::string>& problems_ops_set);
+ void checkLSTM(const caffe::LayerParameter &layer, std::set<std::string> &problems_ops_set);
private:
- mir::Graph* _graph = nullptr;
+ mir::Graph *_graph = nullptr;
- mir::Operation::Output* convertCaffeToMIR(mir::Operation::Output* arg);
+ mir::Operation::Output *convertCaffeToMIR(mir::Operation::Output *arg);
- mir::Operation::Output* convertMIRToCaffe(mir::Operation::Output* arg);
+ mir::Operation::Output *convertMIRToCaffe(mir::Operation::Output *arg);
- mir::Operation::Output* createAdd(mir::Operation::Output* arg1, mir::Operation::Output* arg2);
+ mir::Operation::Output *createAdd(mir::Operation::Output *arg1, mir::Operation::Output *arg2);
- mir::Operation::Output* createMul(mir::Operation::Output* arg1, mir::Operation::Output* arg2);
+ mir::Operation::Output *createMul(mir::Operation::Output *arg1, mir::Operation::Output *arg2);
- std::vector<mir::Operation::Output*>
- createSplit(mir::Operation::Output* arg, int32_t num_parts, int32_t axis);
+ std::vector<mir::Operation::Output *> createSplit(mir::Operation::Output *arg, int32_t num_parts,
+ int32_t axis);
- mir::Operation::Output*
- createFullyConnected(mir::Operation::Output* input,
- mir::Operation::Output* weights,
- int32_t axis);
+ mir::Operation::Output *createFullyConnected(mir::Operation::Output *input,
+ mir::Operation::Output *weights, int32_t axis);
- TensorVariant convertBlob(const caffe::BlobProto& blob);
+ TensorVariant convertBlob(const caffe::BlobProto &blob);
template <typename OpType, typename... Types>
- mir::Operation* createOp(const std::string& name, Types&& ... args);
+ mir::Operation *createOp(const std::string &name, Types &&... args);
};
template <typename OpType, typename... Types>
-mir::Operation*
-CaffeOpCreator::createOp(const std::string& name, Types&& ... args) {
+mir::Operation *CaffeOpCreator::createOp(const std::string &name, Types &&... args)
+{
return _graph->create<OpType>(name, std::forward<Types>(args)...);
}
} // namespace nnc
-#endif //NNCC_CAFFE_OP_CREATOR_H
+#endif // NNCC_CAFFE_OP_CREATOR_H
#ifndef NNCC_CAFFE_OP_TYPES_H
#define NNCC_CAFFE_OP_TYPES_H
-namespace nnc {
+namespace nnc
+{
-enum class CaffeOpType {
+enum class CaffeOpType
+{
absVal,
accuracy,
argMax,
windowData
};
-} // namespace nnc
+} // namespace nnc
-#endif //NNCC_CAFFE_OP_TYPES_H
+#endif // NNCC_CAFFE_OP_TYPES_H
#include <functional>
#include <iostream>
-namespace nnc {
+namespace nnc
+{
static void loadModelFile(const std::string &filename, onnx::ModelProto *model)
{
collectUnsupportedOps();
}
-void ONNXImporterImpl::collectUnsupportedOps() {
+void ONNXImporterImpl::collectUnsupportedOps()
+{
std::set<std::string> problems_op_set;
- for (int i = 0; i < _model->graph().node_size(); i++) {
+ for (int i = 0; i < _model->graph().node_size(); i++)
+ {
auto *onnx_node = &(_model->graph().node(i));
assert(onnx_node->has_op_type());
auto op_type = onnx_node->op_type().c_str();
auto *ir_op_type = ONNXPerfectHash::getONNXOpType(op_type, onnx_node->op_type().size());
- switch (ir_op_type->opCode) {
+ switch (ir_op_type->opCode)
+ {
case ONNXOpCode::opAdd:
case ONNXOpCode::opAveragePool:
- case ONNXOpCode::opGivenTensorFill: // experimental
+ case ONNXOpCode::opGivenTensorFill: // experimental
case ONNXOpCode::opGlobalAveragePool:
case ONNXOpCode::opBatchNormalization:
case ONNXOpCode::opConcat:
problems_op_set.insert(op_type);
}
}
- if (!problems_op_set.empty()) {
+ if (!problems_op_set.empty())
+ {
std::cerr << "The following operators are not supported:\n";
- for (const auto& op : problems_op_set)
+ for (const auto &op : problems_op_set)
std::cerr << op << std::endl;
throw std::runtime_error("Unsupported operators found");
}
}
-mir::TensorVariant ONNXImporterImpl::createTensor(const onnx::TensorProto* tensor) {
+mir::TensorVariant ONNXImporterImpl::createTensor(const onnx::TensorProto *tensor)
+{
mir::DTYPE type;
- const void* src_data;
+ const void *src_data;
mir::Shape shape(tensor->dims_size());
- for (int i = 0; i < tensor->dims_size(); ++i) {
- shape.dim(i) = tensor->dims(i);
+ for (int i = 0; i < tensor->dims_size(); ++i)
+ {
+ shape.dim(i) = tensor->dims(i);
}
- if (tensor->float_data_size() != 0) {
+ if (tensor->float_data_size() != 0)
+ {
assert(tensor->data_type() == onnx::TensorProto::FLOAT);
type = mir::DTYPE::FLOAT32;
src_data = tensor->float_data().data();
- } else if (tensor->double_data_size() != 0) {
+ }
+ else if (tensor->double_data_size() != 0)
+ {
assert(tensor->data_type() == onnx::TensorProto::DOUBLE);
type = mir::DTYPE::FLOAT64;
src_data = tensor->double_data().data();
- } else if (tensor->int32_data_size() != 0) {
+ }
+ else if (tensor->int32_data_size() != 0)
+ {
assert(tensor->data_type() == onnx::TensorProto::INT32);
type = mir::DTYPE::INT32;
src_data = tensor->int32_data().data();
- } else if (tensor->int64_data_size() != 0) {
+ }
+ else if (tensor->int64_data_size() != 0)
+ {
assert(tensor->data_type() == onnx::TensorProto::INT64);
type = mir::DTYPE::INT64;
src_data = tensor->int64_data().data();
- } else if (tensor->has_raw_data()) {
- switch (tensor->data_type()) {
+ }
+ else if (tensor->has_raw_data())
+ {
+ switch (tensor->data_type())
+ {
case onnx::TensorProto::FLOAT:
type = mir::DTYPE::FLOAT32;
break;
throw std::runtime_error("Unsupported data type");
}
src_data = tensor->raw_data().data();
- } else {
+ }
+ else
+ {
throw std::runtime_error("Invalid data in Proto file, investigate");
}
return mir::TensorVariant(type, shape, src_data);
}
-void ONNXImporterImpl::createGraphInputs() {
- auto& graph = _model->graph();
- auto& initializer = graph.initializer();
- auto& value_info = graph.value_info();
- std::map<std::string, const onnx::TensorProto*> onnx_tensors;
+void ONNXImporterImpl::createGraphInputs()
+{
+ auto &graph = _model->graph();
+ auto &initializer = graph.initializer();
+ auto &value_info = graph.value_info();
+ std::map<std::string, const onnx::TensorProto *> onnx_tensors;
// Collect all initializers of the given graph
- for (int i = 0; i < graph.initializer_size(); i++) {
- const onnx::TensorProto& tensor = graph.initializer(i);
+ for (int i = 0; i < graph.initializer_size(); i++)
+ {
+ const onnx::TensorProto &tensor = graph.initializer(i);
assert(onnx_tensors.find(tensor.name()) == onnx_tensors.end());
onnx_tensors[tensor.name()] = &tensor;
}
- for (auto& input : graph.input()) {
+ for (auto &input : graph.input())
+ {
assert(input.has_name());
auto name = input.name();
- if (onnx_tensors.find(name) != onnx_tensors.end()) {
- const onnx::TensorProto* onnx_tensor = onnx_tensors[name];
+ if (onnx_tensors.find(name) != onnx_tensors.end())
+ {
+ const onnx::TensorProto *onnx_tensor = onnx_tensors[name];
_constantTensors.insert(std::make_pair(name, createTensor(onnx_tensor)));
auto constant = _graph->create<mir::ops::ConstantOp>(name, _constantTensors.at(name));
_tensorNameToOutput[name] = constant->getOutput(0);
- } else {
- const auto& onnx_input_shape = input.type().tensor_type().shape();
+ }
+ else
+ {
+ const auto &onnx_input_shape = input.type().tensor_type().shape();
mir::Shape shape(onnx_input_shape.dim_size());
- for (int i = 0; i < onnx_input_shape.dim_size(); i++) {
+ for (int i = 0; i < onnx_input_shape.dim_size(); i++)
+ {
assert(onnx_input_shape.dim(i).has_dim_value());
shape.dim(i) = static_cast<int32_t>(onnx_input_shape.dim(i).dim_value());
}
}
}
-mir::Graph *ONNXImporterImpl::createIR() {
+mir::Graph *ONNXImporterImpl::createIR()
+{
createGraphInputs();
// for all nodes in onnx graph
- for (auto& onnx_node : _model->graph().node()) {
+ for (auto &onnx_node : _model->graph().node())
+ {
assert(onnx_node.has_op_type());
auto op_type = onnx_node.op_type().c_str();
// Fill inputs of the given node
- std::vector<mir::Operation::Output*> inputs(onnx_node.input_size());
- std::vector<mir::Operation::Output*> outputs;
+ std::vector<mir::Operation::Output *> inputs(onnx_node.input_size());
+ std::vector<mir::Operation::Output *> outputs;
- for (int i = 0; i < onnx_node.input_size(); i++) {
- auto& name = onnx_node.input(i);
- if (!name.empty()) {
+ for (int i = 0; i < onnx_node.input_size(); i++)
+ {
+ auto &name = onnx_node.input(i);
+ if (!name.empty())
+ {
assert(_tensorNameToOutput.find(name) != _tensorNameToOutput.end());
inputs[i] = _tensorNameToOutput[name];
}
}
- auto* onnx_op_type = ONNXPerfectHash::getONNXOpType(op_type, onnx_node.op_type().size());
+ auto *onnx_op_type = ONNXPerfectHash::getONNXOpType(op_type, onnx_node.op_type().size());
- switch (onnx_op_type->opCode) {
+ switch (onnx_op_type->opCode)
+ {
case ONNXOpCode::opConstant:
outputs = _opCreator.convertConstant(onnx_node, _constantTensors);
break;
std::to_string(static_cast<int>(onnx_op_type->opCode)));
}
// Set outputs' names
- for (int i = 0; i < outputs.size(); i++) {
+ for (int i = 0; i < outputs.size(); i++)
+ {
outputs[i]->getNode()->setName(onnx_node.output(i));
auto result = _tensorNameToOutput.emplace(outputs[i]->getNode()->getName(), outputs[i]);
- if(!result.second)
+ if (!result.second)
throw std::runtime_error("Name duplication: " + outputs[i]->getNode()->getName());
}
- assert (!outputs.empty());
+ assert(!outputs.empty());
// FIXME: it should be done properly via the given graph outputs
_graphOutputs.assign(outputs.begin(), outputs.end());
}
// set graph outputs
// TODO: it should be done with onnx graph outputs
- for (auto output : _graphOutputs) {
+ for (auto output : _graphOutputs)
+ {
_graph->create<mir::ops::OutputOp>(output->getNode()->getName(), output);
output->getNode()->setName("");
}
#include <memory>
#include <string>
-namespace nnc {
+namespace nnc
+{
-class ONNXImporterImpl {
+class ONNXImporterImpl
+{
public:
- explicit ONNXImporterImpl(std::string filename) {
+ explicit ONNXImporterImpl(std::string filename)
+ {
_modelFilename = std::move(filename);
_graph = new mir::Graph();
_opCreator.setMirGraph(_graph);
void import();
mir::Graph *createIR();
- static mir::TensorVariant createTensor(const onnx::TensorProto* tensor);
+ static mir::TensorVariant createTensor(const onnx::TensorProto *tensor);
private:
void createGraphInputs();
void collectUnsupportedOps();
// Maps ONNX tensor names to corresponding MIR operation outputs.
- std::map<std::string, mir::Operation::Output*> _tensorNameToOutput;
+ std::map<std::string, mir::Operation::Output *> _tensorNameToOutput;
// This map keeps named tensors used as graph input initializers.
// In addition here could be tensors from opGivenTensorFill and opConstant
std::map<std::string, mir::TensorVariant> _constantTensors;
- std::vector<mir::Operation::Output*> _graphOutputs;
+ std::vector<mir::Operation::Output *> _graphOutputs;
std::string _modelFilename;
std::unique_ptr<onnx::ModelProto> _model;
- mir::Graph* _graph;
+ mir::Graph *_graph;
ONNXOpCreator _opCreator;
};
} // namespace nnc
#include <iostream>
#include <set>
-namespace nnc {
+namespace nnc
+{
static mir::TensorVariant fixGroupedKernel(int groups, const mir::TensorVariant &folded_kernel)
{
using namespace mir;
-static const onnx::AttributeProto* findAttribute(const onnx::NodeProto& onnx_node,
- const std::string& name) {
- for (auto& att : onnx_node.attribute()) {
- if (att.name() == name) {
+static const onnx::AttributeProto *findAttribute(const onnx::NodeProto &onnx_node,
+ const std::string &name)
+{
+ for (auto &att : onnx_node.attribute())
+ {
+ if (att.name() == name)
+ {
return &att;
}
}
return nullptr;
}
-static std::pair<bool, int> getIntAttribute(const onnx::NodeProto& onnx_node,
- const std::string& name = "axis") {
+static std::pair<bool, int> getIntAttribute(const onnx::NodeProto &onnx_node,
+ const std::string &name = "axis")
+{
auto result = findAttribute(onnx_node, name);
if (!result)
return {false, 0};
return {true, result->i()};
}
-static std::pair<bool, std::string> getStringAttribute(const onnx::NodeProto& onnx_node,
- const std::string& name) {
+static std::pair<bool, std::string> getStringAttribute(const onnx::NodeProto &onnx_node,
+ const std::string &name)
+{
auto result = findAttribute(onnx_node, name);
if (!result)
return {false, ""};
return {true, result->s()};
}
-static std::pair<bool, float> getFloatAttribute(const onnx::NodeProto& onnx_node,
- const std::string& name) {
+static std::pair<bool, float> getFloatAttribute(const onnx::NodeProto &onnx_node,
+ const std::string &name)
+{
auto result = findAttribute(onnx_node, name);
if (!result)
return {false, 0.0};
// Create vector tensor filled with the given value
// TODO: it should be template
-static TensorVariant createTensor(float value, const mir::Shape& shape) {
+static TensorVariant createTensor(float value, const mir::Shape &shape)
+{
std::vector<float> values(static_cast<std::size_t>(shape.numElements()), value);
return mir::TensorVariant(mir::DTYPE::FLOAT32, {shape.numElements()}, values.data());
}
-struct KernelStridesPadding {
+struct KernelStridesPadding
+{
Shape kernel_shape;
Shape strides_shape;
std::vector<int32_t> padding_before{0, 0};
std::vector<int32_t> padding_after{0, 0};
};
-static void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata) {
- auto* kshape = findAttribute(onnx_node, "kernel_shape");
+static void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata)
+{
+ auto *kshape = findAttribute(onnx_node, "kernel_shape");
assert(kshape && kshape->ints_size());
- auto* strides = findAttribute(onnx_node, "strides");
+ auto *strides = findAttribute(onnx_node, "strides");
assert(strides && strides->ints_size());
- auto* pads = findAttribute(onnx_node, "pads");
+ auto *pads = findAttribute(onnx_node, "pads");
cdata.kernel_shape = mir::Shape(kshape->ints_size());
- for (int i = 0; i < kshape->ints_size(); ++i) {
+ for (int i = 0; i < kshape->ints_size(); ++i)
+ {
cdata.kernel_shape.dim(i) = kshape->ints(i);
}
cdata.strides_shape = mir::Shape(strides->ints_size());
- for (int i = 0; i < strides->ints_size(); ++i) {
+ for (int i = 0; i < strides->ints_size(); ++i)
+ {
cdata.strides_shape.dim(i) = strides->ints(i);
}
- if (pads) {
+ if (pads)
+ {
assert(pads->ints_size() == 4);
cdata.padding_before[0] = pads->ints(0);
cdata.padding_before[1] = pads->ints(1);
}
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertConv2D(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertConv2D(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node)
+{
assert(inputs.size() >= 2);
KernelStridesPadding cdata;
getKernelStridesPadding(onnx_node, cdata);
// FIXME: It can be non-constant value.
- auto* in_weights = dynamic_cast<mir::ops::ConstantOp*>(inputs[1]->getNode());
+ auto *in_weights = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
assert(in_weights && "Weights could be a constant tensor only");
- const auto& in_weights_tensor = in_weights->getValue();
+ const auto &in_weights_tensor = in_weights->getValue();
// We should transpose ONNX MC(IO)HW to HWOI
auto kernel_tensor = transposeTensor<2, 3, 1, 0>(in_weights_tensor);
auto in_group_size = kernel_tensor.getShape().dim(2);
num_groups = 1;
bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
- mir::Operation* result;
+ mir::Operation *result;
auto transposed_input = convertONNXToMIR(inputs[0]);
- if (is_depthwise) {
+ if (is_depthwise)
+ {
// TODO handle properly kernel with layer multiplier
auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
auto kernel = createOp<ops::ConstantOp>(transposed_tensor)->getOutput(0);
result = createOp<ops::DepthwiseConv2DOp>(transposed_input, kernel, cdata.strides_shape,
cdata.padding_before, cdata.padding_after);
- } else {
+ }
+ else
+ {
// first we need to convert kernel of grouped convolution to appropriate ordinary kernel
if (num_groups != 1)
kernel_tensor = fixGroupedKernel(num_groups, kernel_tensor);
return {convertMIRToONNX(result->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertConcat(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertConcat(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node)
+{
bool found;
int axis;
- std::tie (found, axis) = getIntAttribute(onnx_node);
+ std::tie(found, axis) = getIntAttribute(onnx_node);
if (!found)
throw std::runtime_error("Concat must have 'axis' attribute");
auto result = createOp<ops::ConcatOp>(inputs, axis);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertGather(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertGather(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node)
+{
bool found;
int value;
std::tie(found, value) = getIntAttribute(onnx_node, "axis");
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertPad(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertPad(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node)
+{
bool found;
float value;
std::tie(found, value) = getFloatAttribute(onnx_node, "value");
auto modeAtt = findAttribute(onnx_node, "mode");
assert(modeAtt);
auto mode = modeAtt->s();
- const mir::Scalar scalar(reinterpret_cast<const char*>(&value), DTYPE::FLOAT32, sizeof(float));
+ const mir::Scalar scalar(reinterpret_cast<const char *>(&value), DTYPE::FLOAT32, sizeof(float));
assert(padsAtt->ints_size() > 0);
int cnt = padsAtt->ints_size() / 2;
assert(cnt % 2 == 0);
int last = padsAtt->ints_size() - 1;
- std::vector<std::pair<int32_t, int32_t >> vec(cnt);
- auto* data = padsAtt->ints().data();
- for (int i = 0; i < cnt; i++) {
+ std::vector<std::pair<int32_t, int32_t>> vec(cnt);
+ auto *data = padsAtt->ints().data();
+ for (int i = 0; i < cnt; i++)
+ {
auto pair = std::make_pair(data[i], data[last - i]);
vec[i] = pair;
}
- auto result =
- createOp<ops::PadOp>(inputs[0], inputs[0]->getShape().rank(), vec, scalar);
+ auto result = createOp<ops::PadOp>(inputs[0], inputs[0]->getShape().rank(), vec, scalar);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertPool(const std::vector<mir::Operation::Output*>& inputs,
- ONNXOpCode op_code,
- const onnx::NodeProto& onnx_node) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertPool(const std::vector<mir::Operation::Output *> &inputs, ONNXOpCode op_code,
+ const onnx::NodeProto &onnx_node)
+{
ops::PoolOp::BorderType border_type;
ops::PoolOp::PoolingType pool_type;
// Transpose ONNX NCHW to MIR NHWC
auto t_input = convertONNXToMIR(inputs[0]);
- switch (op_code) {
- case ONNXOpCode::opGlobalAveragePool: {
+ switch (op_code)
+ {
+ case ONNXOpCode::opGlobalAveragePool:
+ {
border_type = ops::PoolOp::BorderType::ZEROFILLED;
pool_type = ops::PoolOp::PoolingType::AVG;
// GlobalAveragePool is equivalent to AveragePool with kernel size equal
return {convertMIRToONNX(result->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertSoftmax(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertSoftmax(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node)
+{
int axis;
bool found;
- std::tie (found, axis) = getIntAttribute(onnx_node);
+ std::tie(found, axis) = getIntAttribute(onnx_node);
axis = found ? axis : 1;
auto result = createOp<ops::SoftmaxOp>(inputs[0], axis);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertReshape(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertReshape(const std::vector<mir::Operation::Output *> &inputs)
+{
// The original shape
- const auto& in_shape = inputs[0]->getShape();
+ const auto &in_shape = inputs[0]->getShape();
// Input tensor describing the new shape
// TODO: could it be not a constant?
- auto* op = dynamic_cast<mir::ops::ConstantOp*>(inputs[1]->getNode());
+ auto *op = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
assert(op && "We support constants only");
auto shape_tensor = op->getValue();
Shape shape_tensor_shape = (shape_tensor).getShape();
assert(shape_tensor_shape.rank() == 1);
// The rank of the new shape
- auto cnt = shape_tensor_shape.numElements();
+ auto cnt = shape_tensor_shape.numElements();
// The vector to build the new shape from
- std::vector<int32_t > shape_vector(cnt);
+ std::vector<int32_t> shape_vector(cnt);
ShapeRange out_range(shape_tensor_shape);
Tensor<int64_t> tensor_accessor(shape_tensor);
int i = 0;
- for (auto idx : out_range) {
+ for (auto idx : out_range)
+ {
if (tensor_accessor.at(idx) == 0)
shape_vector[i] = in_shape.dim(i);
else if (tensor_accessor.at(idx) == -1)
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertUnsqueeze(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node) {
- auto* axes = findAttribute(onnx_node, "axes");
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertUnsqueeze(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node)
+{
+ auto *axes = findAttribute(onnx_node, "axes");
assert(axes && axes->ints_size());
- const Shape& input_shape = inputs[0]->getShape();
+ const Shape &input_shape = inputs[0]->getShape();
const int out_rank = input_shape.rank() + axes->ints_size();
Shape out_shape(out_rank);
auto ints_iterator = axes->ints().begin();
int j = 0;
- for (int i = 0; i < out_rank; i++) {
- if (ints_iterator < axes->ints().end() && i == *ints_iterator) {
+ for (int i = 0; i < out_rank; i++)
+ {
+ if (ints_iterator < axes->ints().end() && i == *ints_iterator)
+ {
out_shape.dim(i) = 1;
ints_iterator++;
- } else {
+ }
+ else
+ {
out_shape.dim(i) = input_shape.dim(j);
j++;
}
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertRelu(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertRelu(const std::vector<mir::Operation::Output *> &inputs)
+{
assert(inputs.size() == 1);
auto result = createOp<ops::ReluOp>(inputs[0]);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertSigmoid(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertSigmoid(const std::vector<mir::Operation::Output *> &inputs)
+{
assert(inputs.size() == 1);
auto result = createOp<ops::SigmoidOp>(inputs[0]);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertElementwise(const std::vector<mir::Operation::Output*>& inputs,
- mir::ops::ElementwiseOp::OpType op_type) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertElementwise(const std::vector<mir::Operation::Output *> &inputs,
+ mir::ops::ElementwiseOp::OpType op_type)
+{
auto result = createOp<ops::ElementwiseOp>(inputs, op_type);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertUpsample(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertUpsample(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node)
+{
bool success;
std::string mode;
std::tie(success, mode) = getStringAttribute(onnx_node, "mode");
- if (!success) mode = "nearest";
+ if (!success)
+ mode = "nearest";
assert(mode == "nearest" && "Unsupported upscale mode!");
// relies on attributes being lifted to constants (ONNX optimization pass)
assert(inputs.size() > 1);
- auto* scales = dynamic_cast<mir::ops::ConstantOp*>(inputs[1]->getNode());
+ auto *scales = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
assert(scales && "Weights could be a constant tensor only");
auto scales_tensor = Tensor<float>(scales->getValue());
int rank = inputs[0]->getShape().rank();
assert(scales_tensor.getShape().numElements() == rank &&
- "The number of elements of 'scales' should be the same as the rank of input 'X'"
- );
+ "The number of elements of 'scales' should be the same as the rank of input 'X'");
assert(rank == 4 && "Only rank 4 is supported");
std::vector<float> scales_vector(4);
const int onnx2mir[] = {0, 3, 1, 2};
scales_vector[onnx2mir[i]] = scales_tensor.atOffset(i);
return {convertMIRToONNX(createOp<ops::ResizeOp>(convertONNXToMIR(inputs[0]),
ops::ResizeOp::ResizeMethod::nearestNeighbor,
- scales_vector)->getOutput(0))};
+ scales_vector)
+ ->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertBatchNorm(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node,
- InputTensors& input_tensors) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertBatchNorm(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node, InputTensors &input_tensors)
+{
// overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias
bool found;
float value;
float epsilon = found ? value : 1e-05f;
// TODO: it's better to do it via inputs
- const auto& scale_tensor = input_tensors.at(inputs[1]->getNode()->getName());
- const auto& bias_tensor = input_tensors.at(inputs[2]->getNode()->getName());
- const auto& mean_tensor = input_tensors.at(inputs[3]->getNode()->getName());
- const auto& var_tensor = input_tensors.at(inputs[4]->getNode()->getName());
+ const auto &scale_tensor = input_tensors.at(inputs[1]->getNode()->getName());
+ const auto &bias_tensor = input_tensors.at(inputs[2]->getNode()->getName());
+ const auto &mean_tensor = input_tensors.at(inputs[3]->getNode()->getName());
+ const auto &var_tensor = input_tensors.at(inputs[4]->getNode()->getName());
// res1 = X - mean
Tensor<float> bias_data(mean_tensor);
- for (auto& idx: ShapeRange(bias_data.getShape()))
+ for (auto &idx : ShapeRange(bias_data.getShape()))
bias_data.at(idx) *= -1;
auto data = convertONNXToMIR(inputs[0]);
// res2 = res1 * scale / (var + epsilon)
Tensor<float> multiplier(scale_tensor);
Tensor<float> var_accessor(var_tensor);
- for (auto& idx: ShapeRange(scale_tensor.getShape()))
+ for (auto &idx : ShapeRange(scale_tensor.getShape()))
multiplier.at(idx) /= std::sqrt(var_accessor.at(idx) + epsilon);
auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
result = createOp<ops::ScaleOp>(result->getOutput(0), scale);
return {convertMIRToONNX(result->getOutput(0))};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertDropout(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertDropout(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node)
+{
bool found;
float value;
std::tie(found, value) = getFloatAttribute(onnx_node, "ratio");
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertScale(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertScale(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node)
+{
bool found;
float value;
std::tie(found, value) = getFloatAttribute(onnx_node, "scale");
float scale_val = found ? value : 1.0;
- const auto& shape = inputs[0]->getShape();
+ const auto &shape = inputs[0]->getShape();
auto scale_tensor = createTensor(scale_val, shape);
auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
auto result = createOp<ops::ScaleOp>(inputs[0], scale);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertShape(const std::vector<mir::Operation::Output*>& inputs) {
- const auto& input_shape = inputs[0]->getShape();
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertShape(const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &input_shape = inputs[0]->getShape();
int size = input_shape.rank();
Shape output_shape{size};
std::vector<float> data(static_cast<std::size_t>(size));
- for (int i = 0; i < size; i++) {
+ for (int i = 0; i < size; i++)
+ {
data[i] = input_shape.dim(i);
}
TensorVariant tensor(DTYPE::FLOAT32, output_shape, data.data());
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertGivenTensorFill(const onnx::NodeProto& onnx_node,
- InputTensors& input_tensors) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertGivenTensorFill(const onnx::NodeProto &onnx_node, InputTensors &input_tensors)
+{
auto values_att = findAttribute(onnx_node, "values");
auto shape_att = findAttribute(onnx_node, "shape");
assert(values_att && shape_att);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertConstant(const onnx::NodeProto& onnx_node,
- InputTensors& input_tensors) {
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertConstant(const onnx::NodeProto &onnx_node, InputTensors &input_tensors)
+{
assert((onnx_node.attribute_size() == 1) &&
(onnx_node.attribute(0).type() == onnx::AttributeProto_AttributeType_TENSOR) &&
(onnx_node.attribute(0).tensors_size() == 0));
assert(onnx_node.attribute(0).name() == "value");
auto name = onnx_node.output(0);
- auto& onnx_tensor = onnx_node.attribute(0).t();
+ auto &onnx_tensor = onnx_node.attribute(0).t();
auto mir_tensor = ONNXImporterImpl::createTensor(&onnx_tensor);
input_tensors.insert(std::make_pair(name, mir_tensor));
auto op = _graph->create<mir::ops::ConstantOp>(name, mir_tensor)->getOutput(0);
return {op};
}
-std::vector<mir::Operation::Output*>
-ONNXOpCreator::convertGemm(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node) {
- bool found;
- int ivalue;
+std::vector<mir::Operation::Output *>
+ONNXOpCreator::convertGemm(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node)
+{
+ bool found;
+ int ivalue;
float fvalue;
// Compute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),
// same for B and transB. This operator supports unidirectional broadcasting
// (tensor C should be unidirectional broadcastable to tensor A * B).
- std::tie (found, ivalue) = getIntAttribute(onnx_node, "transA");
+ std::tie(found, ivalue) = getIntAttribute(onnx_node, "transA");
bool trans_a = found ? static_cast<bool>(ivalue) : false;
- std::tie (found, ivalue) = getIntAttribute(onnx_node, "transB");
+ std::tie(found, ivalue) = getIntAttribute(onnx_node, "transB");
bool trans_b = found ? static_cast<bool>(ivalue) : false;
- std::tie (found, fvalue) = getFloatAttribute(onnx_node, "alpha");
+ std::tie(found, fvalue) = getFloatAttribute(onnx_node, "alpha");
float alpha_val = found ? fvalue : 1.0f;
- std::tie (found, fvalue) = getFloatAttribute(onnx_node, "beta");
+ std::tie(found, fvalue) = getFloatAttribute(onnx_node, "beta");
float beta_val = found ? fvalue : 1.0f;
// 1. Prepare input matrix A
// Flatten the shape by dim(0)
- const auto& in_shape = inputs[0]->getShape();
+ const auto &in_shape = inputs[0]->getShape();
mir::Shape shape0{in_shape.dim(0), in_shape.numElements() / in_shape.dim(0)};
auto input_a = createOp<ops::ReshapeOp>(inputs[0], shape0)->getOutput(0);
if (trans_a)
input_a = createOp<ops::TransposeOp>(input_a, std::vector<std::size_t>{1, 0})->getOutput(0);
- if (alpha_val != 1.0) {
+ if (alpha_val != 1.0)
+ {
auto alpha_tensor = createTensor(alpha_val, input_a->getShape());
auto alpha = createOp<ops::ConstantOp>(alpha_tensor)->getOutput(0);
input_a = createOp<ops::ScaleOp>(input_a, alpha)->getOutput(0);
//
auto input_c = inputs[2];
auto beta_tensor = createTensor(beta_val, input_c->getShape());
- if ((mult_a_b.rank() == 2) && (input_c->getShape().rank() == 1)) {
+ if ((mult_a_b.rank() == 2) && (input_c->getShape().rank() == 1))
+ {
beta_tensor = TensorVariant(beta_tensor, mult_a_b);
}
auto beta = createOp<ops::ConstantOp>(beta_tensor)->getOutput(0);
- std::vector<mir::Operation::Output*> mul_inputs = {beta, input_c};
- auto c_mult = createOp<ops::ElementwiseOp>(mul_inputs,
- ops::ElementwiseOp::OpType::mul)->getOutput(0);
+ std::vector<mir::Operation::Output *> mul_inputs = {beta, input_c};
+ auto c_mult =
+ createOp<ops::ElementwiseOp>(mul_inputs, ops::ElementwiseOp::OpType::mul)->getOutput(0);
assert(c_mult->getShape() == mult_a_b);
auto result = createOp<ops::GemmOp>(input_a, input_b, c_mult);
return {result->getOutput(0)};
}
-mir::Operation::Output* ONNXOpCreator::convertONNXToMIR(mir::Operation::Output* arg) {
+mir::Operation::Output *ONNXOpCreator::convertONNXToMIR(mir::Operation::Output *arg)
+{
// NCHW -> NHWC
return createOp<ops::TransposeOp>(arg, std::vector<std::size_t>{0, 2, 3, 1})->getOutput(0);
}
-mir::Operation::Output* ONNXOpCreator::convertMIRToONNX(mir::Operation::Output* arg) {
+mir::Operation::Output *ONNXOpCreator::convertMIRToONNX(mir::Operation::Output *arg)
+{
// NHWC -> NCHW
return createOp<ops::TransposeOp>(arg, std::vector<std::size_t>{0, 3, 1, 2})->getOutput(0);
}
#include <vector>
#include <memory>
-namespace nnc {
+namespace nnc
+{
-class ONNXOpCreator {
+class ONNXOpCreator
+{
public:
using InputTensors = std::map<std::string, mir::TensorVariant>;
ONNXOpCreator() = default;
- void setMirGraph(mir::Graph* g) { _graph = g; };
+ void setMirGraph(mir::Graph *g) { _graph = g; };
- std::vector<mir::Operation::Output*>
- convertConv2D(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node);
+ std::vector<mir::Operation::Output *>
+ convertConv2D(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node);
- std::vector<mir::Operation::Output*>
- convertConcat(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node);
+ std::vector<mir::Operation::Output *>
+ convertConcat(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node);
- std::vector<mir::Operation::Output*>
- convertGivenTensorFill(const onnx::NodeProto& onnx_node,
- InputTensors& input_tensors);
+ std::vector<mir::Operation::Output *> convertGivenTensorFill(const onnx::NodeProto &onnx_node,
+ InputTensors &input_tensors);
- std::vector<mir::Operation::Output*>
- convertConstant(const onnx::NodeProto& onnx_node,
- InputTensors& input_tensors);
+ std::vector<mir::Operation::Output *> convertConstant(const onnx::NodeProto &onnx_node,
+ InputTensors &input_tensors);
- std::vector<mir::Operation::Output*>
- convertPool(const std::vector<mir::Operation::Output*>& inputs,
- ONNXOpCode op_code,
- const onnx::NodeProto& onnx_node);
+ std::vector<mir::Operation::Output *>
+ convertPool(const std::vector<mir::Operation::Output *> &inputs, ONNXOpCode op_code,
+ const onnx::NodeProto &onnx_node);
- std::vector<mir::Operation::Output*>
- convertPad(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node);
+ std::vector<mir::Operation::Output *>
+ convertPad(const std::vector<mir::Operation::Output *> &inputs, const onnx::NodeProto &onnx_node);
- std::vector<mir::Operation::Output*>
- convertSoftmax(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node);
+ std::vector<mir::Operation::Output *>
+ convertSoftmax(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node);
- std::vector<mir::Operation::Output*>
- convertReshape(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertReshape(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertRelu(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertRelu(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertSigmoid(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSigmoid(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertUnsqueeze(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node);
+ std::vector<mir::Operation::Output *>
+ convertUnsqueeze(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node);
- std::vector<mir::Operation::Output*>
- convertUpsample(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node);
+ std::vector<mir::Operation::Output *>
+ convertUpsample(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node);
- std::vector<mir::Operation::Output*>
- convertElementwise(const std::vector<mir::Operation::Output*>& inputs,
+ std::vector<mir::Operation::Output *>
+ convertElementwise(const std::vector<mir::Operation::Output *> &inputs,
mir::ops::ElementwiseOp::OpType op_type);
- std::vector<mir::Operation::Output*>
- convertScale(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node);
+ std::vector<mir::Operation::Output *>
+ convertScale(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node);
- std::vector<mir::Operation::Output*>
- convertShape(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertShape(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertBatchNorm(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node,
- InputTensors& input_tensors);
+ std::vector<mir::Operation::Output *>
+ convertBatchNorm(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node, InputTensors &input_tensors);
- std::vector<mir::Operation::Output*>
- convertDropout(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node);
+ std::vector<mir::Operation::Output *>
+ convertDropout(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node);
- std::vector<mir::Operation::Output*>
- convertGather(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node);
+ std::vector<mir::Operation::Output *>
+ convertGather(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node);
- std::vector<mir::Operation::Output*>
- convertGemm(const std::vector<mir::Operation::Output*>& inputs,
- const onnx::NodeProto& onnx_node);
+ std::vector<mir::Operation::Output *>
+ convertGemm(const std::vector<mir::Operation::Output *> &inputs,
+ const onnx::NodeProto &onnx_node);
- mir::Operation::Output* convertONNXToMIR(mir::Operation::Output* arg);
- mir::Operation::Output* convertMIRToONNX(mir::Operation::Output* arg);
+ mir::Operation::Output *convertONNXToMIR(mir::Operation::Output *arg);
+ mir::Operation::Output *convertMIRToONNX(mir::Operation::Output *arg);
private:
- template <typename OpType, typename ...Types>
- mir::Operation* createOp(Types&&... args);
- mir::Graph* _graph = nullptr;
+ template <typename OpType, typename... Types> mir::Operation *createOp(Types &&... args);
+ mir::Graph *_graph = nullptr;
};
-template<typename OpType, typename ...Types>
-mir::Operation* ONNXOpCreator::createOp(Types&&... args) {
+template <typename OpType, typename... Types>
+mir::Operation *ONNXOpCreator::createOp(Types &&... args)
+{
// TODO: set operation names
return _graph->create<OpType>("", std::forward<Types>(args)...);
}
} // namespace nnc
-#endif //NNCC_ONNX_OP_CREATOR_H
+#endif // NNCC_ONNX_OP_CREATOR_H
#ifndef NNCC_ONNX_OP_TYPES_H
#define NNCC_ONNX_OP_TYPES_H
-namespace nnc {
-enum class ONNXOpSupportState {
+namespace nnc
+{
+enum class ONNXOpSupportState
+{
unSupported,
fullySupported,
partiallySupported
};
-enum class ONNXOpCode {
+enum class ONNXOpCode
+{
opAbs,
opAcos,
opAdd,
opThresholdedRelu
}; // ONNXOpCode
-struct ONNXOpType {
+struct ONNXOpType
+{
const char *name;
ONNXOpCode opCode;
ONNXOpSupportState state;
}; // ONNXOpType
} // namespace nnc
-#endif //NNCC_ONNX_OP_TYPES_H
+#endif // NNCC_ONNX_OP_TYPES_H
/* Command-line: gperf --output-file=ONNXPerfectHash.h ONNXPerfect.gperf */
/* Computed positions: -k'1-2,$' */
-namespace nnc {
+namespace nnc
+{
-#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \
- && ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \
- && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) \
- && ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) \
- && ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) \
- && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) \
- && ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) \
- && ('=' == 61) && ('>' == 62) && ('?' == 63) && ('A' == 65) \
- && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) \
- && ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) \
- && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) \
- && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) \
- && ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) \
- && ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) \
- && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) \
- && ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) \
- && ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) \
- && ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) \
- && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) \
- && ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) \
- && ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) \
- && ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) \
- && ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126))
+#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) && ('%' == 37) && ('&' == 38) && \
+ ('\'' == 39) && ('(' == 40) && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) && \
+ ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) && ('1' == 49) && ('2' == 50) && \
+ ('3' == 51) && ('4' == 52) && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) && \
+ ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) && ('=' == 61) && ('>' == 62) && \
+ ('?' == 63) && ('A' == 65) && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) && \
+ ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) && ('J' == 74) && ('K' == 75) && \
+ ('L' == 76) && ('M' == 77) && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) && \
+ ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) && ('V' == 86) && ('W' == 87) && \
+ ('X' == 88) && ('Y' == 89) && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) && \
+ ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) && ('c' == 99) && ('d' == 100) && \
+ ('e' == 101) && ('f' == 102) && ('g' == 103) && ('h' == 104) && ('i' == 105) && \
+ ('j' == 106) && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) && \
+ ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) && ('s' == 115) && \
+ ('t' == 116) && ('u' == 117) && ('v' == 118) && ('w' == 119) && ('x' == 120) && \
+ ('y' == 121) && ('z' == 122) && ('{' == 123) && ('|' == 124) && ('}' == 125) && \
+ ('~' == 126))
/* The character set is not based on ISO-646. */
-#error "gperf generated tables don't work with this execution character set. Please report a bug to <bug-gperf@gnu.org>."
+#error \
+ "gperf generated tables don't work with this execution character set. Please report a bug to <bug-gperf@gnu.org>."
#endif
#line 1 "ONNXPerfect.gperf"
/* maximum key range = 193, duplicates = 0 */
-class ONNXPerfectHash {
+class ONNXPerfectHash
+{
private:
- static inline unsigned int hash(const char* str, size_t len);
+ static inline unsigned int hash(const char *str, size_t len);
public:
- static const struct ONNXOpType* getONNXOpType(const char* str, size_t len);
+ static const struct ONNXOpType *getONNXOpType(const char *str, size_t len);
};
-inline unsigned int
-ONNXPerfectHash::hash(const char* str, size_t len) {
+inline unsigned int ONNXPerfectHash::hash(const char *str, size_t len)
+{
static const unsigned char asso_values[] = {
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 22,
- 0, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 40, 75, 0, 25, 35,
- 50, 25, 0, 65, 199, 0, 5, 0, 10, 65,
- 90, 199, 0, 20, 110, 82, 199, 199, 0, 199,
- 199, 199, 199, 199, 199, 199, 199, 0, 25, 60,
- 35, 5, 25, 125, 30, 65, 199, 199, 0, 40,
- 5, 15, 20, 60, 10, 90, 35, 40, 45, 45,
- 65, 55, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- 199, 199, 199, 199, 199, 199
- };
- return len + asso_values[static_cast<unsigned char>(str[1])] + asso_values[static_cast<unsigned char>(str[0])] + asso_values[static_cast<unsigned char>(str[len - 1])];
+ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
+ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
+ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 22, 0, 199, 199, 199, 199, 199, 199,
+ 199, 199, 199, 199, 199, 199, 199, 199, 40, 75, 0, 25, 35, 50, 25, 0, 65, 199, 0,
+ 5, 0, 10, 65, 90, 199, 0, 20, 110, 82, 199, 199, 0, 199, 199, 199, 199, 199, 199,
+ 199, 199, 0, 25, 60, 35, 5, 25, 125, 30, 65, 199, 199, 0, 40, 5, 15, 20, 60,
+ 10, 90, 35, 40, 45, 45, 65, 55, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
+ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
+ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
+ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
+ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
+ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
+ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
+ 199, 199, 199, 199, 199, 199, 199, 199, 199};
+ return len + asso_values[static_cast<unsigned char>(str[1])] +
+ asso_values[static_cast<unsigned char>(str[0])] +
+ asso_values[static_cast<unsigned char>(str[len - 1])];
}
-const struct ONNXOpType*
-ONNXPerfectHash::getONNXOpType(const char* str, size_t len) {
- enum {
+const struct ONNXOpType *ONNXPerfectHash::getONNXOpType(const char *str, size_t len)
+{
+ enum
+ {
TOTAL_KEYWORDS = 118,
MIN_WORD_LENGTH = 2,
MAX_WORD_LENGTH = 19,
};
static const struct ONNXOpType wordlist[] = {
- {""}, {""}, {""}, {""}, {""}, {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
#line 84 "ONNXPerfect.gperf"
{"MatMul", ONNXOpCode::opMatMul, ONNXOpSupportState::unSupported},
#line 86 "ONNXPerfect.gperf"
{"Max", ONNXOpCode::opMax, ONNXOpSupportState::unSupported},
#line 117 "ONNXPerfect.gperf"
{"Selu", ONNXOpCode::opSelu, ONNXOpSupportState::unSupported},
- {""}, {""},
+ {""},
+ {""},
#line 71 "ONNXPerfect.gperf"
{"Hardmax", ONNXOpCode::opHardmax, ONNXOpSupportState::unSupported},
#line 89 "ONNXPerfect.gperf"
{"And", ONNXOpCode::opAnd, ONNXOpSupportState::unSupported},
#line 41 "ONNXPerfect.gperf"
{"Atan", ONNXOpCode::opAtan, ONNXOpSupportState::unSupported},
- {""}, {""},
+ {""},
+ {""},
#line 94 "ONNXPerfect.gperf"
{"Or", ONNXOpCode::opOr, ONNXOpSupportState::unSupported},
#line 130 "ONNXPerfect.gperf"
{""},
#line 63 "ONNXPerfect.gperf"
{"GRU", ONNXOpCode::opGRU, ONNXOpSupportState::unSupported},
- {""}, {""},
+ {""},
+ {""},
#line 36 "ONNXPerfect.gperf"
{"Add", ONNXOpCode::opAdd, ONNXOpSupportState::unSupported},
{""},
{""},
#line 58 "ONNXPerfect.gperf"
{"Exp", ONNXOpCode::opExp, ONNXOpSupportState::unSupported},
- {""}, {""},
+ {""},
+ {""},
#line 147 "ONNXPerfect.gperf"
{"ImageScaler", ONNXOpCode::opImageScaler, ONNXOpSupportState::unSupported},
#line 119 "ONNXPerfect.gperf"
{"Pad", ONNXOpCode::opPad, ONNXOpSupportState::unSupported},
#line 135 "ONNXPerfect.gperf"
{"TopK", ONNXOpCode::opTopK, ONNXOpSupportState::unSupported},
- {""}, {""}, {""},
+ {""},
+ {""},
+ {""},
#line 124 "ONNXPerfect.gperf"
{"Softplus", ONNXOpCode::opSoftplus, ONNXOpSupportState::unSupported},
#line 136 "ONNXPerfect.gperf"
{"Transpose", ONNXOpCode::opTranspose, ONNXOpSupportState::unSupported},
#line 95 "ONNXPerfect.gperf"
{"PRelu", ONNXOpCode::opPRelu, ONNXOpSupportState::unSupported},
- {""}, {""},
+ {""},
+ {""},
#line 54 "ONNXPerfect.gperf"
{"Div", ONNXOpCode::opDiv, ONNXOpSupportState::unSupported},
#line 40 "ONNXPerfect.gperf"
{"Neg", ONNXOpCode::opNeg, ONNXOpSupportState::unSupported},
#line 133 "ONNXPerfect.gperf"
{"Tanh", ONNXOpCode::opTanh, ONNXOpSupportState::unSupported},
- {""}, {""}, {""},
+ {""},
+ {""},
+ {""},
#line 79 "ONNXPerfect.gperf"
{"Log", ONNXOpCode::opLog, ONNXOpSupportState::unSupported},
- {""}, {""}, {""}, {""},
+ {""},
+ {""},
+ {""},
+ {""},
#line 97 "ONNXPerfect.gperf"
{"Pow", ONNXOpCode::opPow, ONNXOpSupportState::unSupported},
#line 74 "ONNXPerfect.gperf"
{"InstanceNormalizati", ONNXOpCode::opInstanceNormalizati, ONNXOpSupportState::unSupported},
- {""}, {""}, {""},
+ {""},
+ {""},
+ {""},
#line 34 "ONNXPerfect.gperf"
{"Abs", ONNXOpCode::opAbs, ONNXOpSupportState::unSupported},
#line 140 "ONNXPerfect.gperf"
{"ATen", ONNXOpCode::opATen, ONNXOpSupportState::unSupported},
- {""}, {""}, {""},
+ {""},
+ {""},
+ {""},
#line 72 "ONNXPerfect.gperf"
{"Identity", ONNXOpCode::opIdentity, ONNXOpSupportState::fullySupported},
- {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""},
- {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""},
- {""}, {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
#line 134 "ONNXPerfect.gperf"
{"Tile", ONNXOpCode::opTile, ONNXOpSupportState::unSupported},
- {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
+ {""},
#line 35 "ONNXPerfect.gperf"
{"Acos", ONNXOpCode::opAcos, ONNXOpSupportState::unSupported},
#line 151 "ONNXPerfect.gperf"
{"ThresholdedRelu", ONNXOpCode::opThresholdedRelu, ONNXOpSupportState::unSupported},
- {""}, {""},
+ {""},
+ {""},
#line 148 "ONNXPerfect.gperf"
- {"ParametricSoftplus", ONNXOpCode::opParametricSoftplus, ONNXOpSupportState::unSupported}
- };
+ {"ParametricSoftplus", ONNXOpCode::opParametricSoftplus, ONNXOpSupportState::unSupported}};
- if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH) {
+ if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
+ {
unsigned int key = hash(str, len);
- if (key <= MAX_HASH_VALUE) {
- const char* s = wordlist[key].name;
+ if (key <= MAX_HASH_VALUE)
+ {
+ const char *s = wordlist[key].name;
if (*str == *s && !strcmp(str + 1, s + 1))
return &wordlist[key];
}
}
return nullptr;
-} // ONNXPerfectHash::getONNXOpType (const char *str, size_t len)
+} // ONNXPerfectHash::getONNXOpType (const char *str, size_t len)
-} // namespace nnc
+} // namespace nnc
using namespace ::tflite;
-namespace nnc {
+namespace nnc
+{
-TfliteImporter::TfliteImporter(const std::string& filename) : _filename(filename) {
+TfliteImporter::TfliteImporter(const std::string &filename) : _filename(filename)
+{
_graph = new Graph();
_opCreator.reset(new TFLiteOpCreator(_graph));
}
TfliteImporter::~TfliteImporter() = default;
-void TfliteImporter::import() {
+void TfliteImporter::import()
+{
std::ifstream stream(_filename, std::ios::in | std::ios::binary);
if (stream.fail())
throw std::runtime_error("Couldn't open file \"" + _filename + "\".");
if (stream.fail())
throw std::runtime_error("Couldn't read file \"" + _filename + "\".");
- auto verifier = flatbuffers::Verifier(reinterpret_cast<const uint8_t*>(_modelRaw.get()),
- file_size);
+ auto verifier =
+ flatbuffers::Verifier(reinterpret_cast<const uint8_t *>(_modelRaw.get()), file_size);
if (!VerifyModelBuffer(verifier))
throw std::runtime_error("Could not load model: " + _filename + "\n");
collectUnsupportedOps();
}
-void TfliteImporter::collectUnsupportedOps() {
- for (auto sub_graph: *(_modelPacked->subgraphs()))
- for (auto op: *(sub_graph->operators()))
+void TfliteImporter::collectUnsupportedOps()
+{
+ for (auto sub_graph : *(_modelPacked->subgraphs()))
+ for (auto op : *(sub_graph->operators()))
processUnsupportedOp(op);
- if (!_problemsOpSet.empty()) {
+ if (!_problemsOpSet.empty())
+ {
std::string msg("NNC can't load model. Detected problems:");
- for (const auto& problemStr : _problemsOpSet)
+ for (const auto &problemStr : _problemsOpSet)
msg.append("\n * " + problemStr);
throw std::runtime_error(msg);
}
}
-void TfliteImporter::processUnsupportedOp(const Operator* op) {
+void TfliteImporter::processUnsupportedOp(const Operator *op)
+{
BuiltinOperator opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
- switch (opcode) {
+ switch (opcode)
+ {
case BuiltinOperator_MAX_POOL_2D:
case BuiltinOperator_AVERAGE_POOL_2D:
_opCreator->checkPool2D(op->builtin_options_as<Pool2DOptions>(), _problemsOpSet);
_problemsOpSet);
break;
case BuiltinOperator_STRIDED_SLICE:
- _opCreator->checkStridedSlice(op->builtin_options_as<StridedSliceOptions>(),
- _problemsOpSet);
+ _opCreator->checkStridedSlice(op->builtin_options_as<StridedSliceOptions>(), _problemsOpSet);
break;
case BuiltinOperator_SHAPE:
_opCreator->checkShape(op->builtin_options_as<ShapeOptions>(), _problemsOpSet);
// No checks
break;
default:
- if (opcode <= BuiltinOperator_MAX) {
- _problemsOpSet.insert(std::string(EnumNameBuiltinOperator(opcode))
- + ": unsupported operator");
- } else {
- _problemsOpSet.insert(std::to_string(opcode)
- + ": unsuppored in tflite custom opcode");
+ if (opcode <= BuiltinOperator_MAX)
+ {
+ _problemsOpSet.insert(std::string(EnumNameBuiltinOperator(opcode)) +
+ ": unsupported operator");
+ }
+ else
+ {
+ _problemsOpSet.insert(std::to_string(opcode) + ": unsuppored in tflite custom opcode");
}
-
}
}
-Graph* TfliteImporter::createIR() {
+Graph *TfliteImporter::createIR()
+{
walkGraphAndCreateMIR();
return _graph;
}
-void TfliteImporter::walkGraphAndCreateMIR() {
+void TfliteImporter::walkGraphAndCreateMIR()
+{
walkModel(_modelPacked);
setIrNodeNames();
setGraphOutputs();
}
-void TfliteImporter::walkModel(const Model* /*m*/) {
- for (auto sub_graph: *(_modelPacked->subgraphs()))
+void TfliteImporter::walkModel(const Model * /*m*/)
+{
+ for (auto sub_graph : *(_modelPacked->subgraphs()))
walkSubGraph(sub_graph);
}
-void TfliteImporter::walkSubGraph(const SubGraph* s) {
+void TfliteImporter::walkSubGraph(const SubGraph *s)
+{
_tensors = s->tensors();
_graphInputs.assign(s->inputs()->begin(), s->inputs()->end());
_graphOutputs.assign(s->outputs()->begin(), s->outputs()->end());
- for (auto i : *s->inputs()) {
- const Tensor* t = (*s->tensors())[i];
+ for (auto i : *s->inputs())
+ {
+ const Tensor *t = (*s->tensors())[i];
mir::Shape input_shape(t->shape()->size());
- for (int dim = 0; dim < t->shape()->size(); ++dim) {
+ for (int dim = 0; dim < t->shape()->size(); ++dim)
+ {
input_shape.dim(dim) = t->shape()->Get(dim);
}
_tensorMap[i] = input->getOutput(0);
}
- for (auto op: *(s->operators()))
+ for (auto op : *(s->operators()))
walkOperator(op);
}
-void TfliteImporter::walkOperator(const Operator* op) {
- std::vector<mir::Operation::Output*> inputs = getMIRInputsForOperator(op);
- std::vector<mir::Operation::Output*> outputs;
+void TfliteImporter::walkOperator(const Operator *op)
+{
+ std::vector<mir::Operation::Output *> inputs = getMIRInputsForOperator(op);
+ std::vector<mir::Operation::Output *> outputs;
BuiltinOperator opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
- switch (opcode) {
+ switch (opcode)
+ {
case BuiltinOperator_CONV_2D:
outputs = _opCreator->convertConv2D(op->builtin_options_as<Conv2DOptions>(), inputs);
break;
outputs = _opCreator->convertAveragePool2D(op->builtin_options_as<Pool2DOptions>(), inputs);
break;
case BuiltinOperator_CONCATENATION:
- outputs = _opCreator->convertConcatenation(
- op->builtin_options_as<ConcatenationOptions>(), inputs);
+ outputs =
+ _opCreator->convertConcatenation(op->builtin_options_as<ConcatenationOptions>(), inputs);
break;
case BuiltinOperator_RESHAPE:
outputs = _opCreator->convertReshape(op->builtin_options_as<ReshapeOptions>(), inputs);
op->builtin_options_as<ResizeNearestNeighborOptions>(), inputs);
break;
case BuiltinOperator_MEAN:
- outputs = _opCreator->convertMean(
- op->builtin_options_as<ReducerOptions>(), inputs);
+ outputs = _opCreator->convertMean(op->builtin_options_as<ReducerOptions>(), inputs);
break;
case BuiltinOperator_FULLY_CONNECTED:
- outputs = _opCreator->convertFullyConnected(
- op->builtin_options_as<FullyConnectedOptions>(), inputs);
+ outputs = _opCreator->convertFullyConnected(op->builtin_options_as<FullyConnectedOptions>(),
+ inputs);
break;
case BuiltinOperator_SOFTMAX:
outputs = _opCreator->convertSoftmax(op->builtin_options_as<SoftmaxOptions>(), inputs);
outputs = _opCreator->convertSquaredDifference(inputs);
break;
case BuiltinOperator_TRANSPOSE_CONV:
- outputs = _opCreator->convertTransposeConv(op->builtin_options_as<TransposeConvOptions>(),
- inputs);
+ outputs =
+ _opCreator->convertTransposeConv(op->builtin_options_as<TransposeConvOptions>(), inputs);
break;
case BuiltinOperator_PAD:
outputs = _opCreator->convertPad(op->builtin_options_as<PadOptions>(), inputs);
outputs = _opCreator->convertReLU6(inputs);
break;
case BuiltinOperator_TRANSPOSE:
- outputs = _opCreator->convertTranspose(
- op->builtin_options_as<TransposeOptions>(), inputs);
+ outputs = _opCreator->convertTranspose(op->builtin_options_as<TransposeOptions>(), inputs);
break;
case BuiltinOperator_STRIDED_SLICE:
- outputs = _opCreator->convertStridedSlice(
- op->builtin_options_as<StridedSliceOptions>(), inputs);
+ outputs =
+ _opCreator->convertStridedSlice(op->builtin_options_as<StridedSliceOptions>(), inputs);
break;
case BuiltinOperator_LEAKY_RELU:
outputs = _opCreator->convertLeakyReLU(op->builtin_options_as<LeakyReluOptions>(), inputs);
}
assert(outputs.size() == op->outputs()->size());
- for (size_t i = 0; i < op->outputs()->size(); ++i) {
+ for (size_t i = 0; i < op->outputs()->size(); ++i)
+ {
int32_t tensor_index = (*op->outputs())[i];
_tensorMap[tensor_index] = outputs[i];
}
}
-std::vector<mir::Operation::Output*> TfliteImporter::getMIRInputsForOperator(const Operator* op) {
- std::vector<mir::Operation::Output*> inputs;
-
- try {
- for (auto i : *(op->inputs())) {
- const Tensor* tensor = (*_tensors)[i];
- const Buffer* buffer = (*_buffers)[tensor->buffer()];
- if (buffer->data() != nullptr) {
+std::vector<mir::Operation::Output *> TfliteImporter::getMIRInputsForOperator(const Operator *op)
+{
+ std::vector<mir::Operation::Output *> inputs;
+
+ try
+ {
+ for (auto i : *(op->inputs()))
+ {
+ const Tensor *tensor = (*_tensors)[i];
+ const Buffer *buffer = (*_buffers)[tensor->buffer()];
+ if (buffer->data() != nullptr)
+ {
assert(_tensorMap.find(i) == _tensorMap.end());
mir::TensorVariant mir_tensor = createTensor(tensor, buffer);
inputs.emplace_back(_graph->create<ops::ConstantOp>("", mir_tensor)->getOutput(0));
- } else {
+ }
+ else
+ {
// By this point every input for the operation "op" should have corresponding
// Model IR operations that output its inputs. This assumption is provided by the fact
// that TFLite format specifies all operations in the execution order.
inputs.emplace_back(_tensorMap.at(i));
}
}
- } catch (const std::out_of_range& e) {
+ }
+ catch (const std::out_of_range &e)
+ {
throw std::runtime_error("Found a TFLite operator with an input tensor for which "
"a corresponding Model IR node that outputs it was not created.");
}
return inputs;
}
-mir::TensorVariant TfliteImporter::createTensor(const Tensor* t, const Buffer* b) {
+mir::TensorVariant TfliteImporter::createTensor(const Tensor *t, const Buffer *b)
+{
assert(b->data() != nullptr);
mir::DTYPE type;
- switch (t->type()) {
+ switch (t->type())
+ {
case TensorType_INT32:
type = mir::DTYPE::INT32;
break;
type = mir::DTYPE::INT64;
break;
default:
- throw std::runtime_error(std::string("Unsupported tensor type: ") + EnumNameTensorType(t->type()));
+ throw std::runtime_error(std::string("Unsupported tensor type: ") +
+ EnumNameTensorType(t->type()));
}
mir::Shape shape(t->shape()->size());
- for (int i = 0; i < t->shape()->size(); ++i) {
+ for (int i = 0; i < t->shape()->size(); ++i)
+ {
shape.dim(i) = t->shape()->Get(i);
}
return mir::TensorVariant(type, shape, b->data()->Data());
}
-void TfliteImporter::setGraphOutputs() {
- for (auto output_idx : _graphOutputs) {
+void TfliteImporter::setGraphOutputs()
+{
+ for (auto output_idx : _graphOutputs)
+ {
auto output = _tensorMap[output_idx];
_graph->create<mir::ops::OutputOp>(output->getNode()->getName(), output);
output->getNode()->setName("");
}
}
-void TfliteImporter::setIrNodeNames() {
+void TfliteImporter::setIrNodeNames()
+{
// Setting names of the nodes.
// Note: we change the computation graph, (for example, TFLite Conv2D
// turns into IR Conv2D->BiasAdd->ReLU), so not all of the nodes will have names.
- for (auto iter : _tensorMap) {
- const Tensor* tensor = (*_tensors)[iter.first];
+ for (auto iter : _tensorMap)
+ {
+ const Tensor *tensor = (*_tensors)[iter.first];
iter.second->getNode()->setName(tensor->name()->c_str());
}
}
-void TfliteImporter ::cleanup() {
- delete _graph;
-}
+void TfliteImporter::cleanup() { delete _graph; }
-} // namespace nnc
+} // namespace nnc
#include <set>
#include <string>
-namespace nnc {
+namespace nnc
+{
class TFLiteOpCreator;
-class TfliteImporter {
+class TfliteImporter
+{
public:
- explicit TfliteImporter(const std::string& filename);
+ explicit TfliteImporter(const std::string &filename);
/**
- * @brief Import model from file, must be called before 'createIR' method
- * @throw PassException in case, if model couldn't be parsed or NNC doesn't support it
- */
+ * @brief Import model from file, must be called before 'createIR' method
+ * @throw PassException in case, if model couldn't be parsed or NNC doesn't support it
+ */
void import();
/**
- * @brief Create MIR graph from caffe model, must be called after 'import' method
- * @return MIR graph, corresponding to processed caffe model
- */
- mir::Graph* createIR();
+ * @brief Create MIR graph from caffe model, must be called after 'import' method
+ * @return MIR graph, corresponding to processed caffe model
+ */
+ mir::Graph *createIR();
void cleanup();
std::string _filename;
std::unique_ptr<char[]> _modelRaw;
std::unique_ptr<::tflite::ModelT> _model;
- const ::tflite::Model* _modelPacked = nullptr;
+ const ::tflite::Model *_modelPacked = nullptr;
- mir::Graph* _graph = nullptr;
+ mir::Graph *_graph = nullptr;
std::unique_ptr<TFLiteOpCreator> _opCreator;
- const flatbuffers::Vector<flatbuffers::Offset<::tflite::OperatorCode>>* _opcodes = nullptr;
- const flatbuffers::Vector<flatbuffers::Offset<::tflite::Tensor>>* _tensors = nullptr;
- const flatbuffers::Vector<flatbuffers::Offset<::tflite::Buffer>>* _buffers = nullptr;
+ const flatbuffers::Vector<flatbuffers::Offset<::tflite::OperatorCode>> *_opcodes = nullptr;
+ const flatbuffers::Vector<flatbuffers::Offset<::tflite::Tensor>> *_tensors = nullptr;
+ const flatbuffers::Vector<flatbuffers::Offset<::tflite::Buffer>> *_buffers = nullptr;
std::vector<int32_t> _graphInputs;
std::vector<int32_t> _graphOutputs;
// Maps TFLite tensors indices to corresponding MIR operation outputs.
- std::map<int, mir::Operation::Output*> _tensorMap;
+ std::map<int, mir::Operation::Output *> _tensorMap;
// set of strings describing incorrect parts of network and parts of network unsupported by NNC
std::set<std::string> _problemsOpSet;
/**
- * @brief Pass through tflite graph and create MIR graph
- */
+ * @brief Pass through tflite graph and create MIR graph
+ */
void walkGraphAndCreateMIR();
- void walkModel(const ::tflite::Model* m);
+ void walkModel(const ::tflite::Model *m);
- void walkSubGraph(const ::tflite::SubGraph* s);
+ void walkSubGraph(const ::tflite::SubGraph *s);
- void walkOperator(const ::tflite::Operator* op);
+ void walkOperator(const ::tflite::Operator *op);
/**
- * @brief Pass through tflite graph and collect operators unsupported by NNC
- * @throw PassException with message, containing detected problems
- */
+ * @brief Pass through tflite graph and collect operators unsupported by NNC
+ * @throw PassException with message, containing detected problems
+ */
void collectUnsupportedOps();
- void processUnsupportedOp(const ::tflite::Operator* op);
+ void processUnsupportedOp(const ::tflite::Operator *op);
/**
- * @brief Mark output MIR nodes
- */
+ * @brief Mark output MIR nodes
+ */
void setGraphOutputs();
/**
- * @brief Set MIR node names
- */
+ * @brief Set MIR node names
+ */
void setIrNodeNames();
/**
- * @brief Returns MIR operation outputs corresponding to the inputs of the given operator.
- */
- std::vector<mir::Operation::Output*> getMIRInputsForOperator(const ::tflite::Operator* op);
+ * @brief Returns MIR operation outputs corresponding to the inputs of the given operator.
+ */
+ std::vector<mir::Operation::Output *> getMIRInputsForOperator(const ::tflite::Operator *op);
- mir::TensorVariant createTensor(const ::tflite::Tensor* t,
- const ::tflite::Buffer* b);
+ mir::TensorVariant createTensor(const ::tflite::Tensor *t, const ::tflite::Buffer *b);
};
-} // namespace nnc
+} // namespace nnc
-#endif // NNCC_TFLITE_IMPORTER_H
+#endif // NNCC_TFLITE_IMPORTER_H
using namespace ::tflite;
-namespace nnc {
-
-static void calculatePadding(tflite::Padding padding,
- const Shape& input_shape,
- const Shape& window_shape,
- const Shape& strides,
- std::vector<int32_t>& padding_before,
- std::vector<int32_t>& padding_after) {
- switch (padding) {
+namespace nnc
+{
+
+static void calculatePadding(tflite::Padding padding, const Shape &input_shape,
+ const Shape &window_shape, const Shape &strides,
+ std::vector<int32_t> &padding_before,
+ std::vector<int32_t> &padding_after)
+{
+ switch (padding)
+ {
case tflite::Padding_SAME:
- for (int i = 0; i < 2; ++i) {
+ for (int i = 0; i < 2; ++i)
+ {
int32_t padding;
padding = (input_shape.dim(1 + i) % strides.dim(i) == 0)
- ? std::max(0, window_shape.dim(i) - strides.dim(i))
- : std::max(0, window_shape.dim(i) - input_shape.dim(1 + i) % strides.dim(i));
+ ? std::max(0, window_shape.dim(i) - strides.dim(i))
+ : std::max(0, window_shape.dim(i) - input_shape.dim(1 + i) % strides.dim(i));
padding_before[i] = padding / 2;
padding_after[i] = padding - padding_before[i];
}
break;
case tflite::Padding_VALID:
- for (int i = 0; i < 2; ++i) {
+ for (int i = 0; i < 2; ++i)
+ {
padding_before[i] = 0;
padding_after[i] = 0;
}
}
}
-template<typename VectorT>
-static std::vector<VectorT> convertIntTensorToVector(const mir::Tensor<int32_t>& tensor) {
+template <typename VectorT>
+static std::vector<VectorT> convertIntTensorToVector(const mir::Tensor<int32_t> &tensor)
+{
std::vector<VectorT> v;
- for (const auto& i : mir::ShapeRange(tensor.getShape()))
+ for (const auto &i : mir::ShapeRange(tensor.getShape()))
v.emplace_back(static_cast<VectorT>(tensor.at(i)));
return v;
}
-static const mir::TensorVariant& extractTensor(const mir::Operation::Output* output) {
- auto constant_op = dynamic_cast<const ops::ConstantOp*>(output->getNode());
+static const mir::TensorVariant &extractTensor(const mir::Operation::Output *output)
+{
+ auto constant_op = dynamic_cast<const ops::ConstantOp *>(output->getNode());
if (constant_op == nullptr)
throw std::runtime_error("Non-constant input is not supported.");
return constant_op->getValue();
}
-void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts,
- std::set<std::string>& problems_ops_set) {
+void TFLiteOpCreator::checkConv2D(const Conv2DOptions *opts,
+ std::set<std::string> &problems_ops_set)
+{
checkActivationType(opts->fused_activation_function(), problems_ops_set);
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertConv2D(const Conv2DOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertConv2D(const Conv2DOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
auto kernel = inputs.at(1);
auto bias = inputs.at(2);
std::vector<int32_t> padding_before(2);
std::vector<int32_t> padding_after(2);
- const auto& input_shape = input->getShape();
- const auto& kernel_shape = kernel->getShape();
+ const auto &input_shape = input->getShape();
+ const auto &kernel_shape = kernel->getShape();
Shape window_shape{kernel_shape.dim(1), kernel_shape.dim(2)};
- calculatePadding(opts->padding(), input_shape, window_shape,
- strides, padding_before, padding_after);
+ calculatePadding(opts->padding(), input_shape, window_shape, strides, padding_before,
+ padding_after);
auto result = createOp<ops::Conv2DOp>(input, kernel, strides, padding_before, padding_after);
result = createOp<ops::BiasAddOp>(result->getOutput(0), bias);
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
-void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
- std::set<std::string>& problems_ops_set) {
+void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions *opts,
+ std::set<std::string> &problems_ops_set)
+{
checkActivationType(opts->fused_activation_function(), problems_ops_set);
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertDepthwiseConv2D(const DepthwiseConv2DOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
auto kernel = inputs.at(1);
auto bias = inputs.at(2);
// OHWI -> HWIO
// TODO Insert TransposeOp instead when ACL backend is ready for that.
- const auto& kernel_tensor = mir::transposeTensor<1, 2, 3, 0>(extractTensor(kernel));
+ const auto &kernel_tensor = mir::transposeTensor<1, 2, 3, 0>(extractTensor(kernel));
kernel = createOp<ops::ConstantOp>(kernel_tensor)->getOutput(0);
Shape strides{opts->stride_h(), opts->stride_w()};
std::vector<int32_t> padding_before(2);
std::vector<int32_t> padding_after(2);
- const auto& input_shape = input->getShape();
- const auto& kernel_shape = kernel->getShape();
+ const auto &input_shape = input->getShape();
+ const auto &kernel_shape = kernel->getShape();
Shape window_shape{kernel_shape.dim(0), kernel_shape.dim(1)};
- calculatePadding(opts->padding(), input_shape, window_shape,
- strides, padding_before, padding_after);
+ calculatePadding(opts->padding(), input_shape, window_shape, strides, padding_before,
+ padding_after);
- auto result = createOp<ops::DepthwiseConv2DOp>(input, kernel,
- strides, padding_before, padding_after);
+ auto result =
+ createOp<ops::DepthwiseConv2DOp>(input, kernel, strides, padding_before, padding_after);
result = createOp<ops::BiasAddOp>(result->getOutput(0), bias);
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
-void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts,
- std::set<std::string>& problems_ops_set) {
+void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions *opts,
+ std::set<std::string> &problems_ops_set)
+{
checkActivationType(opts->fused_activation_function(), problems_ops_set);
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertConcatenation(const ::tflite::ConcatenationOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertConcatenation(const ::tflite::ConcatenationOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto result = createOp<ops::ConcatOp>(inputs, opts->axis());
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
-void TFLiteOpCreator::checkPool2D(const Pool2DOptions* opts,
- std::set<std::string>& problems_ops_set) {
+void TFLiteOpCreator::checkPool2D(const Pool2DOptions *opts,
+ std::set<std::string> &problems_ops_set)
+{
checkActivationType(opts->fused_activation_function(), problems_ops_set);
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertMaxPool2D(const ::tflite::Pool2DOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertMaxPool2D(const ::tflite::Pool2DOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
- const auto& input_shape = input->getShape();
+ const auto &input_shape = input->getShape();
Shape window_shape{opts->filter_height(), opts->filter_width()};
Shape strides{opts->stride_h(), opts->stride_w()};
std::vector<int32_t> padding_before(2);
std::vector<int32_t> padding_after(2);
- calculatePadding(opts->padding(), input_shape, window_shape,
- strides, padding_before, padding_after);
+ calculatePadding(opts->padding(), input_shape, window_shape, strides, padding_before,
+ padding_after);
- auto result = createOp<ops::PoolOp>(input, ops::PoolOp::PoolingType::MAX,
- window_shape, strides, padding_before, padding_after,
- ops::PoolOp::BorderType::EMPTY);
+ auto result =
+ createOp<ops::PoolOp>(input, ops::PoolOp::PoolingType::MAX, window_shape, strides,
+ padding_before, padding_after, ops::PoolOp::BorderType::EMPTY);
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertAveragePool2D(const ::tflite::Pool2DOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertAveragePool2D(const ::tflite::Pool2DOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
- const auto& input_shape = input->getShape();
+ const auto &input_shape = input->getShape();
Shape window_shape{opts->filter_height(), opts->filter_width()};
Shape strides{opts->stride_h(), opts->stride_w()};
std::vector<int32_t> padding_before(2);
std::vector<int32_t> padding_after(2);
- calculatePadding(opts->padding(), input_shape, window_shape,
- strides, padding_before, padding_after);
+ calculatePadding(opts->padding(), input_shape, window_shape, strides, padding_before,
+ padding_after);
- auto result = createOp<ops::PoolOp>(input, ops::PoolOp::PoolingType::AVG,
- window_shape, strides, padding_before, padding_after,
- ops::PoolOp::BorderType::EMPTY);
+ auto result =
+ createOp<ops::PoolOp>(input, ops::PoolOp::PoolingType::AVG, window_shape, strides,
+ padding_before, padding_after, ops::PoolOp::BorderType::EMPTY);
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertSoftmax(const ::tflite::SoftmaxOptions* /*opts*/,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSoftmax(const ::tflite::SoftmaxOptions * /*opts*/,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
// Softmax in TFLite is always 2-D.
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertSlice(const ::tflite::SliceOptions* /*opts*/,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSlice(const ::tflite::SliceOptions * /*opts*/,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(2)));
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertReshape(const ::tflite::ReshapeOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertReshape(const ::tflite::ReshapeOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
// TODO: we should also support "-1" values in new_shape, which means that correct
// shape values must be calculated. Better do it in the shape inference module.
Shape new_shape(opts->new_shape()->size());
- for (int i = 0; i < opts->new_shape()->size(); ++i) {
+ for (int i = 0; i < opts->new_shape()->size(); ++i)
+ {
new_shape.dim(i) = opts->new_shape()->Get(i);
}
auto result = createOp<ops::ReshapeOp>(input, new_shape);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertTransposeConv(const ::tflite::TransposeConvOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertTransposeConv(const ::tflite::TransposeConvOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
mir::Tensor<int32_t> output_shape_tensor(extractTensor(inputs.at(0)));
auto kernel = inputs.at(1);
auto input = inputs.at(2);
// OHWI -> HWOI
// TODO Insert TransposeOp instead when ACL backend is ready for that.
- const auto& kernel_tensor = mir::transposeTensor<1, 2, 0, 3>(extractTensor(kernel));
+ const auto &kernel_tensor = mir::transposeTensor<1, 2, 0, 3>(extractTensor(kernel));
kernel = createOp<ops::ConstantOp>(kernel_tensor)->getOutput(0);
- auto result = createOp<ops::DeConv2DOp>(input, kernel,
- strides, paddingMap[opts->padding()], output_shape);
+ auto result =
+ createOp<ops::DeConv2DOp>(input, kernel, strides, paddingMap[opts->padding()], output_shape);
return {result->getOutput(0)};
}
-void TFLiteOpCreator::checkResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions* opts,
- std::set<std::string>& problems_ops_set) {
+void TFLiteOpCreator::checkResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions *opts,
+ std::set<std::string> &problems_ops_set)
+{
if (opts->align_corners())
problems_ops_set.insert("'align_corners' is not currently supported");
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertResizeNearestNeighbor(
- const ::tflite::ResizeNearestNeighborOptions* /*opts*/,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *> TFLiteOpCreator::convertResizeNearestNeighbor(
+ const ::tflite::ResizeNearestNeighborOptions * /*opts*/,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(1)));
- const auto& input_shape = input->getShape();
- Shape res_shape{input_shape.dim(0),
- size_tensor.at(mir::Index{0}),
- size_tensor.at(mir::Index{1}),
+ const auto &input_shape = input->getShape();
+ Shape res_shape{input_shape.dim(0), size_tensor.at(mir::Index{0}), size_tensor.at(mir::Index{1}),
input_shape.dim(3)};
- auto result = createOp<ops::ResizeOp>(input, ops::ResizeOp::ResizeMethod::nearestNeighbor,
- res_shape);
+ auto result =
+ createOp<ops::ResizeOp>(input, ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertAdd(const ::tflite::AddOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertAdd(const ::tflite::AddOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::add);
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertSub(const ::tflite::SubOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSub(const ::tflite::SubOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::sub);
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertMul(const ::tflite::MulOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertMul(const ::tflite::MulOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
// Try to constant fold the operation in some cases.
if (inputs.size() == 2 && inputs[0]->getShape() == inputs[1]->getShape() &&
- opts->fused_activation_function() == ActivationFunctionType_NONE) {
- auto constant1_op = dynamic_cast<const ops::ConstantOp*>(inputs[0]->getNode());
- auto constant2_op = dynamic_cast<const ops::ConstantOp*>(inputs[1]->getNode());
- if (constant1_op != nullptr && constant2_op != nullptr) {
- const auto& input1_tensor = constant1_op->getValue();
- const auto& input2_tensor = constant2_op->getValue();
+ opts->fused_activation_function() == ActivationFunctionType_NONE)
+ {
+ auto constant1_op = dynamic_cast<const ops::ConstantOp *>(inputs[0]->getNode());
+ auto constant2_op = dynamic_cast<const ops::ConstantOp *>(inputs[1]->getNode());
+ if (constant1_op != nullptr && constant2_op != nullptr)
+ {
+ const auto &input1_tensor = constant1_op->getValue();
+ const auto &input2_tensor = constant2_op->getValue();
if (input1_tensor.getDataType() == mir::DTYPE::INT32 &&
- input2_tensor.getDataType() == mir::DTYPE::INT32) {
- const auto& output_shape = inputs[0]->getShape();
+ input2_tensor.getDataType() == mir::DTYPE::INT32)
+ {
+ const auto &output_shape = inputs[0]->getShape();
mir::TensorVariant res_tensor(mir::DTYPE::INT32, output_shape);
mir::Tensor<int32_t> input1_accessor(input1_tensor);
mir::Tensor<int32_t> input2_accessor(input2_tensor);
mir::Tensor<int32_t> res_accessor(res_tensor);
- for (const auto& idx : mir::ShapeRange(output_shape)) {
+ for (const auto &idx : mir::ShapeRange(output_shape))
+ {
res_accessor.at(idx) = input1_accessor.at(idx) * input2_accessor.at(idx);
}
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertDiv(const ::tflite::DivOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertDiv(const ::tflite::DivOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::div);
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertMax(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertMax(const std::vector<mir::Operation::Output *> &inputs)
+{
auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::max);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::Operation::Output *> &inputs)
+{
auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::sub);
- result = createOp<ops::ElementwiseOp>(std::vector<mir::Operation::Output*>{
- result->getOutput(0),
- result->getOutput(0)},
- ops::ElementwiseOp::OpType::mul);
+ result = createOp<ops::ElementwiseOp>(
+ std::vector<mir::Operation::Output *>{result->getOutput(0), result->getOutput(0)},
+ ops::ElementwiseOp::OpType::mul);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertMean(const ::tflite::ReducerOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertMean(const ::tflite::ReducerOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
mir::Tensor<int32_t> axes_tensor(extractTensor(inputs.at(1)));
std::vector<int32_t> axes = convertIntTensorToVector<int32_t>(axes_tensor);
- auto result = createOp<ops::ReduceOp>(input, axes, opts->keep_dims(),
- ops::ReduceOp::FuncType::mean);
+ auto result =
+ createOp<ops::ReduceOp>(input, axes, opts->keep_dims(), ops::ReduceOp::FuncType::mean);
return {result->getOutput(0)};
}
-void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions* opts,
- std::set<std::string>& problems_ops_set) {
+void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions *opts,
+ std::set<std::string> &problems_ops_set)
+{
checkActivationType(opts->fused_activation_function(), problems_ops_set);
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertFullyConnected(const ::tflite::FullyConnectedOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertFullyConnected(const ::tflite::FullyConnectedOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
auto weights = inputs.at(1);
auto bias = inputs.at(2);
// Flatten input to 2-D shape.
- const auto& input_shape = input->getShape();
+ const auto &input_shape = input->getShape();
int32_t outer_size = input_shape.dim(0);
int32_t inner_size = input_shape.numElements() / outer_size;
auto flatten = createOp<ops::ReshapeOp>(input, Shape{outer_size, inner_size});
// TODO Insert TransposeOp instead when ACL backend is ready for that.
- const auto& weights_tensor = mir::transposeTensor<1, 0>(extractTensor(weights));
+ const auto &weights_tensor = mir::transposeTensor<1, 0>(extractTensor(weights));
weights = createOp<ops::ConstantOp>(weights_tensor)->getOutput(0);
auto result = createOp<ops::FullyConnectedOp>(flatten->getOutput(0), weights);
}
void TFLiteOpCreator::checkActivationType(ActivationFunctionType activation_type,
- std::set<std::string>& problems_ops_set) {
+ std::set<std::string> &problems_ops_set)
+{
if (activation_type != ActivationFunctionType_NONE &&
activation_type != ActivationFunctionType_RELU &&
activation_type != ActivationFunctionType_RELU6 &&
activation_type != ActivationFunctionType_TANH)
- problems_ops_set.insert(std::string("Unsupported activation type: ")
- + EnumNameActivationFunctionType(activation_type));
+ problems_ops_set.insert(std::string("Unsupported activation type: ") +
+ EnumNameActivationFunctionType(activation_type));
}
-mir::Operation::Output*
-TFLiteOpCreator::addFusedActivation(mir::Operation::Output* input,
- ActivationFunctionType activation_type) {
+mir::Operation::Output *TFLiteOpCreator::addFusedActivation(mir::Operation::Output *input,
+ ActivationFunctionType activation_type)
+{
// TODO Support other activation function types.
- switch (activation_type) {
+ switch (activation_type)
+ {
case ActivationFunctionType_NONE:
return input;
case ActivationFunctionType_RELU:
}
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertSqueeze(const ::tflite::SqueezeOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSqueeze(const ::tflite::SqueezeOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
- std::vector<int32_t> squeeze_dims(opts->squeeze_dims()->begin(),
- opts->squeeze_dims()->end());
+ std::vector<int32_t> squeeze_dims(opts->squeeze_dims()->begin(), opts->squeeze_dims()->end());
auto result = createOp<ops::SqueezeOp>(input, squeeze_dims);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertPad(const ::tflite::PadOptions* /*opts*/,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertPad(const ::tflite::PadOptions * /*opts*/,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
mir::Tensor<int32_t> paddings_tensor(extractTensor(inputs.at(1)));
- const auto& input_shape = input->getShape();
+ const auto &input_shape = input->getShape();
int32_t num_dims = input_shape.rank();
std::vector<std::pair<int32_t, int32_t>> paddings;
paddings.reserve(static_cast<uint64_t>(num_dims));
- for (int axis = 0; axis < num_dims; axis++) {
+ for (int axis = 0; axis < num_dims; axis++)
+ {
paddings.emplace_back(paddings_tensor.at(mir::Index({axis, 0})),
paddings_tensor.at(mir::Index({axis, 1})));
}
float filler_value = 0.0;
- mir::Scalar filler(reinterpret_cast<char*>(&filler_value),
- mir::DTYPE::FLOAT32, sizeof(filler_value));
+ mir::Scalar filler(reinterpret_cast<char *>(&filler_value), mir::DTYPE::FLOAT32,
+ sizeof(filler_value));
// FIXME Do we really need num_dims as an argument? It looks redundant.
auto result = createOp<ops::PadOp>(input, num_dims, paddings, filler);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertTanh(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertTanh(const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
auto result = createOp<ops::TanhOp>(input);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertReLU(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertReLU(const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
auto result = createOp<ops::ReluOp>(input);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertReLU6(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertReLU6(const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
auto result = createOp<ops::CappedReluOp>(input, 6);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertSqrt(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertSqrt(const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
auto result = createOp<ops::SqrtOp>(input);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertLogistic(const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertLogistic(const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
auto result = createOp<ops::SigmoidOp>(input);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertTranspose(const ::tflite::TransposeOptions* /*opts*/,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertTranspose(const ::tflite::TransposeOptions * /*opts*/,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
mir::Tensor<int32_t> perm_tensor(extractTensor(inputs.at(1)));
return {result->getOutput(0)};
}
-void TFLiteOpCreator::checkStridedSlice(const ::tflite::StridedSliceOptions* opts,
- std::set<std::string>& problems_ops_set) {
+void TFLiteOpCreator::checkStridedSlice(const ::tflite::StridedSliceOptions *opts,
+ std::set<std::string> &problems_ops_set)
+{
if (opts->ellipsis_mask() != 0)
problems_ops_set.insert("StridedSlice: parameter 'ellipsis_mask' is not supported.");
problems_ops_set.insert("StridedSlice: parameter 'new_axis_mask' is not supported.");
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertStridedSlice(const ::tflite::StridedSliceOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertStridedSlice(const ::tflite::StridedSliceOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
mir::Tensor<int32_t> end_tensor(extractTensor(inputs.at(2)));
int32_t end_mask = opts->end_mask();
int32_t shrink_axis_mask = opts->shrink_axis_mask();
- const auto& input_shape = input->getShape();
+ const auto &input_shape = input->getShape();
int32_t num_dims = input_shape.rank();
- for (int32_t stride : strides) {
+ for (int32_t stride : strides)
+ {
if (stride != 1)
throw std::runtime_error("StridedSlice: parameter 'strides' is not supported");
}
Shape start(num_dims);
Shape size(num_dims);
std::vector<int32_t> squeeze_dims;
- for (int axis = 0; axis < num_dims; axis++) {
+ for (int axis = 0; axis < num_dims; axis++)
+ {
if (static_cast<uint32_t>(begin_mask) & (1u << static_cast<uint32_t>(axis)))
start.dim(axis) = 0;
else
size.dim(axis) = input_shape.dim(axis) - start.dim(axis);
else
size.dim(axis) = end.at(static_cast<uint64_t>(axis)) - start.dim(axis);
-
+
if (static_cast<uint32_t>(shrink_axis_mask) & (1u << static_cast<uint32_t>(axis)))
squeeze_dims.push_back(axis);
}
// Try to constant fold the operation in some cases.
- if (shrink_axis_mask == 0) {
- auto constant_op = dynamic_cast<const ops::ConstantOp*>(input->getNode());
- if (constant_op != nullptr) {
- const auto& input_tensor = constant_op->getValue();
- if (input_tensor.getDataType() == mir::DTYPE::INT32) {
+ if (shrink_axis_mask == 0)
+ {
+ auto constant_op = dynamic_cast<const ops::ConstantOp *>(input->getNode());
+ if (constant_op != nullptr)
+ {
+ const auto &input_tensor = constant_op->getValue();
+ if (input_tensor.getDataType() == mir::DTYPE::INT32)
+ {
mir::Shape output_shape(num_dims);
- for (int32_t i = 0; i < num_dims; ++i) {
- if (size.dim(i) == -1) {
+ for (int32_t i = 0; i < num_dims; ++i)
+ {
+ if (size.dim(i) == -1)
+ {
output_shape.dim(i) = input_shape.dim(i) - start.dim(i);
- } else {
+ }
+ else
+ {
output_shape.dim(i) = size.dim(i);
}
}
mir::Tensor<int32_t> res_accessor(res_tensor);
mir::Index in_idx(static_cast<std::size_t>(num_dims));
- for (const auto& out_idx : mir::ShapeRange(output_shape)) {
- for (int32_t i = 0; i < num_dims; ++i) {
+ for (const auto &out_idx : mir::ShapeRange(output_shape))
+ {
+ for (int32_t i = 0; i < num_dims; ++i)
+ {
in_idx.at(i) = out_idx.at(i) + start.dim(i);
}
res_accessor.at(out_idx) = input_accessor.at(in_idx);
return {result->getOutput(0)};
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertLeakyReLU(const ::tflite::LeakyReluOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs) {
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertLeakyReLU(const ::tflite::LeakyReluOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
auto input = inputs.at(0);
auto result = createOp<ops::LeakyReluOp>(input, opts->alpha());
return {result->getOutput(0)};
}
-void TFLiteOpCreator::checkShape(const ::tflite::ShapeOptions* opts,
- std::set<std::string>& problems_ops_set) {
- if (opts->out_type() != TensorType_INT32) {
+void TFLiteOpCreator::checkShape(const ::tflite::ShapeOptions *opts,
+ std::set<std::string> &problems_ops_set)
+{
+ if (opts->out_type() != TensorType_INT32)
+ {
problems_ops_set.insert(std::string("SHAPE: Unsupported tensor type: ") +
EnumNameTensorType(opts->out_type()));
}
}
-std::vector<mir::Operation::Output*>
-TFLiteOpCreator::convertShape(const ::tflite::ShapeOptions* /*opts*/,
- const std::vector<mir::Operation::Output*>& inputs) {
- const auto& input_shape = inputs[0]->getShape();
+std::vector<mir::Operation::Output *>
+TFLiteOpCreator::convertShape(const ::tflite::ShapeOptions * /*opts*/,
+ const std::vector<mir::Operation::Output *> &inputs)
+{
+ const auto &input_shape = inputs[0]->getShape();
int32_t rank = input_shape.rank();
Shape output_shape{rank};
std::vector<int32_t> data;
#include <set>
#include <vector>
-namespace nnc {
+namespace nnc
+{
namespace ops = mir::ops;
using mir::Graph;
using mir::Shape;
-class TFLiteOpCreator {
+class TFLiteOpCreator
+{
public:
- explicit TFLiteOpCreator(Graph* g) : _graph(g) {}
+ explicit TFLiteOpCreator(Graph *g) : _graph(g) {}
- std::vector<mir::Operation::Output*>
- convertConv2D(const ::tflite::Conv2DOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertConv2D(const ::tflite::Conv2DOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertDepthwiseConv2D(const ::tflite::DepthwiseConv2DOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertDepthwiseConv2D(const ::tflite::DepthwiseConv2DOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertConcatenation(const ::tflite::ConcatenationOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertConcatenation(const ::tflite::ConcatenationOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertMaxPool2D(const ::tflite::Pool2DOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertMaxPool2D(const ::tflite::Pool2DOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertAveragePool2D(const ::tflite::Pool2DOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertAveragePool2D(const ::tflite::Pool2DOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertMean(const ::tflite::ReducerOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertMean(const ::tflite::ReducerOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertSoftmax(const ::tflite::SoftmaxOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSoftmax(const ::tflite::SoftmaxOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertSlice(const ::tflite::SliceOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSlice(const ::tflite::SliceOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertReshape(const ::tflite::ReshapeOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertReshape(const ::tflite::ReshapeOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertFullyConnected(const ::tflite::FullyConnectedOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertFullyConnected(const ::tflite::FullyConnectedOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertLogistic(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertLogistic(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertSqrt(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSqrt(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertSqueeze(const ::tflite::SqueezeOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSqueeze(const ::tflite::SqueezeOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertAdd(const ::tflite::AddOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertAdd(const ::tflite::AddOptions *opts, const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertSub(const ::tflite::SubOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSub(const ::tflite::SubOptions *opts, const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertMul(const ::tflite::MulOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertMul(const ::tflite::MulOptions *opts, const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertDiv(const ::tflite::DivOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertDiv(const ::tflite::DivOptions *opts, const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertMax(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertMax(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertSquaredDifference(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertSquaredDifference(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertTanh(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertTanh(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertReLU(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertReLU(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertReLU6(const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertReLU6(const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertTransposeConv(const ::tflite::TransposeConvOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertTransposeConv(const ::tflite::TransposeConvOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertPad(const ::tflite::PadOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertPad(const ::tflite::PadOptions *opts, const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertTranspose(const ::tflite::TransposeOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertTranspose(const ::tflite::TransposeOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertStridedSlice(const ::tflite::StridedSliceOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertStridedSlice(const ::tflite::StridedSliceOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertLeakyReLU(const ::tflite::LeakyReluOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertLeakyReLU(const ::tflite::LeakyReluOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- std::vector<mir::Operation::Output*>
- convertShape(const ::tflite::ShapeOptions* opts,
- const std::vector<mir::Operation::Output*>& inputs);
+ std::vector<mir::Operation::Output *>
+ convertShape(const ::tflite::ShapeOptions *opts,
+ const std::vector<mir::Operation::Output *> &inputs);
- void checkPool2D(const ::tflite::Pool2DOptions* opts,
- std::set<std::string>& problems_ops_set);
+ void checkPool2D(const ::tflite::Pool2DOptions *opts, std::set<std::string> &problems_ops_set);
- void checkConcatenation(const ::tflite::ConcatenationOptions* opts,
- std::set<std::string>& problems_ops_set);
+ void checkConcatenation(const ::tflite::ConcatenationOptions *opts,
+ std::set<std::string> &problems_ops_set);
- void checkConv2D(const ::tflite::Conv2DOptions* opts,
- std::set<std::string>& problems_ops_set);
+ void checkConv2D(const ::tflite::Conv2DOptions *opts, std::set<std::string> &problems_ops_set);
- void checkDepthwiseConv2D(const ::tflite::DepthwiseConv2DOptions* opts,
- std::set<std::string>& problems_ops_set);
+ void checkDepthwiseConv2D(const ::tflite::DepthwiseConv2DOptions *opts,
+ std::set<std::string> &problems_ops_set);
- void checkFullyConnected(const ::tflite::FullyConnectedOptions* opts,
- std::set<std::string>& problems_ops_set);
+ void checkFullyConnected(const ::tflite::FullyConnectedOptions *opts,
+ std::set<std::string> &problems_ops_set);
- void checkResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions* opts,
- std::set<std::string>& problems_ops_set);
+ void checkResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions *opts,
+ std::set<std::string> &problems_ops_set);
- void checkStridedSlice(const ::tflite::StridedSliceOptions* opts,
- std::set<std::string>& problems_ops_set);
+ void checkStridedSlice(const ::tflite::StridedSliceOptions *opts,
+ std::set<std::string> &problems_ops_set);
+
+ void checkShape(const ::tflite::ShapeOptions *opts, std::set<std::string> &problems_ops_set);
- void checkShape(const ::tflite::ShapeOptions* opts,
- std::set<std::string>& problems_ops_set);
private:
- Graph* _graph;
+ Graph *_graph;
std::map<::tflite::Padding, ops::PaddingType> paddingMap = {
- {::tflite::Padding_SAME, ops::PaddingType::Same},
- {::tflite::Padding_VALID, ops::PaddingType::Valid}};
+ {::tflite::Padding_SAME, ops::PaddingType::Same},
+ {::tflite::Padding_VALID, ops::PaddingType::Valid}};
void checkActivationType(::tflite::ActivationFunctionType activation_type,
- std::set<std::string>& problems_ops_set);
+ std::set<std::string> &problems_ops_set);
- mir::Operation::Output* addFusedActivation(mir::Operation::Output* input,
+ mir::Operation::Output *addFusedActivation(mir::Operation::Output *input,
::tflite::ActivationFunctionType activation_type);
- template<typename OpType, typename... Types>
- mir::Operation* createOp(Types&&... args);
-
+ template <typename OpType, typename... Types> mir::Operation *createOp(Types &&... args);
};
-template<typename OpType, typename... Types>
-mir::Operation* TFLiteOpCreator::createOp(Types&& ... args) {
+template <typename OpType, typename... Types>
+mir::Operation *TFLiteOpCreator::createOp(Types &&... args)
+{
// TODO: how to name operations? in Tensorflow tensors get names, not operations
return _graph->create<OpType>("", std::forward<Types>(args)...);
}