#include "ONNXHelpers.h"
#include "ONNXOpRegistration.h"
-#include "mir/IrDotDumper.h"
-#include "mir/ops/ConstantOp.h"
-#include "mir/ops/Conv2DOp.h"
-#include "mir/ops/DepthwiseConv2DOp.h"
-#include "mir/ops/ElementwiseOp.h"
-#include "mir/ops/InputOp.h"
-#include "mir/ops/OutputOp.h"
-#include "mir/ops/TransposeOp.h"
#include "mir/Operation.h"
#include "mir/Shape.h"
#include "mir/TensorUtil.h"
#include "mir/TensorVariant.h"
-#include "onnx/onnx.pb.h"
+
+#include "mir/ops/ConstantOp.h"
#include <fcntl.h>
const auto &onnx_node = _model->graph().node(i);
assert(onnx_node.has_op_type());
const auto &op_type = onnx_node.op_type();
+
const auto *converter = NodeConverterRegistry::getInstance().lookup(op_type);
if (converter == nullptr)
problems_op_set.insert(op_type);
void ONNXImporterImpl::createGraphInputs()
{
- auto &graph = _model->graph();
- auto &initializer = graph.initializer();
- auto &value_info = graph.value_info();
- std::map<std::string, const onnx::TensorProto *> onnx_tensors;
+ const auto &graph = _model->graph();
+ const auto &initializer = graph.initializer();
+ const auto &value_info = graph.value_info();
- // Collect all initializers of the given graph
- for (int i = 0; i < graph.initializer_size(); i++)
+ // Create all initializer Tensors
+ for (const auto &tensor : initializer)
{
- const onnx::TensorProto &tensor = graph.initializer(i);
- assert(onnx_tensors.find(tensor.name()) == onnx_tensors.end());
- onnx_tensors[tensor.name()] = &tensor;
+ assert(tensor.has_name());
+ const auto mir_tensor = createTensor(&tensor);
+ auto *op = _graph->create<mir::ops::ConstantOp>(tensor.name(), mir_tensor);
+ _tensorNameToOutput.emplace(tensor.name(), op->getOutput(0));
}
for (auto &input : graph.input())
{
assert(input.has_name());
- auto name = input.name();
- if (onnx_tensors.find(name) != onnx_tensors.end())
- {
- const onnx::TensorProto *onnx_tensor = onnx_tensors[name];
- _constantTensors.insert(std::make_pair(name, createTensor(onnx_tensor)));
- auto constant = _graph->create<mir::ops::ConstantOp>(name, _constantTensors.at(name));
- _tensorNameToOutput[name] = constant->getOutput(0);
- }
- else
+ if (_tensorNameToOutput.find(input.name()) == _tensorNameToOutput.end())
{
const auto &onnx_input_shape = input.type().tensor_type().shape();
mir::Shape shape(onnx_input_shape.dim_size());
assert(onnx_input_shape.dim(i).has_dim_value());
shape.dim(i) = static_cast<int32_t>(onnx_input_shape.dim(i).dim_value());
}
- // TODO: Temporary solution!
- auto node = _graph->create<mir::ops::InputOp>(name, shape);
- _tensorNameToOutput[name] = node->getOutput(0);
+
+ auto *op = _graph->create<mir::ops::InputOp>(input.name(), shape);
+ _tensorNameToOutput.emplace(input.name(), op->getOutput(0));
}
}
}
{
  // Materialize constants and inputs first so every node can resolve its
  // operands by name.
  createGraphInputs();

  // Forming partially ordered computation graph: ONNX guarantees nodes are
  // topologically sorted, so a single pass resolves all producers.
  for (const auto &onnx_node : _model->graph().node())
  {
    assert(onnx_node.has_op_type());
    const auto &op_type = onnx_node.op_type();
    const auto &inputs = onnx_node.input();

    std::vector<mir::Operation::Output *> mir_inputs;
    std::vector<mir::Operation::Output *> mir_outputs;

    // Collect MIR outputs feeding this node. An empty input name denotes an
    // omitted optional input in ONNX and is skipped.
    for (const auto &input_name : inputs)
    {
      if (!input_name.empty())
      {
        const auto mir_op_iter = _tensorNameToOutput.find(input_name);
        assert(mir_op_iter != _tensorNameToOutput.end());
        mir_inputs.emplace_back(mir_op_iter->second);
      }
    }

    // Get converter for this op type; unsupported ops must have been rejected
    // earlier, hence the assert rather than a runtime error.
    const auto *node_converter = NodeConverterRegistry::getInstance().lookup(op_type);
    assert(node_converter);
    mir_outputs = node_converter->convert(onnx_node, mir_inputs, _graph.get());
    assert(!mir_outputs.empty());

    // Name each produced output after the corresponding ONNX output and
    // register it for downstream nodes. Duplicate names violate ONNX SSA form.
    // std::size_t avoids the signed/unsigned comparison against size();
    // protobuf accessors take int, hence the explicit cast.
    for (std::size_t i = 0; i < mir_outputs.size(); ++i)
    {
      const auto &output_name = onnx_node.output(static_cast<int>(i));
      mir_outputs[i]->getNode()->setName(output_name);
      auto result = _tensorNameToOutput.emplace(output_name, mir_outputs[i]);
      if (!result.second)
        throw std::runtime_error("Name duplication: " + mir_outputs[i]->getNode()->getName());
    }
  }

  // Set graph outputs as declared by the ONNX graph.
  const auto &outputs = _model->graph().output();
  for (const auto &output : outputs)
  {
    assert(output.has_name());
    auto output_iter = _tensorNameToOutput.find(output.name());
    if (output_iter == _tensorNameToOutput.end())
      throw std::runtime_error("Bad output name!");

    _graph->create<mir::ops::OutputOp>(output.name(), output_iter->second);
  }
  // _graph is a member (unique_ptr): std::move is required to transfer
  // ownership to the caller.
  return std::move(_graph);