* Remove `IODescriptor` type alias.
* Add non-const variants of `getNode` method to `Operation::Output` and `Operation::Input` classes.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
Operation* src_node = q.front();
q.pop_front();
src_node->accept(visitor);
- for (const auto& src_output : src_node->getOutputs()) {
- for (const auto* consumer : src_output.getConsumers()) {
+ for (auto& src_output : src_node->getOutputs()) {
+ for (auto* consumer : src_output.getConsumers()) {
Operation* dst_node = consumer->getNode();
if (known_ops.count(dst_node) == 0) {
bool allInputsResolved = true;
- for (const auto& dst_input : dst_node->getInputs()) {
+ for (auto& dst_input : dst_node->getInputs()) {
if (known_ops.count(dst_input.getProducer()->getNode()) == 0) {
allInputsResolved = false;
}
std::vector<std::pair<Operation*, Operation*>> matches;
for (auto* start: _g->getNodes()) {
if (p1(start)) {
- for (const auto& out : start->getOutputs()) {
- for (const auto* consumer : out.getConsumers()) {
+ for (auto& out : start->getOutputs()) {
+ for (auto* consumer : out.getConsumers()) {
Operation* end = consumer->getNode();
if (p2(end)) {
matches.emplace_back(std::make_pair(start, end));
namespace nnc {
namespace mir {
-Operation::Operation(Type type, const std::vector<IODescriptor>& inputs, std::size_t num_outputs)
+Operation::Operation(Type type, const std::vector<Output*>& inputs, std::size_t num_outputs)
: _type(type) {
for (std::size_t i = 0; i < inputs.size(); ++i) {
_inputs.emplace_back(this, i, inputs[i]);
#include "core/modelIR/ir_dot_builder.h"
-namespace nnc
-{
-namespace mir
-{
+namespace nnc {
+namespace mir {
-void IrDotBuilder::updateWithOp(Operation* op, const DotIrNodeInfo& irNodeInfo)
-{
+void IrDotBuilder::updateWithOp(const Operation* op, const DotIrNodeInfo& irNodeInfo) {
addNode(op, irNodeInfo);
- for (auto &prev : op->getInputs())
- {
+ for (auto& prev : op->getInputs()) {
addEdge(prev.getProducer()->getNode(), op);
}
}
-void IrDotBuilder::writeDot(std::ostream &os)
-{
+void IrDotBuilder::writeDot(std::ostream& os) {
os << "digraph D {" << std::endl << dot.str() << std::endl << "}" << std::endl;
}
-void IrDotBuilder::addNode(Operation* op, const DotIrNodeInfo& irNode)
-{
+void IrDotBuilder::addNode(const Operation* op, const DotIrNodeInfo& irNode) {
dot << op->getId() << " [shape=record label=\"" << irNode.getLabel() << "\"];" << std::endl;
}
-void IrDotBuilder::addEdge(Operation* op1, Operation* op2)
-{
+void IrDotBuilder::addEdge(const Operation* op1, const Operation* op2) {
dot << op1->getId() << " -> " << op2->getId() << ";" << std::endl;
}
namespace mir {
namespace ops {
-TransposeOp::TransposeOp(const IODescriptor& arg, const std::vector<std::size_t>& axis_order)
+TransposeOp::TransposeOp(Output* arg, const std::vector<std::size_t>& axis_order)
: Operation(Type::transpose, {arg}), _axisOrder(axis_order) {
assert(_axisOrder.size() == static_cast<std::size_t>(getInputShape(0).rank()));
inferOutputShapes();
Output& operator=(Output&&) = delete;
/// @brief Returns the node this is an output of.
- Operation* getNode() const { return _node; }
+ Operation* getNode() { return _node; }
+ const Operation* getNode() const { return _node; }
/// @brief Returns the index of this output among all the outputs of the node.
std::size_t getIndex() const { return _index; }
Input& operator=(Input&&) = delete;
/// @brief Returns the node this is the input of.
- Operation* getNode() const { return _node; }
+ Operation* getNode() { return _node; }
+ const Operation* getNode() const { return _node; }
/// @brief Returns the index of this input among all the inputs of the node.
std::size_t getIndex() const { return _index; }
std::deque<Output> _outputs;
};
-// Convenient type alias for the duration of the transition process.
-using IODescriptor = Operation::Output*;
-
} // namespace mir
} // namespace nnc
public:
explicit IrDotBuilder() = default;
- void updateWithOp(Operation* op, const DotIrNodeInfo& irNodeInfo);
+ void updateWithOp(const Operation* op, const DotIrNodeInfo& irNodeInfo);
void writeDot(std::ostream &os);
private:
- void addNode(Operation* op, const DotIrNodeInfo& irNode);
- void addEdge(Operation* op1, Operation* op2);
+ void addNode(const Operation* op, const DotIrNodeInfo& irNode);
+ void addEdge(const Operation* op1, const Operation* op2);
std::stringstream dot;
};
class BatchNormOp : public Operation {
public:
- BatchNormOp(const IODescriptor& arg, float movingAvgFraction, float eps, bool spatial)
+ BatchNormOp(Output* arg, float movingAvgFraction, float eps, bool spatial)
: Operation(Type::batchNorm, {arg}), _movingAvgFraction(movingAvgFraction), _eps(eps),
_spatial(spatial) {
// Infer output shape.
class BiasAddOp : public Operation {
public:
- BiasAddOp(const IODescriptor& arg1, const IODescriptor& arg2)
- : Operation(Type::biasAdd, {arg1, arg2}) {
+ BiasAddOp(Output* arg1, Output* arg2) : Operation(Type::biasAdd, {arg1, arg2}) {
// Infer output shape.
setOutputShape(0, getInputShape(0));
}
class CappedReluOp : public Operation {
public:
- CappedReluOp(const IODescriptor& arg, float cap)
- : Operation(Type::cappedReLU, {arg}), _cap(cap) {
+ CappedReluOp(Output* arg, float cap) : Operation(Type::cappedReLU, {arg}), _cap(cap) {
// Infer output shape.
setOutputShape(0, getInputShape(0));
}
*/
class ConcatOp : public Operation {
public:
- ConcatOp(const std::vector<IODescriptor>& args, int32_t axis)
+ ConcatOp(const std::vector<Output*>& args, int32_t axis)
: Operation(Type::concat, args), _axis(axis) {
inferOutputShapes();
}
class Conv2DOp : public Operation {
public:
- Conv2DOp(const IODescriptor& input,
- const IODescriptor& kernel,
+ Conv2DOp(Output* input,
+ Output* kernel,
const Shape& strides,
const std::vector<int32_t>& padding_before,
const std::vector<int32_t>& padding_after)
class DeConv2DOp : public Operation {
public:
- DeConv2DOp(const IODescriptor& input,
- const IODescriptor& kernel,
+ DeConv2DOp(Output* input,
+ Output* kernel,
const Shape& strides,
const std::vector<int32_t>& paddings)
: Operation(Type::deConv2D, {input, kernel}),
inferOutputShapes();
}
- DeConv2DOp(const IODescriptor& input,
- const IODescriptor& kernel,
+ DeConv2DOp(Output* input,
+ Output* kernel,
const Shape& strides,
PaddingType padding_type)
: Operation(Type::deConv2D, {input, kernel}),
inferOutputShapes();
}
- DeConv2DOp(const IODescriptor& input,
- const IODescriptor& kernel,
+ DeConv2DOp(Output* input,
+ Output* kernel,
const Shape& strides,
PaddingType padding_type,
const Shape& output_shape)
class DepthwiseConv2DOp : public Operation {
public:
- DepthwiseConv2DOp(const IODescriptor& input,
- const IODescriptor& kernel,
+ DepthwiseConv2DOp(Output* input,
+ Output* kernel,
const Shape& strides,
const std::vector<int32_t>& padding_before,
const std::vector<int32_t>& padding_after)
class DropoutOp : public Operation {
public:
- DropoutOp(const IODescriptor& arg, float rate) : Operation(Type::dropout, {arg}), _rate(rate) {
+ DropoutOp(Output* arg, float rate) : Operation(Type::dropout, {arg}), _rate(rate) {
// Infer output shape.
setOutputShape(0, getInputShape(0));
}
* @param op_type Type of operation to perform
* @param num_inputs Number of inputs
*/
- ElementwiseOp(const std::vector<IODescriptor>& args, OpType op_type)
- : Operation(Type::elementwise, args), _opType(op_type), _needsBroadcast(false) {
+ ElementwiseOp(const std::vector<Output*>& args, OpType op_type)
+ : Operation(Type::elementwise, args), _opType(op_type), _needsBroadcast(false) {
inferOutputShapes();
};
class EluOp : public Operation {
public:
- EluOp(const IODescriptor& arg, float alpha) : Operation(Type::ELU, {arg}), _alpha(alpha) {
+ EluOp(Output* arg, float alpha) : Operation(Type::ELU, {arg}), _alpha(alpha) {
setOutputShape(0, getInputShape(0));
}
class FullyConnectedOp : public Operation {
public:
- FullyConnectedOp(const IODescriptor& arg1, const IODescriptor& arg2)
- : Operation(Type::fullyConnected, {arg1, arg2}) {
+ FullyConnectedOp(Output* arg1, Output* arg2) : Operation(Type::fullyConnected, {arg1, arg2}) {
inferOutputShapes();
}
*/
class GatherOp : public Operation {
public:
- GatherOp(const IODescriptor& data, const IODescriptor& indices, int32_t axis)
+ GatherOp(Output* data, Output* indices, int32_t axis)
: Operation(Type::gather, {data, indices}), _axis(axis) {
inferOutputShapes();
}
class GemmOp : public Operation {
public:
- GemmOp(IODescriptor a, IODescriptor b, IODescriptor c) :
- Operation(Type::gemmOp, {a, b, c}) {
+ GemmOp(Output* a, Output* b, Output* c) : Operation(Type::gemmOp, {a, b, c}) {
inferOutputShapes();
}
class LeakyReluOp : public Operation {
public:
- explicit LeakyReluOp(const IODescriptor& arg, float alpha)
- : Operation(Type::leakyReLU, {arg}), _alpha(alpha) {
+ explicit LeakyReluOp(Output* arg, float alpha)
+ : Operation(Type::leakyReLU, {arg}), _alpha(alpha) {
// Infer output shape.
setOutputShape(0, getInputShape(0));
}
class OutputOp : public Operation {
public:
- explicit OutputOp(IODescriptor input) : Operation(Type::output, {input}) {}
+ explicit OutputOp(Output* input) : Operation(Type::output, {input}) {}
};
} // namespace ops
public:
/**
* @brief Class for Pad operation in modelIR
- * @param arg IODescriptor
+ * @param arg The input
* @param numDims Number of dimensions
* @param paddings Vector with pairs of paddings (left, right)
* @param scalar_value Constant value filling padded region
*/
- PadOp(const IODescriptor& arg, int32_t numDims,
+ PadOp(Output* arg, int32_t numDims,
const std::vector<std::pair<int32_t, int32_t>>& paddings,
const Scalar& scalar_value)
: Operation(Type::pad, {arg}), _numDims(numDims),
EMPTY // Consider that there are no elements outside of input shape
};
- PoolOp(const IODescriptor& arg,
+ PoolOp(Output* arg,
PoolingType pooling_type,
const Shape& window_shape,
const Shape& strides,
* @param keep_dims whether to keep the original rank
* @param func_type function to reduce the tensor with (should be associative)
*/
- ReduceFOp(const IODescriptor& arg,
+ ReduceFOp(Output* arg,
const std::vector<int32_t>& reduce_dims,
bool keep_dims,
FuncType func_type)
class ReluOp : public Operation {
public:
- explicit ReluOp(const IODescriptor& arg) : Operation(Type::ReLU, {arg}) {
+ explicit ReluOp(Output* arg) : Operation(Type::ReLU, {arg}) {
// Infer output shape.
setOutputShape(0, getInputShape(0));
}
class ReshapeOp : public Operation {
public:
- ReshapeOp(const IODescriptor& arg, const Shape& shape) : Operation(Type::reshape, {arg}) {
+ ReshapeOp(Output* arg, const Shape& shape) : Operation(Type::reshape, {arg}) {
const Shape& input_shape = getInputShape(0);
auto output_shape = shape;
nearestNeighbor, // TODO: BICUBIC and BILINEAR
};
- ResizeOp(const IODescriptor& arg, ResizeMethod mode, const std::vector<float>& scales)
+ ResizeOp(Output* arg, ResizeMethod mode, const std::vector<float>& scales)
: Operation(Type::resizeIm, {arg}), _mode(mode), _scales(scales) {
// Infer output shape based on given scales.
auto& input_shape = getInputShape(0);
setOutputShape(0, output_shape);
}
- ResizeOp(const IODescriptor& arg, ResizeMethod mode, const Shape& output_shape)
+ ResizeOp(Output* arg, ResizeMethod mode, const Shape& output_shape)
: Operation(Type::resizeIm, {arg}), _mode(mode) {
// Calculate scales based on given shape.
auto& input_shape = getInputShape(0);
class ScaleOp : public Operation {
public:
- ScaleOp(const IODescriptor& arg1, const IODescriptor& arg2)
- : Operation(Type::scale, {arg1, arg2}) {
+ ScaleOp(Output* arg1, Output* arg2) : Operation(Type::scale, {arg1, arg2}) {
// Infer output shape.
setOutputShape(0, getInputShape(0));
}
class SigmoidOp : public Operation {
public:
- explicit SigmoidOp(const IODescriptor& arg) : Operation(Type::sigmoid, {arg}) {
+ explicit SigmoidOp(Output* arg) : Operation(Type::sigmoid, {arg}) {
// Infer output shape.
setOutputShape(0, getInputShape(0));
}
class SliceOp : public Operation {
public:
- SliceOp(const IODescriptor& arg, const Shape& starts, const Shape& sizes) :
- Operation(Type::slice, {arg}),
- _starts(starts),
- _sizes(sizes) {
+ SliceOp(Output* arg, const Shape& starts, const Shape& sizes)
+ : Operation(Type::slice, {arg}), _starts(starts), _sizes(sizes) {
inferOutputShapes();
}
*/
class SoftmaxOp : public Operation {
public:
- SoftmaxOp(const IODescriptor& arg, int32_t axis) : Operation(Type::softmax, {arg}), _axis(axis) {
+ SoftmaxOp(Output* arg, int32_t axis) : Operation(Type::softmax, {arg}), _axis(axis) {
setOutputShape(0, getInputShape(0));
}
class SqrtOp : public Operation {
public:
- SqrtOp(const IODescriptor& arg) : Operation(Type::sqrt, {arg}) {
+ SqrtOp(Output* arg) : Operation(Type::sqrt, {arg}) {
setOutputShape(0, getInputShape(0));
};
};
class SqueezeOp : public Operation {
public:
- SqueezeOp(const IODescriptor& arg, const std::vector<int32_t>& dims_to_squeeze)
- : Operation(Type::squeeze, {arg}), _dims_to_squeeze(dims_to_squeeze) {
+ SqueezeOp(Output* arg, const std::vector<int32_t>& dims_to_squeeze)
+ : Operation(Type::squeeze, {arg}), _dims_to_squeeze(dims_to_squeeze) {
// Infer output shape.
inferOutputShapes();
}
class TanhOp : public Operation {
public:
- explicit TanhOp(const IODescriptor& arg) : Operation(Type::tanh, {arg}) {
+ explicit TanhOp(Output* arg) : Operation(Type::tanh, {arg}) {
// Infer output shape.
setOutputShape(0, getInputShape(0));
}
*/
class TransposeOp : public Operation {
public:
- TransposeOp(const IODescriptor& arg, const std::vector<std::size_t>& axis_order);
+ TransposeOp(Output* arg, const std::vector<std::size_t>& axis_order);
const std::vector<std::size_t>& getAxisOrder() const { return _axisOrder; }
void setInput(const std::string& name, const TensorVariant& data);
- TensorVariant getResult(IODescriptor tensor);
+ TensorVariant getResult(const Operation::Output* tensor);
void visit(ops::BatchNormOp& op) override;
void visit(ops::BiasAddOp& op) override;
void visit(ops::TanhOp& op) override;
void visit(ops::TransposeOp& op) override;
- void dump(Operation& op, bool all = false);
-
private:
/// @brief Gets the computed inputs for the operation.
std::vector<std::reference_wrapper<const TensorVariant>> getInputTensors(const Operation& op);
void AclCppOpGenerator::visit(ops::ConcatOp& op) {
const auto& ir_inputs = op.getInputs();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_output = op.getOutput(0);
static const char* axis_names[] = {"arm_compute::DataLayoutDimension::BATCHES",
"arm_compute::DataLayoutDimension::CHANNEL",
void AclCppOpGenerator::visit(ops::SoftmaxOp& op) {
assert(op.getNumInputs() == 1);
- IODescriptor ir_input = op.getInput(0)->getProducer();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_input = op.getInput(0)->getProducer();
+ const auto* ir_output = op.getOutput(0);
auto in = AF::id(tensorName(ir_input));
void AclCppOpGenerator::visit(ops::PoolOp& op) {
assert(op.getNumInputs() == 1);
- IODescriptor ir_input = op.getInput(0)->getProducer();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_input = op.getInput(0)->getProducer();
+ const auto* ir_output = op.getOutput(0);
const char* pooling_type = nullptr;
void AclCppOpGenerator::visit(ops::FullyConnectedOp& op) {
assert(op.getNumInputs() == 2);
- IODescriptor ir_input = op.getInput(0)->getProducer();
- IODescriptor ir_weights = op.getInput(1)->getProducer();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_input = op.getInput(0)->getProducer();
+ const auto* ir_weights = op.getInput(1)->getProducer();
+ const auto* ir_output = op.getOutput(0);
- auto ir_weights_op = dynamic_cast<mir::ops::ConstantOp*>(ir_weights->getNode());
+ auto ir_weights_op = dynamic_cast<const mir::ops::ConstantOp*>(ir_weights->getNode());
if (ir_weights_op == nullptr)
throw AclCppException("Unsupported operation type");
void AclCppOpGenerator::visit(ops::BiasAddOp& op) {
assert(op.getNumInputs() == 2);
- IODescriptor ir_input = op.getInput(0)->getProducer();
- IODescriptor ir_weights = op.getInput(1)->getProducer();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_input = op.getInput(0)->getProducer();
+ const auto* ir_weights = op.getInput(1)->getProducer();
+ const auto* ir_output = op.getOutput(0);
- auto ir_weights_op = dynamic_cast<ops::ConstantOp*>(ir_weights->getNode());
+ auto ir_weights_op = dynamic_cast<const ops::ConstantOp*>(ir_weights->getNode());
if (ir_weights_op == nullptr)
throw AclCppException("Unsupported operation type");
addToPersistentTensors(tensor);
}
-// FIXME: temporary decision
-static bool shouldSerializeConstant(ops::ConstantOp& op) {
+// FIXME: temporary solution
+static bool shouldSerializeConstant(const ops::ConstantOp& op) {
// Operations from 'self_serializing_ops_to_inputs' serialize tensors with the appropriate index themselves,
// so we don't serialize them here, also we don't serialize tensors from dangling ConstantOp
static std::map<Operation::Type, std::size_t> self_serializing_ops_to_inputs{
void AclCppOpGenerator::visit(ops::ReshapeOp& op) {
assert(op.getNumInputs() == 1);
- IODescriptor ir_input = op.getInput(0)->getProducer();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_input = op.getInput(0)->getProducer();
+ const auto* ir_output = op.getOutput(0);
// Get the id of the input tensor in the generated artifact.
auto in = AF::id(tensorName(ir_input));
// May be not a perfect implementation, using the CLPixelWiseMultiplication ACL function taking
// two input tensors with the same shapes.
assert(op.getNumInputs() == 2);
- IODescriptor ir_input = op.getInput(0)->getProducer();
- IODescriptor ir_weights = op.getInput(1)->getProducer();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_input = op.getInput(0)->getProducer();
+ const auto* ir_weights = op.getInput(1)->getProducer();
+ const auto* ir_output = op.getOutput(0);
- auto ir_weights_op = dynamic_cast<ops::ConstantOp*>(ir_weights->getNode());
+ auto ir_weights_op = dynamic_cast<const ops::ConstantOp*>(ir_weights->getNode());
if (ir_weights_op == nullptr)
throw AclCppException("Unsupported operation type");
void AclCppOpGenerator::visit(ops::DropoutOp& op) {
assert(op.getNumInputs() == 1);
- IODescriptor ir_input = op.getInput(0)->getProducer();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_input = op.getInput(0)->getProducer();
+ const auto* ir_output = op.getOutput(0);
// Just copy input tensor to the output one.
void AclCppOpGenerator::visit(ops::ElementwiseOp& op) {
assert(op.getNumInputs() >= 2);
const auto& ir_inputs = op.getInputs();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_output = op.getOutput(0);
// Create the output tensor in the DOM and obtain its identifier.
auto out = genTensor(ir_output);
auto in1 = AF::id(tensorName(ir_inputs[0].getProducer()));
for (size_t i = 1; i < ir_inputs.size(); ++i) {
- IODescriptor ir_input = ir_inputs[i].getProducer();
+ const auto* ir_input = ir_inputs[i].getProducer();
// Get the identifier of the second input tensor in the DOM.
auto in2 = AF::id(tensorName(ir_input));
void AclCppOpGenerator::visit(ops::PadOp& op) {
assert(op.getNumInputs() == 1);
- IODescriptor ir_input = op.getInput(0)->getProducer();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_input = op.getInput(0)->getProducer();
+ const auto* ir_output = op.getOutput(0);
// Get the id of the input tensor.
auto input = AF::id(tensorName(ir_input));
template <typename Op>
void AclCppOpGenerator::genConvolution(Op& op, const string& acl_func_name, const string& suffix) {
- IODescriptor ir_input = op.getInput(0)->getProducer();
- IODescriptor ir_weights = op.getInput(1)->getProducer();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_input = op.getInput(0)->getProducer();
+ const auto* ir_weights = op.getInput(1)->getProducer();
+ const auto* ir_output = op.getOutput(0);
- auto ir_weights_op = dynamic_cast<ops::ConstantOp*>(ir_weights->getNode());
+ auto ir_weights_op = dynamic_cast<const ops::ConstantOp*>(ir_weights->getNode());
if (ir_weights_op == nullptr)
throw AclCppException("Unsupported operation type");
genTensorDeallocation(_infBlock, transposed_output);
}
-void AclCppOpGenerator::genActivation(mir::Operation& op, const std::string& activation_name,
+void AclCppOpGenerator::genActivation(const Operation& op, const std::string& activation_name,
float a, float b) {
assert(op.getNumInputs() == 1);
- IODescriptor ir_input = op.getInput(0)->getProducer();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_input = op.getInput(0)->getProducer();
+ const auto* ir_output = op.getOutput(0);
// Get the id of the input tensor.
auto in = AF::id(tensorName(ir_input));
return out;
}
-string AclCppOpGenerator::tensorName(IODescriptor ir_tensor) const {
+string AclCppOpGenerator::tensorName(const Operation::Output* ir_tensor) const {
string tensor_name;
// TODO Use the tensor name instead of the operation name.
return id;
}
-shared_ptr<ArtifactId> AclCppOpGenerator::genTensor(IODescriptor ir_tensor) {
+shared_ptr<ArtifactId> AclCppOpGenerator::genTensor(const Operation::Output* ir_tensor) {
return genTensor(tensorName(ir_tensor), ir_tensor->getShape(),
!ir_tensor->getNode()->getName().empty());
}
void AclCppOpGenerator::genNamed(Graph* graph) {
const auto& inputs = graph->getInputs();
if (inputs.size() == 1) {
- auto* input_op = inputs[0];
+ const auto* input_op = inputs[0];
auto f = _artifactClass->func(true, "arm_compute::CLTensor&", "getInput");
auto b = f->getBlock();
auto id = AF::id(tensorName(input_op->getOutput(0)));
const auto& outputs = graph->getOutputs();
if (outputs.size() == 1) {
- auto* output_op = outputs[0];
+ const auto* output_op = outputs[0];
auto f = _artifactClass->func(true, "arm_compute::CLTensor&", "getOutput");
auto b = f->getBlock();
auto id = AF::id(tensorName(output_op->getInput(0)->getProducer()));
void AclCppOpGenerator::visit(mir::ops::TransposeOp& op) {
assert(op.getNumInputs() == 1);
- IODescriptor ir_input = op.getInput(0)->getProducer();
- IODescriptor ir_output = op.getOutput(0);
+ const auto* ir_input = op.getInput(0)->getProducer();
+ const auto* ir_output = op.getOutput(0);
// Get the input node tensor id in the DOM.
shared_ptr<ArtifactId> input = AF::id(tensorName(ir_input));
* @param b - betha parameter used by some activation functions: LINEAR, LU_BOUNDED_RELU, TANH.
*/
void
- genActivation(mir::Operation& op, const std::string& activation_name, float a = 0, float b = 0);
+ genActivation(const mir::Operation& op, const std::string& activation_name, float a = 0,
+ float b = 0);
/**
* @brief Used to generate a binary addition operation in handling of the elementwise.
/**
* @brief Generates a unique name for the tensor.
*/
- std::string tensorName(mir::IODescriptor ir_tensor) const;
+ std::string tensorName(const mir::Operation::Output* ir_tensor) const;
/**
* @brief Generates variables tensor shape in DOM.
* @param ir_tensor - the ModelIR tensor.
* @return - a DOM identifier for the created tensor.
*/
- std::shared_ptr<ArtifactId> genTensor(mir::IODescriptor ir_tensor);
+ std::shared_ptr<ArtifactId> genTensor(const mir::Operation::Output* ir_tensor);
/**
* @brief generate transposing operation, @p mir_perm contains dimensions in MIR order (batch has index 0)
}
void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) {
- std::vector<mir::IODescriptor> outputs;
+ std::vector<mir::Operation::Output*> outputs;
// If op input not met yet - consider it as model input
if (op.input_size() > 0
- && _blobNameToIODescriptor.find(op.input(0)) == _blobNameToIODescriptor.end()) {
+ && _blobNameToOutput.find(op.input(0)) == _blobNameToOutput.end()) {
outputs = _opCreator->createInput(op.input(0), _inputShapes.front());
- _blobNameToIODescriptor[op.input(0)] = outputs.at(0);
+ _blobNameToOutput[op.input(0)] = outputs.at(0);
_inputShapes.erase(_inputShapes.begin(), _inputShapes.begin() + 1);
}
for (int i = 0; i < outputs.size(); ++i) {
// caffe2 input blob name could be same as output blob name, and next line will overwrite
- // '_blobNameToIODescriptor' element, but in all networks that I saw it was not a problem
- _blobNameToIODescriptor[op.output(i)] = outputs.at(i);
+ // '_blobNameToOutput' element, but in all networks that I saw it was not a problem
+ _blobNameToOutput[op.output(i)] = outputs.at(i);
}
_lastMIROp = outputs.at(0)->getNode();
return mir::TensorVariant(element_type, tensor_shape, src_data);
}
-std::vector<mir::IODescriptor> Caffe2Importer::getInputMIROps(const OperatorDef& op) {
+std::vector<mir::Operation::Output*> Caffe2Importer::getInputMIROps(const OperatorDef& op) {
// caffe2 operation inputs not same as MIR inputs (ex: in caffe2 conv kernel and bias also inputs)
// so choose caffe2 inputs, which are 'real' inputs
- std::vector<mir::IODescriptor> inputs;
+ std::vector<mir::Operation::Output*> inputs;
SupportedCaffe2OpType opType = _operatorTypes.at(op.type());
if (opType != SupportedCaffe2OpType::givenTensorFill &&
opType != SupportedCaffe2OpType::constantFill &&
opType != SupportedCaffe2OpType::givenTensorInt64Fill)
{
for (auto& i : op.input())
- if (_blobNameToIODescriptor.find(i) != _blobNameToIODescriptor.end())
- inputs.push_back(_blobNameToIODescriptor[i]);
+ if (_blobNameToOutput.find(i) != _blobNameToOutput.end())
+ inputs.push_back(_blobNameToOutput[i]);
}
return inputs;
// set of strings describing incorrect parts of network and parts of network unsupported by NNC
std::set<std::string> _problemsOpSet;
- // This map maps caffe2 operators names to MIR operators
- // that correspond to previous caffe2 operators
- std::unordered_map<std::string, mir::IODescriptor> _blobNameToIODescriptor;
+ // Maps Caffe2 operator input names to corresponding MIR operation outputs.
+ std::unordered_map<std::string, mir::Operation::Output*> _blobNameToOutput;
mir::Operation* _lastMIROp = nullptr;
std::map<std::string, mir::TensorVariant> _MIRTensors;
mir::TensorVariant createTensor(const ::caffe2::OperatorDef& op);
/**
- * @brief Returns MIR ops, under given caffe2 op
+ * @brief Returns MIR operation outputs corresponding to the inputs of the given operator.
*/
- std::vector<mir::IODescriptor> getInputMIROps(const ::caffe2::OperatorDef&);
+ std::vector<mir::Operation::Output*> getInputMIROps(const ::caffe2::OperatorDef&);
/**
* @brief Mark output MIR nodes
}
static Shape getWindowShape(const ::caffe2::OperatorDef& op,
- const std::vector<IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
int is_global_pooling = getSingleArgument(op, "global_pooling", 0);
bool has_custom_kernel_size = hasArgument(op.arg(), "kernel_h") ||
hasArgument(op.arg(), "kernel_w");
return Shape{kernel_h, kernel_w};
}
-mir::IODescriptor Caffe2OpCreator::convertCaffeToMIR(const mir::IODescriptor& arg) {
+mir::Operation::Output* Caffe2OpCreator::convertCaffeToMIR(mir::Operation::Output* arg) {
// NCHW -> NHWC
auto transpose = createOp<ops::TransposeOp>("CaffeToMIR", arg, std::vector<std::size_t>{0, 2, 3, 1});
return transpose->getOutput(0);
}
-mir::IODescriptor Caffe2OpCreator::convertMIRToCaffe(const mir::IODescriptor& arg) {
+mir::Operation::Output* Caffe2OpCreator::convertMIRToCaffe(mir::Operation::Output* arg) {
// NHWC -> NCHW
auto transpose = createOp<ops::TransposeOp>("MIRToCaffe", arg, std::vector<std::size_t>{0, 3, 1, 2});
return transpose->getOutput(0);
// Convert functions
//
-std::vector<mir::IODescriptor>
-Caffe2OpCreator::convertAdd(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertAdd(const std::vector<mir::Operation::Output*>& inputs,
const ::caffe2::OperatorDef& op,
const MIRTensors& mir_tensors) {
- std::vector<mir::IODescriptor> add_input;
+ std::vector<mir::Operation::Output*> add_input;
for (const auto& i : inputs)
add_input.push_back(convertCaffeToMIR(i));
return {convertMIRToCaffe(add->getOutput(0))};
}
-std::vector<IODescriptor>
-Caffe2OpCreator::convertAveragePool(const std::vector<IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertAveragePool(const std::vector<mir::Operation::Output*>& inputs,
const OperatorDef& op) {
Shape window_shape = getWindowShape(op, inputs);
return {convertMIRToCaffe(pooling->getOutput(0))};
}
-std::vector<IODescriptor> Caffe2OpCreator::convertConv(const std::vector<IODescriptor>& inputs,
- const ::caffe2::OperatorDef& op,
- const MIRTensors& mir_tensors) {
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertConv(const std::vector<mir::Operation::Output*>& inputs,
+ const ::caffe2::OperatorDef& op,
+ const MIRTensors& mir_tensors) {
// dilation order: h w (not used)
Shape stride_shape(getStrides(op));
return {convertMIRToCaffe(result->getOutput(0))};
}
-std::vector<IODescriptor> Caffe2OpCreator::convertConcat(const std::vector<IODescriptor>& inputs,
- const ::caffe2::OperatorDef& op) {
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertConcat(const std::vector<mir::Operation::Output*>& inputs,
+ const ::caffe2::OperatorDef& op) {
int axis = getSingleArgument(op, "axis", 1);
auto result = createOp<ops::ConcatOp>("Concat", inputs, axis);
return {result->getOutput(0)};
}
-std::vector<IODescriptor> Caffe2OpCreator::convertDropout(const std::vector<IODescriptor>& inputs,
- const ::caffe2::OperatorDef& op) {
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertDropout(const std::vector<mir::Operation::Output*>& inputs,
+ const ::caffe2::OperatorDef& op) {
int is_test = getSingleArgument(op, "is_test", 0);
if (is_test)
return {inputs[0]};
return {dropout->getOutput(0)};
}
-std::vector<IODescriptor>
-Caffe2OpCreator::convertFullyConnected(const std::vector<IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertFullyConnected(const std::vector<mir::Operation::Output*>& inputs,
const ::caffe2::OperatorDef& op,
const MIRTensors& mir_tensors) {
auto weights_tensor = transposeTensor<1, 0>(mir_tensors.at(op.input(1)));
}
-std::vector<IODescriptor> Caffe2OpCreator::convertMaxPool(const std::vector<IODescriptor>& inputs,
- const OperatorDef& op) {
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertMaxPool(const std::vector<mir::Operation::Output*>& inputs,
+ const OperatorDef& op) {
Shape window_shape = getWindowShape(op, inputs);
Shape strides(getStrides(op));
return {convertMIRToCaffe(pooling->getOutput(0))};
}
-std::vector<mir::IODescriptor>
-Caffe2OpCreator::convertMul(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertMul(const std::vector<mir::Operation::Output*>& inputs,
const ::caffe2::OperatorDef& op,
const MIRTensors& mir_tensors) {
- std::vector<IODescriptor> input_descriptors;
+ std::vector<mir::Operation::Output*> input_descriptors;
for (const auto& i: inputs)
input_descriptors.push_back(convertCaffeToMIR(i));
return {convertMIRToCaffe(mul->getOutput(0))};
}
-std::vector<IODescriptor>
-Caffe2OpCreator::convertRelu(const std::vector<IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertRelu(const std::vector<mir::Operation::Output*>& inputs) {
auto relu = createOp<ops::ReluOp>("Relu", inputs[0]);
return {relu->getOutput(0)};
}
-std::vector<IODescriptor>
-Caffe2OpCreator::convertResizeNearest(const std::vector<IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertResizeNearest(const std::vector<mir::Operation::Output*>& inputs,
const ::caffe2::OperatorDef& op) {
// assume NCHW and convert to MIR (NHWC)
std::vector<float> scales(4);
return {convertMIRToCaffe(resize->getOutput(0))};
}
-std::vector<IODescriptor>
-Caffe2OpCreator::convertSigmoid(const std::vector<IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertSigmoid(const std::vector<mir::Operation::Output*>& inputs) {
auto result = createOp<ops::SigmoidOp>("Sigmoid", inputs[0]);
return {result->getOutput(0)};
}
-std::vector<IODescriptor> Caffe2OpCreator::convertSoftmax(const std::vector<IODescriptor>& inputs,
- const ::caffe2::OperatorDef& op) {
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertSoftmax(const std::vector<mir::Operation::Output*>& inputs,
+ const ::caffe2::OperatorDef& op) {
int axis = getSingleArgument(op, "axis", 1);
auto softmax = createOp<ops::SoftmaxOp>("Softmax", inputs[0], axis);
return {softmax->getOutput(0)};
}
-std::vector<mir::IODescriptor>
-Caffe2OpCreator::convertSpatialBN(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertSpatialBN(const std::vector<mir::Operation::Output*>& inputs,
const ::caffe2::OperatorDef& op,
const MIRTensors& mir_tensors) {
// overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias
return {convertMIRToCaffe(result->getOutput(0))};
}
-std::vector<IODescriptor> Caffe2OpCreator::convertSum(const std::vector<IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertSum(const std::vector<mir::Operation::Output*>& inputs) {
const auto& input_shape = inputs[0]->getShape();
for (auto& in : inputs)
assert(input_shape == in->getShape() && "All Sum inputs must have same shape");
return {op->getOutput(0)};
}
-std::vector<mir::IODescriptor>
-Caffe2OpCreator::convertClip(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertClip(const std::vector<mir::Operation::Output*>& inputs,
const ::caffe2::OperatorDef& op) {
float max = getSingleArgument(op, "max", float(0));
}
-std::vector<mir::IODescriptor>
-Caffe2OpCreator::convertReshape(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+Caffe2OpCreator::convertReshape(const std::vector<mir::Operation::Output*>& inputs,
const ::caffe2::OperatorDef& op,
const MIRTensors& mir_tensors) {
// Check new shape input
return {reshape->getOutput(0)};
}
-std::vector<IODescriptor>
+std::vector<mir::Operation::Output*>
Caffe2OpCreator::createInput(const std::string& name, const mir::Shape& shape) {
auto variable = _graph->create<ops::InputOp>(name, shape);
return {variable->getOutput(0)};
public:
explicit Caffe2OpCreator(Graph* g) : _graph(g) {};
- void checkAdd(const ::caffe2::OperatorDef&, std::set<std::string>&);
-
void checkConvLikeOp(const ::caffe2::OperatorDef&, std::set<std::string>&);
void checkFC(const ::caffe2::OperatorDef&, std::set<std::string>&);
- void checkMul(const ::caffe2::OperatorDef&, std::set<std::string>&);
-
void checkSpatialBN(const ::caffe2::OperatorDef&, std::set<std::string>&);
void commonCheck(const ::caffe2::OperatorDef&, std::set<std::string>&);
- std::vector<mir::IODescriptor> convertAdd(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&, const MIRTensors&);
+ std::vector<mir::Operation::Output*>
+ createInput(const std::string& name, const mir::Shape& shape);
- std::vector<mir::IODescriptor> convertAveragePool(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&);
+ std::vector<mir::Operation::Output*>
+ convertAdd(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&,
+ const MIRTensors&);
- std::vector<mir::IODescriptor> convertConv(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&, const MIRTensors&);
+ std::vector<mir::Operation::Output*>
+ convertAveragePool(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&);
- std::vector<mir::IODescriptor> convertConcat(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&);
+ std::vector<mir::Operation::Output*>
+ convertConv(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&,
+ const MIRTensors&);
- std::vector<mir::IODescriptor> convertDropout(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&);
+ std::vector<mir::Operation::Output*>
+ convertConcat(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&);
- std::vector<mir::IODescriptor> convertFullyConnected(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&,
- const MIRTensors&);
+ std::vector<mir::Operation::Output*>
+ convertDropout(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&);
- std::vector<mir::IODescriptor> createInput(const std::string& name, const mir::Shape& shape);
+ std::vector<mir::Operation::Output*>
+ convertFullyConnected(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&,
+ const MIRTensors&);
- std::vector<mir::IODescriptor> convertMaxPool(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&);
+ std::vector<mir::Operation::Output*>
+ convertMaxPool(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&);
- std::vector<mir::IODescriptor> convertMul(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&, const MIRTensors&);
+ std::vector<mir::Operation::Output*>
+ convertMul(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&,
+ const MIRTensors&);
- std::vector<mir::IODescriptor> convertRelu(const std::vector<mir::IODescriptor>&);
+ std::vector<mir::Operation::Output*>
+ convertRelu(const std::vector<mir::Operation::Output*>&);
- std::vector<mir::IODescriptor> convertResizeNearest(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&);
+ std::vector<mir::Operation::Output*>
+ convertResizeNearest(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&);
- std::vector<mir::IODescriptor> convertSigmoid(const std::vector<mir::IODescriptor>&);
+ std::vector<mir::Operation::Output*>
+ convertSigmoid(const std::vector<mir::Operation::Output*>&);
- std::vector<mir::IODescriptor> convertSoftmax(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&);
+ std::vector<mir::Operation::Output*>
+ convertSoftmax(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&);
- std::vector<mir::IODescriptor> convertSpatialBN(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&, const MIRTensors&);
+ std::vector<mir::Operation::Output*>
+ convertSpatialBN(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&,
+ const MIRTensors&);
- std::vector<mir::IODescriptor> convertSum(const std::vector<mir::IODescriptor>&);
+ std::vector<mir::Operation::Output*>
+ convertSum(const std::vector<mir::Operation::Output*>&);
- std::vector<mir::IODescriptor> convertClip(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&);
+ std::vector<mir::Operation::Output*>
+ convertClip(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&);
- std::vector<mir::IODescriptor> convertReshape(const std::vector<mir::IODescriptor>&,
- const ::caffe2::OperatorDef&, const MIRTensors&);
+ std::vector<mir::Operation::Output*>
+ convertReshape(const std::vector<mir::Operation::Output*>&,
+ const ::caffe2::OperatorDef&,
+ const MIRTensors&);
private:
Graph* _graph = nullptr;
- mir::IODescriptor convertCaffeToMIR(const mir::IODescriptor& arg);
+ mir::Operation::Output* convertCaffeToMIR(mir::Operation::Output* arg);
- mir::IODescriptor convertMIRToCaffe(const mir::IODescriptor& arg);
+ mir::Operation::Output* convertMIRToCaffe(mir::Operation::Output* arg);
template <typename OpType, typename ...Types>
mir::Operation* createOp(const std::string& name, Types&& ... args);
}
void CaffeImporter::createMIRNodesFromLayer(const LayerParameter& layer) {
- auto inputs = getMIRInputsForLayer(layer);
+ std::vector<mir::Operation::Output*> inputs = getMIRInputsForLayer(layer);
+ std::vector<mir::Operation::Output*> outputs;
- std::vector<IODescriptor> outputs;
- CaffeOpType op_type = _operatorTypes.at(layer.type());
-
- switch (op_type) {
+ switch (_operatorTypes.at(layer.type())) {
case CaffeOpType::input:
outputs = _opCreator->convertInput(layer);
break;
assert(layer.top_size() == static_cast<int>(outputs.size()) && "Number of outputs differs.");
for (int i = 0; i < layer.top_size(); ++i)
- _blobNameToIODescriptor[layer.top(i)] = outputs.at(i);
+ _blobNameToOpOutput[layer.top(i)] = outputs.at(i);
}
void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) {
throw PassException("Deprecated Caffe input types are not supported");
}
-std::vector<mir::IODescriptor> CaffeImporter::getMIRInputsForLayer(const LayerParameter& layer) {
- std::vector<mir::IODescriptor> inputs;
+std::vector<mir::Operation::Output*>
+CaffeImporter::getMIRInputsForLayer(const LayerParameter& layer) {
+ std::vector<mir::Operation::Output*> inputs;
for (const auto& input_name : layer.bottom())
- inputs.push_back(_blobNameToIODescriptor.at(input_name));
+ inputs.push_back(_blobNameToOpOutput.at(input_name));
return inputs;
}
// For now, we assume that:
// - there is exactly one output;
// - the output is from the last layer.
- auto output = _blobNameToIODescriptor[last_layer.top(0)];
+ auto output = _blobNameToOpOutput[last_layer.top(0)];
_graph->create<mir::ops::OutputOp>(output->getNode()->getName(), output);
output->getNode()->setName("");
}
mir::Graph* _graph;
std::unique_ptr<CaffeOpCreator> _opCreator;
- // Maps Caffe blob name to MIR IODescriptor.
- std::map<std::string, mir::IODescriptor> _blobNameToIODescriptor;
+ // Maps Caffe blob names to corresponding MIR operation outputs.
+ std::map<std::string, mir::Operation::Output*> _blobNameToOpOutput;
static const std::map<std::string, CaffeOpType> _operatorTypes;
// set of strings describing incorrect parts of network and parts of network unsupported by NNC
void collectUnsupportedOp(const ::caffe::LayerParameter& lp);
/**
- * @brief Return MIR IODescriptors for the inputs of the given layer.
+ * @brief Returns MIR operation outputs corresponding to the inputs of the given layer.
*/
- std::vector<mir::IODescriptor> getMIRInputsForLayer(const ::caffe::LayerParameter& layer);
+ std::vector<mir::Operation::Output*> getMIRInputsForLayer(const ::caffe::LayerParameter& layer);
void processDeprecatedInput();
};
using namespace mir;
using namespace ::caffe;
-mir::IODescriptor CaffeOpCreator::convertCaffeToMIR(const mir::IODescriptor& arg) {
+mir::Operation::Output* CaffeOpCreator::convertCaffeToMIR(mir::Operation::Output* arg) {
// NCHW -> NHWC
auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 2, 3, 1});
return transpose->getOutput(0);
}
-mir::IODescriptor CaffeOpCreator::convertMIRToCaffe(const mir::IODescriptor& arg) {
+mir::Operation::Output* CaffeOpCreator::convertMIRToCaffe(mir::Operation::Output* arg) {
// NHWC -> NCHW
auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 3, 1, 2});
return transpose->getOutput(0);
}
-mir::IODescriptor CaffeOpCreator::createAdd(mir::IODescriptor arg1, mir::IODescriptor arg2) {
- std::vector<IODescriptor> inputs{arg1, arg2};
+mir::Operation::Output*
+CaffeOpCreator::createAdd(mir::Operation::Output* arg1, mir::Operation::Output* arg2) {
+ std::vector<mir::Operation::Output*> inputs{arg1, arg2};
auto op = createOp<ops::ElementwiseOp>("", inputs, ops::ElementwiseOp::OpType::add);
return op->getOutput(0);
}
-mir::IODescriptor CaffeOpCreator::createMul(mir::IODescriptor arg1, mir::IODescriptor arg2) {
- std::vector<IODescriptor> inputs{arg1, arg2};
+mir::Operation::Output*
+CaffeOpCreator::createMul(mir::Operation::Output* arg1, mir::Operation::Output* arg2) {
+ std::vector<mir::Operation::Output*> inputs{arg1, arg2};
auto op = createOp<ops::ElementwiseOp>("", inputs, ops::ElementwiseOp::OpType::mul);
return op->getOutput(0);
}
/// @brief Split arg into @p num_parts equal parts along @p axis axis.
-std::vector<mir::IODescriptor>
-CaffeOpCreator::createSplit(mir::IODescriptor arg, int32_t num_parts, int32_t axis) {
+std::vector<mir::Operation::Output*>
+CaffeOpCreator::createSplit(mir::Operation::Output* arg, int32_t num_parts, int32_t axis) {
const auto& arg_shape = arg->getShape();
assert(axis >= 0 && axis < arg_shape.rank());
Shape sizes(arg_shape);
sizes.dim(axis) = part_size;
- std::vector<mir::IODescriptor> outputs(num_parts);
+ std::vector<mir::Operation::Output*> outputs(num_parts);
for (int32_t i = 0; i < num_parts; ++i) {
outputs[i] = createOp<ops::SliceOp>("", arg, starts, sizes)->getOutput(0);
starts.dim(axis) += part_size;
}
/// @brief Helper function for creating FullyConnected operation with non-square input.
-IODescriptor
-CaffeOpCreator::createFullyConnected(const mir::IODescriptor& input,
- const mir::IODescriptor& weights,
+mir::Operation::Output*
+CaffeOpCreator::createFullyConnected(mir::Operation::Output* input,
+ mir::Operation::Output* weights,
int32_t axis) {
const auto& input_shape = input->getShape();
const auto& weights_shape = weights->getShape();
return TensorVariant(dtype, shape, src_data);
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertInput(const LayerParameter& layer) {
const auto& params = layer.input_param();
const auto num_inputs = layer.top_size();
const auto num_shapes = params.shape_size();
- std::vector<mir::IODescriptor> descriptors;
+ std::vector<mir::Operation::Output*> outputs;
assert((num_shapes == 1 || num_shapes == num_inputs) && "Unsupported number of shapes.");
const auto& blob_shape = params.shape(num_shapes == 1 ? 0 : i);
Shape shape = ShapeHelper::createShape(blob_shape.dim(), blob_shape.dim_size());
auto variable = createOp<ops::InputOp>(blob_name, shape);
- descriptors.push_back(variable->getOutput(0));
+ outputs.push_back(variable->getOutput(0));
}
- return descriptors;
+ return outputs;
}
static void convertConvolutionParam(const ConvolutionParameter& conv_param, Shape& strides,
problems_op_set.insert("Conv2D: Unsupported number of pads");
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertConvolution(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
const auto& params = layer.convolution_param();
Shape strides;
std::vector<int32_t> padding;
return {convertMIRToCaffe(result->getOutput(0))};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertDeconvolution(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto& opts = layer.convolution_param();
Shape strides;
std::vector<int32_t> padding;
return {convertMIRToCaffe(result->getOutput(0))};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertInnerProduct(const LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
const auto& params = layer.inner_product_param();
auto weights_tensor = convertBlob(layer.blobs(0));
return {result};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertConcat(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
const auto& params = layer.concat_param();
auto concat = createOp<ops::ConcatOp>(layer.name(), inputs, params.axis());
return {concat->getOutput(0)};
problemsOpSet.insert("Pooling: conflicting padding properties in pooling");
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertPooling(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto& opts = layer.pooling_param();
Shape window_shape;
Shape strides;
return {convertMIRToCaffe(pooling->getOutput(0))};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertSoftmax(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
const auto& params = layer.softmax_param();
// CPP and ACL backends are able to perform Softmax only along the last axis.
* @todo Decide how to react to the absence of "shape" parameter.
* @todo Support zero values in "shape" parameter.
*/
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertReshape(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto& opts = layer.reshape_param();
Shape new_shape = ShapeHelper::createShape(opts.shape().dim(), opts.shape().dim_size());
auto reshape = createOp<ops::ReshapeOp>(layer.name(), inputs[0], new_shape);
return {reshape->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertReLU(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
mir::Operation* relu;
if (layer.relu_param().has_negative_slope()) {
float alpha = layer.relu_param().negative_slope();
return {relu->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertScale(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
const auto& params = layer.scale_param();
auto scale = createOp<ops::ConstantOp>("", convertBlob(layer.blobs(0)))->getOutput(0);
auto result = createOp<ops::ScaleOp>(layer.name(), convertCaffeToMIR(inputs[0]), scale);
"Unexpected shape of scale parameter in batch norm");
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertBatchNorm(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto& opts = layer.batch_norm_param();
float eps = opts.eps();
auto scale_weight = convertBlob(layer.blobs(2));
return {convertMIRToCaffe(result->getOutput(0))};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertDropout(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto& opts = layer.dropout_param();
auto dropout = createOp<ops::DropoutOp>(layer.name(), inputs[0], opts.dropout_ratio());
return {dropout->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertELU(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto& opts = layer.elu_param();
auto elu = createOp<ops::EluOp>(layer.name(), inputs[0], opts.alpha());
return {elu->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertEmbed(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
const auto& params = layer.embed_param();
auto data = createOp<ops::ConstantOp>(layer.name() + ".weights", convertBlob(layer.blobs(0)));
auto result = createOp<ops::GatherOp>(layer.name(), data->getOutput(0), inputs[0], 0);
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertSigmoid(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto result = createOp<ops::SigmoidOp>(layer.name(), inputs[0]);
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertTanH(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto tanh = createOp<ops::TanhOp>(layer.name(), inputs[0]);
return {tanh->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertEltwise(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto& opts = layer.eltwise_param();
ops::ElementwiseOp::OpType optype;
- std::vector<mir::IODescriptor> input_tensors;
+ std::vector<mir::Operation::Output*> input_tensors;
switch (opts.operation()){
case EltwiseParameter_EltwiseOp_PROD:
optype = ops::ElementwiseOp::OpType::mul;
if (opts.coeff().Get(i) != 1.0f) {
TensorVariant coeff_tensor(DTYPE::FLOAT32, Shape{1}, &opts.coeff().Get(i));
auto coeff_const = createOp<ops::ConstantOp>(layer.name() + "_const", coeff_tensor);
- std::vector<mir::IODescriptor> mul_inputs;
+ std::vector<mir::Operation::Output*> mul_inputs;
mul_inputs.push_back(coeff_const->getOutput(0));
mul_inputs.push_back(inputs[i]);
auto mul = createOp<ops::ElementwiseOp>(layer.name() + "_mul",
return {elementwise->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertSplit(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
- std::vector<mir::IODescriptor> outputs(layer.top_size(), inputs.at(0));
+ const std::vector<mir::Operation::Output*>& inputs) {
+ std::vector<mir::Operation::Output*> outputs(layer.top_size(), inputs.at(0));
return outputs;
}
* In this implementation the inner products for all gates are performed as single inner product for
* efficiency.
*/
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
CaffeOpCreator::convertLSTM(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
const auto& params = layer.recurrent_param();
// Inputs to the layer.
auto x_xw_b = createOp<ops::BiasAddOp>("", x_xw, xb)->getOutput(0);
// Split input and continuation tensors into seq_length slices.
- std::vector<mir::IODescriptor> x_xw_b_slices = createSplit(x_xw_b, seq_length, 0);
- std::vector<mir::IODescriptor> cont_slices = createSplit(cont, seq_length, 0);
- std::vector<mir::IODescriptor> h_slices(seq_length);
+ std::vector<mir::Operation::Output*> x_xw_b_slices = createSplit(x_xw_b, seq_length, 0);
+ std::vector<mir::Operation::Output*> cont_slices = createSplit(cont, seq_length, 0);
+ std::vector<mir::Operation::Output*> h_slices(seq_length);
for (int32_t t = 0; t < seq_length; t++) {
auto c_cont_t = createMul(c_t, cont_slices[t]);
auto x_xw_b_t = x_xw_b_slices[t];
auto h_hw_t = createFullyConnected(h_cont_t, hw, 2);
auto activation_inputs_concat = createAdd(x_xw_b_t, h_hw_t);
- std::vector<mir::IODescriptor> activation_inputs = createSplit(activation_inputs_concat, 4, 2);
+ auto activation_inputs = createSplit(activation_inputs_concat, 4, 2);
auto i_t = createOp<ops::SigmoidOp>("", activation_inputs[0])->getOutput(0);
auto f_t = createOp<ops::SigmoidOp>("", activation_inputs[1])->getOutput(0);
public:
explicit CaffeOpCreator(mir::Graph* g) : _graph(g) {};
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertInput(const caffe::LayerParameter& layer);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertConvolution(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertInnerProduct(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertConcat(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertPooling(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertSoftmax(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertReshape(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertReLU(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertScale(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertBatchNorm(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertDropout(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertDeconvolution(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertELU(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertEmbed(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertSigmoid(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertTanH(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertEltwise(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertSplit(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertLSTM(const caffe::LayerParameter& layer,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
void checkConvolution(const caffe::ConvolutionParameter& layer, std::set<std::string>&);
private:
mir::Graph* _graph = nullptr;
- mir::IODescriptor convertCaffeToMIR(const mir::IODescriptor& arg);
+ mir::Operation::Output* convertCaffeToMIR(mir::Operation::Output* arg);
- mir::IODescriptor convertMIRToCaffe(const mir::IODescriptor& arg);
+ mir::Operation::Output* convertMIRToCaffe(mir::Operation::Output* arg);
- mir::IODescriptor createAdd(mir::IODescriptor arg1, mir::IODescriptor arg2);
+ mir::Operation::Output* createAdd(mir::Operation::Output* arg1, mir::Operation::Output* arg2);
- mir::IODescriptor createMul(mir::IODescriptor arg1, mir::IODescriptor arg2);
+ mir::Operation::Output* createMul(mir::Operation::Output* arg1, mir::Operation::Output* arg2);
- std::vector<mir::IODescriptor>
- createSplit(mir::IODescriptor arg, int32_t num_parts, int32_t axis);
+ std::vector<mir::Operation::Output*>
+ createSplit(mir::Operation::Output* arg, int32_t num_parts, int32_t axis);
- mir::IODescriptor
- createFullyConnected(const mir::IODescriptor& input,
- const mir::IODescriptor& weights,
+ mir::Operation::Output*
+ createFullyConnected(mir::Operation::Output* input,
+ mir::Operation::Output* weights,
int32_t axis);
TensorVariant convertBlob(const caffe::BlobProto& blob);
_inputTensors.emplace(name, t);
}
-TensorVariant NNInterpreter::getResult(IODescriptor tensor) {
+TensorVariant NNInterpreter::getResult(const Operation::Output* tensor) {
return _opResults.at(tensor->getNode()).at(tensor->getIndex());
}
g->accept(&interpreter);
- for (auto out_node : g->getOutputs()) {
- const auto& tensor = interpreter.getResult(out_node->getInput(0)->getProducer());
+ for (const auto* output_op : g->getOutputs()) {
+ const auto& tensor = interpreter.getResult(output_op->getInput(0)->getProducer());
#ifdef NNC_HDF5_SUPPORTED
- writeTensorToHDF5File(tensor, out_node->getName(), cli::artifactDir);
+ writeTensorToHDF5File(tensor, output_op->getName(), cli::artifactDir);
#else
- std::cout << "Result <" << out_node->getName()
+ std::cout << "Result <" << output_op->getName()
<< "> wasn't saved, due to lack of HDF5" << std::endl;
#endif // NNC_HDF5_SUPPORTED
}
const onnx::TensorProto* onnx_tensor = onnx_tensors[name];
_constantTensors.insert(std::make_pair(name, createTensor(onnx_tensor)));
auto constant = _graph->create<mir::ops::ConstantOp>(name, _constantTensors.at(name));
- _tensorNameToIODescriptor[name] = constant->getOutput(0);
+ _tensorNameToOutput[name] = constant->getOutput(0);
} else {
const auto& onnx_input_shape = input.type().tensor_type().shape();
mir::Shape shape(onnx_input_shape.dim_size());
}
// TODO: Temporary solution!
auto node = _graph->create<mir::ops::InputOp>(name, shape);
- _tensorNameToIODescriptor[name] = node->getOutput(0);
+ _tensorNameToOutput[name] = node->getOutput(0);
}
}
}
assert(onnx_node.has_op_type());
auto op_type = onnx_node.op_type().c_str();
// Fill inputs of the given node
- std::vector<mir::IODescriptor> inputs(onnx_node.input_size());
+ std::vector<mir::Operation::Output*> inputs(onnx_node.input_size());
+ std::vector<mir::Operation::Output*> outputs;
+
for (int i = 0; i < onnx_node.input_size(); i++) {
auto& name = onnx_node.input(i);
if (name.size() != 0) {
- assert(_tensorNameToIODescriptor.find(name) != _tensorNameToIODescriptor.end());
- inputs[i] = _tensorNameToIODescriptor[name];
+ assert(_tensorNameToOutput.find(name) != _tensorNameToOutput.end());
+ inputs[i] = _tensorNameToOutput[name];
}
}
- std::vector<mir::IODescriptor> outputs;
auto* onnx_op_type = ONNXPerfectHash::getONNXOpType(op_type, onnx_node.op_type().size());
switch (onnx_op_type->opCode) {
// Set outputs' names
for (int i = 0; i < outputs.size(); i++) {
outputs[i]->getNode()->setName(onnx_node.output(i));
- auto result = _tensorNameToIODescriptor.emplace(outputs[i]->getNode()->getName(), outputs[i]);
+ auto result = _tensorNameToOutput.emplace(outputs[i]->getNode()->getName(), outputs[i]);
if(!result.second)
throw PassException("Name duplication: " + outputs[i]->getNode()->getName());
}
private:
void createGraphInputs();
- // This map maps onnx tensor names to MIR operations/nodes
- std::map<std::string, mir::IODescriptor> _tensorNameToIODescriptor;
+ // Maps ONNX tensor names to corresponding MIR operation outputs.
+ std::map<std::string, mir::Operation::Output*> _tensorNameToOutput;
// This map keeps named tensors used as graph input initializers.
// In addition here could be tensors from opGivenTensorFill and opConstant
std::map<std::string, mir::TensorVariant> _constantTensors;
- std::vector<mir::IODescriptor> _graphOutputs;
+ std::vector<mir::Operation::Output*> _graphOutputs;
std::string _modelFilename;
std::unique_ptr<onnx::ModelProto> _model;
mir::Graph* _graph;
}
};
-std::vector<IODescriptor>
-ONNXOpCreator::convertConv2D(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertConv2D(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node) {
assert(inputs.size() >= 2);
return {convertMIRToONNX(result->getOutput(0))};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertConcat(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertConcat(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node) {
bool found;
int axis;
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertGather(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertGather(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node) {
bool found;
int value;
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertPad(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertPad(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node) {
bool found;
float value;
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertPool(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertPool(const std::vector<mir::Operation::Output*>& inputs,
ONNXOpCode op_code,
const onnx::NodeProto& onnx_node) {
ops::PoolOp::BorderType border_type;
return {convertMIRToONNX(result->getOutput(0))};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertSoftmax(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertSoftmax(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node) {
int axis;
bool found;
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertReshape(const std::vector<mir::IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertReshape(const std::vector<mir::Operation::Output*>& inputs) {
// The original shape
const auto& in_shape = inputs[0]->getShape();
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertUnsqueeze(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertUnsqueeze(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node) {
auto* axes = findAttribute(onnx_node, "axes");
assert(axes && axes->ints_size());
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertRelu(const std::vector<mir::IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertRelu(const std::vector<mir::Operation::Output*>& inputs) {
assert(inputs.size() == 1);
auto result = createOp<ops::ReluOp>(inputs[0]);
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertSigmoid(const std::vector<mir::IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertSigmoid(const std::vector<mir::Operation::Output*>& inputs) {
assert(inputs.size() == 1);
auto result = createOp<ops::SigmoidOp>(inputs[0]);
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertElementwise(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertElementwise(const std::vector<mir::Operation::Output*>& inputs,
mir::ops::ElementwiseOp::OpType op_type) {
auto result = createOp<ops::ElementwiseOp>(inputs, op_type);
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertUpsample(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertUpsample(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& node) {
bool success;
std::string mode;
scales_vector)->getOutput(0))};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertBatchNorm(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertBatchNorm(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node,
InputTensors& input_tensors) {
// overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias
return {convertMIRToONNX(result->getOutput(0))};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertDropout(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertDropout(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node) {
bool found;
float value;
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertScale(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertScale(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node) {
bool found;
float value;
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertShape(const std::vector<mir::IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertShape(const std::vector<mir::Operation::Output*>& inputs) {
const auto& input_shape = inputs[0]->getShape();
int size = input_shape.rank();
Shape output_shape{size};
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
+std::vector<mir::Operation::Output*>
ONNXOpCreator::convertGivenTensorFill(const onnx::NodeProto& onnx_node,
InputTensors& input_tensors) {
auto values_att = findAttribute(onnx_node, "values");
return {result->getOutput(0)};
}
-std::vector<IODescriptor>
+std::vector<mir::Operation::Output*>
ONNXOpCreator::convertConstant(const onnx::NodeProto& onnx_node,
InputTensors& input_tensors) {
assert((onnx_node.attribute_size() == 1) &&
return {op};
}
-std::vector<IODescriptor>
-ONNXOpCreator::convertGemm(const std::vector<mir::IODescriptor>& inputs,
+std::vector<mir::Operation::Output*>
+ONNXOpCreator::convertGemm(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node) {
bool found;
int ivalue;
beta_tensor = TensorVariant(beta_tensor, mult_a_b);
}
auto beta = createOp<ops::ConstantOp>(beta_tensor)->getOutput(0);
- std::vector<IODescriptor> descriptors = {beta, input_c};
- auto c_mult = createOp<ops::ElementwiseOp>(descriptors,
+ std::vector<mir::Operation::Output*> mul_inputs = {beta, input_c};
+ auto c_mult = createOp<ops::ElementwiseOp>(mul_inputs,
ops::ElementwiseOp::OpType::mul)->getOutput(0);
assert(c_mult->getShape() == mult_a_b);
auto result = createOp<ops::GemmOp>(input_a, input_b, c_mult);
return {result->getOutput(0)};
}
-mir::IODescriptor ONNXOpCreator::convertONNXToMIR(mir::IODescriptor arg) {
+mir::Operation::Output* ONNXOpCreator::convertONNXToMIR(mir::Operation::Output* arg) {
// NCHW -> NHWC
return createOp<ops::TransposeOp>(arg, std::vector<std::size_t>{0, 2, 3, 1})->getOutput(0);
}
-mir::IODescriptor ONNXOpCreator::convertMIRToONNX(mir::IODescriptor arg) {
+mir::Operation::Output* ONNXOpCreator::convertMIRToONNX(mir::Operation::Output* arg) {
// NHWC -> NCHW
return createOp<ops::TransposeOp>(arg, std::vector<std::size_t>{0, 3, 1, 2})->getOutput(0);
}
void setMirGraph(mir::Graph* g) { _graph = g; };
- std::vector<mir::IODescriptor>
- convertConv2D(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertConv2D(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node);
- std::vector<mir::IODescriptor>
- convertConcat(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertConcat(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertGivenTensorFill(const onnx::NodeProto& onnx_node,
InputTensors& input_tensors);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertConstant(const onnx::NodeProto& onnx_node,
- InputTensors& input_tensors);
+ InputTensors& input_tensors);
- std::vector<mir::IODescriptor>
- convertPool(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertPool(const std::vector<mir::Operation::Output*>& inputs,
ONNXOpCode op_code,
const onnx::NodeProto& onnx_node);
- std::vector<mir::IODescriptor>
- convertPad(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertPad(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node);
- std::vector<mir::IODescriptor>
- convertSoftmax(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertSoftmax(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node);
- std::vector<mir::IODescriptor>
- convertReshape(const std::vector<mir::IODescriptor>& inputs);
+ std::vector<mir::Operation::Output*>
+ convertReshape(const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
- convertRelu(const std::vector<mir::IODescriptor>& inputs);
+ std::vector<mir::Operation::Output*>
+ convertRelu(const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
- convertSigmoid(const std::vector<mir::IODescriptor>& inputs);
+ std::vector<mir::Operation::Output*>
+ convertSigmoid(const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
- convertUnsqueeze(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertUnsqueeze(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node);
- std::vector<mir::IODescriptor>
- convertUpsample(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertUpsample(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node);
- std::vector<mir::IODescriptor>
- convertElementwise(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertElementwise(const std::vector<mir::Operation::Output*>& inputs,
mir::ops::ElementwiseOp::OpType op_type);
- std::vector<mir::IODescriptor>
- convertScale(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertScale(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node);
- std::vector<mir::IODescriptor>
- convertShape(const std::vector<mir::IODescriptor>& inputs);
+ std::vector<mir::Operation::Output*>
+ convertShape(const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
- convertBatchNorm(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertBatchNorm(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node,
InputTensors& input_tensors);
- std::vector<mir::IODescriptor>
- convertDropout(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertDropout(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node);
- std::vector<mir::IODescriptor>
- convertGather(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertGather(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node);
- std::vector<mir::IODescriptor>
- convertGemm(const std::vector<mir::IODescriptor>& inputs,
+ std::vector<mir::Operation::Output*>
+ convertGemm(const std::vector<mir::Operation::Output*>& inputs,
const onnx::NodeProto& onnx_node);
- mir::IODescriptor convertONNXToMIR(mir::IODescriptor arg);
- mir::IODescriptor convertMIRToONNX(mir::IODescriptor arg);
+ mir::Operation::Output* convertONNXToMIR(mir::Operation::Output* arg);
+ mir::Operation::Output* convertMIRToONNX(mir::Operation::Output* arg);
private:
template <typename OpType, typename ...Types>
for (const auto& out : node->getOutputs()) {
const auto& consumers = out.getConsumers();
std::transform(consumers.begin(), consumers.end(), std::back_inserter(next_nodes),
- [](const Operation::Input* input) { return input->getNode(); });
+ [](Operation::Input* input) { return input->getNode(); });
}
if (edge == next_nodes.size()) {
// this node is fully analyzed, push it into RPO and pop from stack
// FIXME This is to work around deserializeTensors not being able to deserialize tensors of type
// other than float32.
- if (op.getOutput(0)->getConsumers().empty())
+ const auto* output = op.getOutput(0);
+ if (output->getConsumers().empty())
return;
appendOperationToInference(&op, "constant");
}
void TfliteImporter::walkOperator(const Operator* op) {
- std::vector<mir::IODescriptor> inputs = getMIRInputsForOperator(op);
- std::vector<mir::IODescriptor> outputs;
+ std::vector<mir::Operation::Output*> inputs = getMIRInputsForOperator(op);
+ std::vector<mir::Operation::Output*> outputs;
BuiltinOperator opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
switch (opcode) {
}
}
-std::vector<mir::IODescriptor> TfliteImporter::getMIRInputsForOperator(const Operator* op) {
- std::vector<mir::IODescriptor> inputs;
+std::vector<mir::Operation::Output*> TfliteImporter::getMIRInputsForOperator(const Operator* op) {
+ std::vector<mir::Operation::Output*> inputs;
try {
for (auto i : *(op->inputs())) {
std::vector<int32_t> _graphInputs;
std::vector<int32_t> _graphOutputs;
- // This map maps indices of TFLite tensors to MIR operations/nodes
- // that correspond to operations having these tensors as output.
- std::map<int, mir::IODescriptor> _tensorMap;
+  // Maps TFLite tensor indices to corresponding MIR operation outputs.
+ std::map<int, mir::Operation::Output*> _tensorMap;
+
// set of strings describing incorrect parts of network and parts of network unsupported by NNC
std::set<std::string> _problemsOpSet;
void setIrNodeNames();
/**
- * @brief Return MIR ops, preceding given tflite operator
+ * @brief Returns MIR operation outputs corresponding to the inputs of the given operator.
*/
- std::vector<mir::IODescriptor> getMIRInputsForOperator(const ::tflite::Operator* op);
+ std::vector<mir::Operation::Output*> getMIRInputsForOperator(const ::tflite::Operator* op);
mir::TensorVariant createTensor(const ::tflite::Tensor* t,
const ::tflite::Buffer* b);
return v;
}
-static const mir::TensorVariant& extractTensor(mir::IODescriptor descr) {
- auto constant_op = dynamic_cast<ops::ConstantOp*>(descr->getNode());
+static const mir::TensorVariant& extractTensor(const mir::Operation::Output* output) {
+ auto constant_op = dynamic_cast<const ops::ConstantOp*>(output->getNode());
if (constant_op == nullptr)
throw PassException("Non-constant input is not supported.");
return constant_op->getValue();
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertConv2D(const Conv2DOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
auto kernel = inputs.at(1);
auto bias = inputs.at(2);
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
auto kernel = inputs.at(1);
auto bias = inputs.at(2);
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertConcatenation(const ::tflite::ConcatenationOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto result = createOp<ops::ConcatOp>(inputs, opts->axis());
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertMaxPool2D(const ::tflite::Pool2DOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
const auto& input_shape = input->getShape();
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertAveragePool2D(const ::tflite::Pool2DOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
const auto& input_shape = input->getShape();
return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertSoftmax(const ::tflite::SoftmaxOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
// Softmax in TFLite is always 2-D.
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertSlice(const ::tflite::SliceOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(2)));
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertReshape(const ::tflite::ReshapeOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
// TODO: we should also support "-1" values in new_shape, which means that correct
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertTransposeConv(const ::tflite::TransposeConvOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
mir::Tensor<int32_t> output_shape_tensor(extractTensor(inputs.at(0)));
auto kernel = inputs.at(1);
auto input = inputs.at(2);
problems_op_set.insert("'align_corners' is not currently supported");
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(1)));
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::createElementwise(ops::ElementwiseOp::OpType op_type,
::tflite::ActivationFunctionType activation,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto result = createOp<ops::ElementwiseOp>(inputs, op_type);
return {addFusedActivation(result->getOutput(0), activation)};
}
-std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::Operation::Output*>& inputs) {
auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::sub);
- result = createOp<ops::ElementwiseOp>(std::vector<mir::IODescriptor>{
+ result = createOp<ops::ElementwiseOp>(std::vector<mir::Operation::Output*>{
result->getOutput(0),
result->getOutput(0)},
ops::ElementwiseOp::OpType::mul);
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertMean(const ::tflite::ReducerOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
mir::Tensor<int32_t> axes_tensor(extractTensor(inputs.at(1)));
checkActivationType(opts->fused_activation_function(), problems_op_set);
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertFullyConnected(const ::tflite::FullyConnectedOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
auto weights = inputs.at(1);
auto bias = inputs.at(2);
+ EnumNameActivationFunctionType(activation_type));
}
-mir::IODescriptor TFLiteOpCreator::addFusedActivation(mir::IODescriptor input,
- ActivationFunctionType activation_type) {
+mir::Operation::Output*
+TFLiteOpCreator::addFusedActivation(mir::Operation::Output* input,
+ ActivationFunctionType activation_type) {
// TODO Support other activation function types.
switch (activation_type) {
case ActivationFunctionType_NONE:
}
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertSqueeze(const ::tflite::SqueezeOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
std::vector<int32_t> squeeze_dims(opts->squeeze_dims()->begin(),
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertPad(const ::tflite::PadOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
mir::Tensor<int32_t> paddings_tensor(extractTensor(inputs.at(1)));
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertTanh(const std::vector<mir::IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+TFLiteOpCreator::convertTanh(const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
auto result = createOp<ops::TanhOp>(input);
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertReLU(const std::vector<mir::IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+TFLiteOpCreator::convertReLU(const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
auto result = createOp<ops::ReluOp>(input);
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertReLU6(const std::vector<mir::IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+TFLiteOpCreator::convertReLU6(const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
auto result = createOp<ops::CappedReluOp>(input, 6);
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertSqrt(const std::vector<mir::IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+TFLiteOpCreator::convertSqrt(const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
auto result = createOp<ops::SqrtOp>(input);
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertLogistic(const std::vector<mir::IODescriptor>& inputs) {
+std::vector<mir::Operation::Output*>
+TFLiteOpCreator::convertLogistic(const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
auto result = createOp<ops::SigmoidOp>(input);
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertTranspose(const ::tflite::TransposeOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
mir::Tensor<int32_t> perm_tensor(extractTensor(inputs.at(1)));
problems_op_set.insert("StridedSlice: parameter 'new_axis_mask' is not supported.");
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertStridedSlice(const ::tflite::StridedSliceOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
mir::Tensor<int32_t> end_tensor(extractTensor(inputs.at(2)));
return {result->getOutput(0)};
}
-std::vector<mir::IODescriptor>
+std::vector<mir::Operation::Output*>
TFLiteOpCreator::convertLeakyReLU(const ::tflite::LeakyReluOptions* opts,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
auto input = inputs.at(0);
auto result = createOp<ops::LeakyReluOp>(input, opts->alpha());
public:
explicit TFLiteOpCreator(Graph* g) : _graph(g) {}
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertConv2D(const ::tflite::Conv2DOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertDepthwiseConv2D(const ::tflite::DepthwiseConv2DOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertConcatenation(const ::tflite::ConcatenationOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertMaxPool2D(const ::tflite::Pool2DOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertAveragePool2D(const ::tflite::Pool2DOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertMean(const ::tflite::ReducerOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertSoftmax(const ::tflite::SoftmaxOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertSlice(const ::tflite::SliceOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertReshape(const ::tflite::ReshapeOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertFullyConnected(const ::tflite::FullyConnectedOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
- convertLogistic(const std::vector<mir::IODescriptor>& inputs);
+ std::vector<mir::Operation::Output*>
+ convertLogistic(const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
- convertSqrt(const std::vector<mir::IODescriptor>& inputs);
+ std::vector<mir::Operation::Output*>
+ convertSqrt(const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertSqueeze(const ::tflite::SqueezeOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
createElementwise(ops::ElementwiseOp::OpType op_type,
::tflite::ActivationFunctionType activation,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
- convertSquaredDifference(const std::vector<mir::IODescriptor>& inputs);
+ std::vector<mir::Operation::Output*>
+ convertSquaredDifference(const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
- convertTanh(const std::vector<mir::IODescriptor>& inputs);
+ std::vector<mir::Operation::Output*>
+ convertTanh(const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
- convertReLU(const std::vector<mir::IODescriptor>& inputs);
+ std::vector<mir::Operation::Output*>
+ convertReLU(const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
- convertReLU6(const std::vector<mir::IODescriptor>& inputs);
+ std::vector<mir::Operation::Output*>
+ convertReLU6(const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertTransposeConv(const ::tflite::TransposeConvOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertPad(const ::tflite::PadOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertTranspose(const ::tflite::TransposeOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertStridedSlice(const ::tflite::StridedSliceOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
- std::vector<mir::IODescriptor>
+ std::vector<mir::Operation::Output*>
convertLeakyReLU(const ::tflite::LeakyReluOptions* opts,
- const std::vector<mir::IODescriptor>& inputs);
+ const std::vector<mir::Operation::Output*>& inputs);
void checkPool2D(const ::tflite::Pool2DOptions* opts,
std::set<std::string>& problem_ops_set);
void checkActivationType(::tflite::ActivationFunctionType, std::set<std::string>&);
- mir::IODescriptor addFusedActivation(mir::IODescriptor input,
- ::tflite::ActivationFunctionType activation_type);
+ mir::Operation::Output* addFusedActivation(mir::Operation::Output* input,
+ ::tflite::ActivationFunctionType activation_type);
template<typename OpType, typename... Types>
mir::Operation* createOp(Types&&... args);
namespace {
-using OpConstructor = function<Operation*(Graph& g, vector<IODescriptor>& inputs)>;
+using OpConstructor = function<Operation*(Graph& g, vector<Operation::Output*>& inputs)>;
const char* artifactName = "nnmodel";
* */
void fillGraph(Graph& g, const OpConstructor& op_constr, const vector<Shape>& input_shapes) {
// Create graph inputs.
- vector<mir::IODescriptor> inputs;
+ vector<mir::Operation::Output*> inputs;
for (std::size_t i = 0; i < input_shapes.size(); ++i) {
auto input_op = g.create<ops::InputOp>("x" + to_string(i), input_shapes[i]);
inputs.push_back(input_op->getOutput(0));
TensorVariant constant_data = createTensorVariant(shape);
Graph g;
- OpConstructor op_generator = [&constant_data](Graph& g, const vector<IODescriptor>& inputs) {
+ OpConstructor op_generator = [&constant_data](Graph& g,
+ const vector<Operation::Output*>& inputs) {
return g.create<mir::ops::ConstantOp>("data", constant_data);
};
TensorVariant w = createTensorVariant({channels});
Graph g;
- OpConstructor op_generator = [&w](Graph& g, const vector<IODescriptor>& inputs) {
+ OpConstructor op_generator = [&w](Graph& g, const vector<Operation::Output*>& inputs) {
auto bias = g.create<mir::ops::ConstantOp>("", w)->getOutput(0);
return g.create<mir::ops::BiasAddOp>("bias", inputs[0], bias);
};
TensorVariant w = createTensorVariant({channels});
Graph g;
- OpConstructor op_generator = [&w](Graph& g, const vector<IODescriptor>& inputs) {
+ OpConstructor op_generator = [&w](Graph& g, const vector<Operation::Output*>& inputs) {
auto scale = g.create<mir::ops::ConstantOp>("", w)->getOutput(0);
return g.create<mir::ops::ScaleOp>("scale", inputs[0], scale);
};
TEST(acl_backend_mir_to_dom, concat) {
Graph g;
- OpConstructor op_generator = [](Graph& g, const vector<IODescriptor>& inputs) {
+ OpConstructor op_generator = [](Graph& g, const vector<Operation::Output*>& inputs) {
return g.create<mir::ops::ConcatOp>("concat", inputs, 3);
};
vector<Shape> input_shapes{{2, 3, 5, 1}, {2, 3, 5, 3}};
Graph g;
OpConstructor op_generator =
[kernel_tensor, strides](mir::Graph& g,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
std::vector<int32_t> padding{0, 0};
auto kernel = g.create<mir::ops::ConstantOp>("", kernel_tensor)->getOutput(0);
return g.create<mir::ops::Conv2DOp>("conv2d", inputs[0], kernel, strides, padding, padding);
Graph g;
OpConstructor op_generator =
[kernel_tensor, strides](mir::Graph& g,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
std::vector<int32_t> padding{0, 0};
auto kernel = g.create<mir::ops::ConstantOp>("", kernel_tensor)->getOutput(0);
return g.create<mir::ops::DepthwiseConv2DOp>("depthwiseConv2d", inputs[0], kernel,
TensorVariant weights_tensor = createTensorVariant(weights_shape);
Graph g;
- OpConstructor opGenerator = [weights_tensor](Graph& g, const vector<IODescriptor>& inputs) {
+ OpConstructor opGenerator = [weights_tensor](Graph& g, const vector<Operation::Output*>& inputs) {
auto weights = g.create<mir::ops::ConstantOp>("", weights_tensor)->getOutput(0);
return g.create<mir::ops::FullyConnectedOp>("fc", inputs[0], weights);
};
mir::Shape strides{1, 1};
Graph g;
- OpConstructor op_generator = [window_shape, strides](mir::Graph& g,
- const std::vector<mir::IODescriptor>& inputs) {
- std::vector<int32_t> padding{0, 0};
- return g.create<mir::ops::PoolOp>("maxPool", inputs[0], ops::PoolOp::PoolingType::MAX,
- window_shape, strides, padding, padding,
- mir::ops::PoolOp::BorderType::EMPTY);
+ OpConstructor op_generator = [window_shape, strides](
+ mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
+ std::vector<int32_t> padding{0, 0};
+ return g.create<mir::ops::PoolOp>("maxPool", inputs[0], ops::PoolOp::PoolingType::MAX,
+ window_shape, strides, padding, padding,
+ mir::ops::PoolOp::BorderType::EMPTY);
};
vector<Shape> input_shapes{{1, 10, 10, 3}};
}
TEST(acl_backend_mir_to_dom, relu) {
- OpConstructor op_generator = [](Graph& g, const std::vector<IODescriptor>& inputs) {
+ OpConstructor op_generator = [](Graph& g, const std::vector<Operation::Output*>& inputs) {
return g.create<mir::ops::ReluOp>("relu", inputs[0]);
};
TEST(acl_backend_mir_to_dom, capped_relu) {
float cap = 6;
- OpConstructor op_generator = [cap](Graph& g, const std::vector<IODescriptor>& inputs) {
+ OpConstructor op_generator = [cap](Graph& g, const std::vector<Operation::Output*>& inputs) {
return g.create<mir::ops::CappedReluOp>("capped_relu", inputs[0], cap);
};
}
TEST(acl_backend_mir_to_dom, sigmoid) {
- OpConstructor op_generator = [](Graph& g, const std::vector<IODescriptor>& inputs) {
+ OpConstructor op_generator = [](Graph& g, const std::vector<Operation::Output*>& inputs) {
return g.create<mir::ops::SigmoidOp>("sigmoid", inputs[0]);
};
}
TEST(acl_backend_mir_to_dom, tanh) {
- OpConstructor op_generator = [](Graph& g, const std::vector<IODescriptor>& inputs) {
+ OpConstructor op_generator = [](Graph& g, const std::vector<Operation::Output*>& inputs) {
return g.create<mir::ops::TanhOp>("tanh", inputs[0]);
};
TEST(acl_backend_mir_to_dom, softmax) {
Graph g;
- OpConstructor op_generator = [](Graph& g, const vector<IODescriptor>& inputs) {
+ OpConstructor op_generator = [](Graph& g, const vector<Operation::Output*>& inputs) {
return g.create<mir::ops::SoftmaxOp>("softmax", inputs[0], 3);
};
vector<Shape> input_shapes{{1, 1, 1, 3}};
Shape input_shape{1, h, w, c};
Shape output_shape{1, h * w * c};
- OpConstructor op_generator = [output_shape](Graph& g, const vector<IODescriptor>& inputs) {
+ OpConstructor op_generator = [output_shape](Graph& g, const vector<Operation::Output*>& inputs) {
return g.create<mir::ops::ReshapeOp>("reshape", inputs[0], output_shape);
};
vector<size_t> perm{0, 3, 1, 2};
Graph g;
- OpConstructor op_generator = [&perm](Graph& g, const vector<IODescriptor>& inputs) {
+ OpConstructor op_generator = [&perm](Graph& g, const vector<Operation::Output*>& inputs) {
return g.create<mir::ops::TransposeOp>("transpose", inputs[0], perm);
};
vector<Shape> input_shapes{{1, 10, 10, channels}};
auto n2 = g->create<ops::ReluOp>("op2", n1->getOutput(0));
auto n3 = g->create<ops::ReluOp>("op3", n2->getOutput(0));
auto n4 = g->create<ops::ReluOp>("op4", n2->getOutput(0));
- auto n5 = g->create<ops::ConcatOp>("op5",
- std::vector<IODescriptor>{n3->getOutput(0), n4->getOutput(0)},
- 0);
+ std::vector<Operation::Output*> concat_inputs{n3->getOutput(0), n4->getOutput(0)};
+ auto n5 = g->create<ops::ConcatOp>("op5", concat_inputs, 0);
g->replaceInputNodes({"op1", "op4"});
auto input = g.create<ops::InputOp>("input1", input_shape);
auto input2 = g.create<ops::InputOp>("input2", input2_shape);
- auto add = g.create<ops::ElementwiseOp>("add_1",
- std::vector<IODescriptor>{input->getOutput(0),
- input2->getOutput(0)},
- ops::ElementwiseOp::OpType::add);
+ std::vector<Operation::Output*> add_inputs{input->getOutput(0), input2->getOutput(0)};
+ auto add = g.create<ops::ElementwiseOp>("add_1", add_inputs, ops::ElementwiseOp::OpType::add);
ASSERT_EQ(add->getOutputShape(0), Shape({1, 10, 10, 10}));
}
*/
mir::Operation*
fillGraph(mir::Graph& g,
- const function<mir::Operation*(mir::Graph& g, vector<mir::IODescriptor>& inputs)>& op_gen,
+ const function<mir::Operation*(mir::Graph& g,
+ vector<mir::Operation::Output*>& inputs)>& op_gen,
const vector<unique_ptr<mir::TensorVariant>>& input_ntensors) {
// Create graph inputs.
- std::vector<mir::IODescriptor> inputs;
+ std::vector<mir::Operation::Output*> inputs;
for (std::size_t i = 0; i < input_ntensors.size(); ++i) {
auto input_op =
g.create<mir::ops::InputOp>("x" + std::to_string(i), input_ntensors[i]->getShape());
template <typename TestFunc, typename ...Args>
void createAndRunTestGraph(
function<mir::Operation*(mir::Graph&,
- const std::vector<mir::IODescriptor>& inputs)> op_generator,
+ const std::vector<mir::Operation::Output*>& inputs)> op_generator,
TestFunc artifactOperation,
const vector<unique_ptr<mir::TensorVariant>>& input_ntensors,
Args& ...input_atensors) {
fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
fillTensors(input_ntensors[1], input_atensor1, weights_shape_data, 1.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::BiasAddOp>("y", inputs[0], inputs[1]);
};
fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
fillTensors(input_ntensors[1], input_atensor1, weights_shape_data, 1.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ScaleOp>("y", inputs[0], inputs[1]);
};
Tensor input_atensor;
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
- auto op_generator = [cap](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [cap](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::CappedReluOp>("y", inputs[0], cap);
};
vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
- auto op_generator = [axis](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [axis](mir::Graph& g,
+ const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ConcatOp>("y", inputs, axis);
};
vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::add);
};
fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
fillTensors(input_ntensors[2], input_atensors[2], shape_data3, 3.0f);
- auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto opGenerator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::mul);
};
fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 5.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
fillTensors(input_ntensors[2], input_atensors[2], shape_data3, 3.0f);
- auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto opGenerator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::div);
};
vector<unique_ptr<mir::TensorVariant>> input_ntensors(2);
fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::add);
};
fillTensors(input_n_tensors[0], input_atensors[0], shape_data, 1.0f);
fillTensors(input_n_tensors[1], input_atensors[1], shape_data, 2.0f);
fillTensors(input_n_tensors[2], input_atensors[2], shape_data, 3.0f);
- auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto opGenerator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ElementwiseOp>("y", inputs,
mir::ops::ElementwiseOp::OpType::sub);
};
fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
fillTensors(input_ntensors[2], input_atensors[2], shape_data, 3.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::mul);
};
fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
fillTensors(input_ntensors[2], input_atensors[2], shape_data, 3.0f);
fillTensors(input_ntensors[3], input_atensors[3], shape_data, 3.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ElementwiseOp>("y", inputs, mir::ops::ElementwiseOp::OpType::max);
};
fillTensors(input_ntensors[1], input_atensor1, kernel_shape_data, 1.0f);
auto pad_t = mir::ops::PaddingType::Same;
auto op_generator = [&strides, pad_t](
- mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::DeConv2DOp>("y", inputs[0], inputs[1], strides, pad_t);
};
fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
fillTensors(input_ntensors[1], input_atensor1, kernel_shape_data, 1.0f);
auto op_generator = [&strides](mir::Graph& g,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
std::vector<int32_t> padding{0, 0};
return g.create<mir::ops::Conv2DOp>("y", inputs[0], inputs[1],
strides, padding, padding);
fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
fillTensors(input_ntensors[1], input_atensor1, kernel_shape_data, 1.0f);
auto op_generator = [&strides](mir::Graph& g,
- const std::vector<mir::IODescriptor>& inputs) {
+ const std::vector<mir::Operation::Output*>& inputs) {
std::vector<int32_t> padding{0, 0};
return g.create<mir::ops::DepthwiseConv2DOp>("y", inputs[0], inputs[1],
strides, padding, padding);
Tensor input_atensor1;
fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
fillTensors(input_ntensors[1], input_atensor1, weights_shape_data, 1.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::FullyConnectedOp>("y", inputs[0], inputs[1]);
};
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
Tensor input_atensor;
fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
- auto op_generator = [&res_shape](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [&res_shape](mir::Graph& g,
+ const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ResizeOp>(
"y", inputs[0],
mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
Tensor input_atensor;
fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
- auto op_generator = [&scales](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [&scales](mir::Graph& g,
+ const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ResizeOp>(
"y", inputs[0],
mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales);
template <irOps::PoolOp::PoolingType poolT>
static mir::Operation* createPool(mir::Graph& g,
- const std::vector<mir::IODescriptor>& inputs,
+ const std::vector<mir::Operation::Output*>& inputs,
mir::Shape& window_shape,
mir::Shape& strides,
irOps::PoolOp::BorderType border) {
Tensor input_atensor;
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ReluOp>("y", inputs[0]);
};
Tensor input_atensor;
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::LeakyReluOp>("y", inputs[0], 0.1);
};
Tensor input_atensor;
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
- auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto opGenerator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::SigmoidOp>("y", inputs[0]);
};
Tensor input_atensor;
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::EluOp>("y", inputs[0], 1);
};
Tensor input_atensor;
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::TanhOp>("y", inputs[0]);
};
Tensor input_atensor;
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
- auto op_generator = [&axis_list, keep_dims](mir::Graph& g,
- const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [&axis_list, keep_dims](
+ mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
auto op = g.create<mir::ops::ReduceFOp>(
"y", inputs[0], axis_list, keep_dims,
mir::ops::ReduceFOp::FuncType::mean);
Tensor input_atensor;
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
- auto op_generator = [axis](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [axis](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::SoftmaxOp>("y", inputs[0], axis);
};
Tensor input_atensor;
vector<unique_ptr<mir::TensorVariant>> input_n_tensor(1);
fillTensors(input_n_tensor[0], input_atensor, shape_data, 1.0f);
- auto op_gen = [st, sz](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_gen = [st, sz](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::SliceOp>("y", inputs[0], mir::Shape(st),
mir::Shape(sz));
};
Tensor input_atensor;
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
- auto op_generator = [&output_nshape](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [&output_nshape](mir::Graph& g,
+ const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::ReshapeOp>("y", inputs[0], output_nshape);
};
Tensor input_atensor;
vector<unique_ptr<mir::TensorVariant>> input_ntensor(1);
fillTensors(input_ntensor[0], input_atensor, shape_data, 1.0f);
- auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::SqrtOp>("y", inputs[0]);
};
createAndRunTestGraph(op_generator, sqrtFN, input_ntensor, input_atensor);
mir::DTYPE::FLOAT32, sizeof(float));
auto op_generator = [num_dims, &paddings, &constant_value]
- (mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ (mir::Graph& g, const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::PadOp>("y", inputs[0], num_dims, paddings, constant_value);
};
{3, 2, 1, 0}
};
for (const auto& permute: test_cases_pack_4d) {
- auto op_generator = [&permute](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [&permute](mir::Graph& g,
+ const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::TransposeOp>("transpose", inputs[0], permute);
};
createAndRunTestGraph(op_generator, transpose, input_ntensor_4d, input_atensor_4d);
{2, 1, 0}
};
for (const auto& permute: test_cases_pack_3d) {
- auto op_generator = [&permute](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ auto op_generator = [&permute](mir::Graph& g,
+ const std::vector<mir::Operation::Output*>& inputs) {
return g.create<mir::ops::TransposeOp>("transpose", inputs[0], permute);
};
createAndRunTestGraph(op_generator, transpose, input_ntensor_3d, input_atensor_3d);
Operation* head2 = g.create<ops::ReluOp>("head2", input->getOutput(0));
Operation* tail1 = g.create<ops::ReluOp>("tail1", head1->getOutput(0));
Operation* tail2 = g.create<ops::ReluOp>("tail2", head2->getOutput(0));
- Operation* join = g.create<ops::ConcatOp>("join", std::vector<IODescriptor>{tail1->getOutput(0),
- tail2->getOutput(0)},
- 0);
+ std::vector<mir::Operation::Output*> concat_inputs{tail1->getOutput(0), tail2->getOutput(0)};
+ Operation* join = g.create<ops::ConcatOp>("join", concat_inputs, 0);
// Check that layout is desired
ModelAnalyzer ma;