Operation names are going to be removed.

Drop the name argument from Graph::create() calls. Where tests still rely
on names, set them on operation outputs via Output::setName(); assertions
that matched on operation names now match on operation ids or types
instead.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
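
For illustration, the pattern applied throughout this patch, as a minimal
sketch (the include paths and the buildExample name are assumptions, not
code from this change):

    #include "mir/Graph.h"
    #include "mir/ops/InputOp.h"
    #include "mir/ops/ReluOp.h"

    using namespace mir;

    static void buildExample(Graph &g)
    {
      // Before: the name was the first argument of Graph::create(), e.g.
      //   Operation *relu = g.create<ops::ReluOp>("relu", input->getOutput(0));
      Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});
      Operation *relu = g.create<ops::ReluOp>(input->getOutput(0));

      // After: names belong to operation outputs and are optional; set them
      // only where a test still needs to find a value by name.
      input->getOutput(0)->setName("in");
      relu->getOutput(0)->setName("out");
    }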
if (!isIdentityTranspose(combined_axis_order))
{
- auto new_tr_op = g->create<mir::ops::TransposeOp>(top_transpose->getName() + "new",
- top_transpose->getInput(0)->getProducer(),
+ auto new_tr_op = g->create<mir::ops::TransposeOp>(top_transpose->getInput(0)->getProducer(),
combined_axis_order);
g->replaceNode(bottom_transpose, new_tr_op);
TensorVariant res(DataType::FLOAT32, transpose_op->getOutputShape(0));
transpose(constant_op->getValue(), res, transpose_op->getAxisOrder());
- auto new_op = graph->create<ops::ConstantOp>("", res);
+ auto new_op = graph->create<ops::ConstantOp>(res);
graph->replaceNode(transpose_op, new_op);
opt_util::removeNodeIfUnused(graph, constant_op);
}
}
- return g->create<ops::ConstantOp>(const1_op->getName(), new_const_val);
+ return g->create<ops::ConstantOp>(new_const_val);
}
// TODO: support 'DepthwiseConv'->'Mul'
pre_relu.emplace_back(r->getInput(0)->getProducer());
}
// create replacement nodes
- auto new_concat =
- g->create<ops::ConcatOp>(concat->getName() + "_before_relu", pre_relu, concat->getAxis());
- auto new_relu =
- g->create<ops::ReluOp>(relus[0]->getName() + "_after_concat", new_concat->getOutput(0));
+ auto new_concat = g->create<ops::ConcatOp>(pre_relu, concat->getAxis());
+ auto new_relu = g->create<ops::ReluOp>(new_concat->getOutput(0));
// concat is deleted here
g->replaceNode(concat, new_relu);
{
prev_trans.emplace_back(transpose->getInput(0)->getProducer());
}
- auto new_concat = g->create<ops::ConcatOp>(concat->getName() + "_transposed", prev_trans,
- axis_order[concat->getAxis()]);
- auto new_transpose = g->create<ops::TransposeOp>(trs[0]->getName() + "_after_concat",
- new_concat->getOutput(0), axis_order);
+ auto new_concat = g->create<ops::ConcatOp>(prev_trans, axis_order[concat->getAxis()]);
+ auto new_transpose = g->create<ops::TransposeOp>(new_concat->getOutput(0), axis_order);
// removes old concat
g->replaceNode(concat, new_transpose);
for (auto tr : trs)
static void fillGraph(Graph &g)
{
Shape input_shape{1, 2, 3};
- Operation *input_op = g.create<ops::InputOp>("in", input_shape);
- Operation *relu_op = g.create<ops::ReluOp>("relu", input_op->getOutput(0));
- Operation *output_op = g.create<ops::OutputOp>("out", relu_op->getOutput(0));
+ Operation *input_op = g.create<ops::InputOp>(input_shape);
+ Operation *relu_op = g.create<ops::ReluOp>(input_op->getOutput(0));
+ Operation *output_op = g.create<ops::OutputOp>(relu_op->getOutput(0));
input_op->getOutput(0)->setName("in");
relu_op->getOutput(0)->setName("out");
}
vector<mir::Operation::Output *> inputs;
for (std::size_t i = 0; i < input_shapes.size(); ++i)
{
- auto input_op = g.create<ops::InputOp>("x" + to_string(i), input_shapes[i]);
- inputs.push_back(input_op->getOutput(0));
+ auto input = g.create<ops::InputOp>(input_shapes[i])->getOutput(0);
+ input->setName("x" + to_string(i));
+ inputs.push_back(input);
}
// Create the operation.
// Create graph outputs.
for (std::size_t i = 0; i < op->getNumOutputs(); ++i)
- g.create<ops::OutputOp>("y" + to_string(i), op->getOutput(i));
+ {
+ op->getOutput(i)->setName("y" + to_string(i));
+ g.create<ops::OutputOp>(op->getOutput(i));
+ }
}
/**
data_ptr[i] = i;
return TensorVariant(DataType::FLOAT32, shape, data_ptr);
}
-}
+} // namespace
// Actual tests
Graph g;
OpConstructor op_generator = [&constant_data](Graph &g,
const vector<Operation::Output *> &inputs) {
- return g.create<mir::ops::ConstantOp>("data", constant_data);
+ return g.create<mir::ops::ConstantOp>(constant_data);
};
fillGraph(g, op_generator, {});
{
Graph g;
OpConstructor op_generator = [](Graph &g, const vector<Operation::Output *> &inputs) {
- return g.create<mir::ops::ConcatOp>("concat", inputs, 3);
+ return g.create<mir::ops::ConcatOp>(inputs, 3);
};
vector<Shape> input_shapes{{2, 3, 5, 1}, {2, 3, 5, 3}};
OpConstructor op_generator =
[kernel_tensor, strides](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
std::vector<int32_t> padding{0, 0};
- auto kernel = g.create<mir::ops::ConstantOp>("", kernel_tensor)->getOutput(0);
- return g.create<mir::ops::Conv2DOp>("conv2d", inputs[0], kernel, strides, padding, padding);
+ auto kernel = g.create<mir::ops::ConstantOp>(kernel_tensor)->getOutput(0);
+ return g.create<mir::ops::Conv2DOp>(inputs[0], kernel, strides, padding, padding);
};
vector<Shape> input_shapes{{1, 10, 10, channels}};
OpConstructor op_generator =
[kernel_tensor, strides](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
std::vector<int32_t> padding{0, 0};
- auto kernel = g.create<mir::ops::ConstantOp>("", kernel_tensor)->getOutput(0);
- return g.create<mir::ops::DepthwiseConv2DOp>("depthwiseConv2d", inputs[0], kernel, strides,
- padding, padding);
+ auto kernel = g.create<mir::ops::ConstantOp>(kernel_tensor)->getOutput(0);
+ return g.create<mir::ops::DepthwiseConv2DOp>(inputs[0], kernel, strides, padding, padding);
};
vector<Shape> input_shapes{{1, 10, 10, channels}};
Graph g;
OpConstructor opGenerator = [weights_tensor](Graph &g,
const vector<Operation::Output *> &inputs) {
- auto weights = g.create<mir::ops::ConstantOp>("", weights_tensor)->getOutput(0);
- return g.create<mir::ops::FullyConnectedOp>("fc", inputs[0], weights);
+ auto weights = g.create<mir::ops::ConstantOp>(weights_tensor)->getOutput(0);
+ return g.create<mir::ops::FullyConnectedOp>(inputs[0], weights);
};
fillGraph(g, opGenerator, {input_shape_data});
OpConstructor op_generator =
[window_shape, strides](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
std::vector<int32_t> padding{0, 0};
- return g.create<mir::ops::PoolOp>("maxPool", inputs[0], ops::PoolOp::PoolingType::MAX,
- window_shape, strides, padding, padding,
+ return g.create<mir::ops::PoolOp>(inputs[0], ops::PoolOp::PoolingType::MAX, window_shape,
+ strides, padding, padding,
mir::ops::PoolOp::BorderType::EMPTY);
};
TEST(acl_backend_mir_to_dom, relu)
{
OpConstructor op_generator = [](Graph &g, const std::vector<Operation::Output *> &inputs) {
- return g.create<mir::ops::ReluOp>("relu", inputs[0]);
+ return g.create<mir::ops::ReluOp>(inputs[0]);
};
testActivationOp(op_generator);
{
float cap = 6;
OpConstructor op_generator = [cap](Graph &g, const std::vector<Operation::Output *> &inputs) {
- return g.create<mir::ops::CappedReluOp>("capped_relu", inputs[0], cap);
+ return g.create<mir::ops::CappedReluOp>(inputs[0], cap);
};
testActivationOp(op_generator);
TEST(acl_backend_mir_to_dom, sigmoid)
{
OpConstructor op_generator = [](Graph &g, const std::vector<Operation::Output *> &inputs) {
- return g.create<mir::ops::SigmoidOp>("sigmoid", inputs[0]);
+ return g.create<mir::ops::SigmoidOp>(inputs[0]);
};
testActivationOp(op_generator);
TEST(acl_backend_mir_to_dom, tanh)
{
OpConstructor op_generator = [](Graph &g, const std::vector<Operation::Output *> &inputs) {
- return g.create<mir::ops::TanhOp>("tanh", inputs[0]);
+ return g.create<mir::ops::TanhOp>(inputs[0]);
};
testActivationOp(op_generator);
{
Graph g;
OpConstructor op_generator = [](Graph &g, const vector<Operation::Output *> &inputs) {
- return g.create<mir::ops::SoftmaxOp>("softmax", inputs[0], 3);
+ return g.create<mir::ops::SoftmaxOp>(inputs[0], 3);
};
vector<Shape> input_shapes{{1, 1, 1, 3}};
Shape output_shape{1, h * w * c};
OpConstructor op_generator = [output_shape](Graph &g, const vector<Operation::Output *> &inputs) {
- return g.create<mir::ops::ReshapeOp>("reshape", inputs[0], output_shape);
+ return g.create<mir::ops::ReshapeOp>(inputs[0], output_shape);
};
fillGraph(g, op_generator, {input_shape});
Graph g;
OpConstructor op_generator = [&perm](Graph &g, const vector<Operation::Output *> &inputs) {
- return g.create<mir::ops::TransposeOp>("transpose", inputs[0], perm);
+ return g.create<mir::ops::TransposeOp>(inputs[0], perm);
};
vector<Shape> input_shapes{{1, 10, 10, channels}};
* ||
* [relu]
*/
- Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
- Operation *tr1 = g.create<ops::TransposeOp>("tr", input->getOutput(0), vector<size_t>{1, 0, 2});
- Operation *tr15 = g.create<ops::TransposeOp>("tr", tr1->getOutput(0), vector<size_t>{1, 0, 2});
- Operation *tr2 = g.create<ops::TransposeOp>("tr", tr15->getOutput(0), vector<size_t>{1, 0, 2});
- Operation *relu = g.create<ops::ReluOp>("relu", tr2->getOutput(0));
+ Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});
+ Operation *tr1 = g.create<ops::TransposeOp>(input->getOutput(0), vector<size_t>{1, 0, 2});
+ Operation *tr15 = g.create<ops::TransposeOp>(tr1->getOutput(0), vector<size_t>{1, 0, 2});
+ Operation *tr2 = g.create<ops::TransposeOp>(tr15->getOutput(0), vector<size_t>{1, 0, 2});
+ Operation *relu = g.create<ops::ReluOp>(tr2->getOutput(0));
// Check that the layout is as desired
std::stringstream ss;
pass.run(&g);
g.accept(&d);
// Assert only 1 transpose remains
- ASSERT_EQ("i_input.t_tr.r_relu.", ss.str());
+ ASSERT_EQ("i_0.t_1.r_4.", ss.str());
}
TEST(OptPass, combineTransposesLinear)
* ||
* [relu]
*/
- Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
- Operation *tr1 = g.create<ops::TransposeOp>("tr1", input->getOutput(0), vector<size_t>{1, 0, 2});
- Operation *tr2 = g.create<ops::TransposeOp>("tr2", tr1->getOutput(0), vector<size_t>{0, 2, 1});
- Operation *relu = g.create<ops::ReluOp>("relu", tr2->getOutput(0));
+ Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});
+ Operation *tr1 = g.create<ops::TransposeOp>(input->getOutput(0), vector<size_t>{1, 0, 2});
+ Operation *tr2 = g.create<ops::TransposeOp>(tr1->getOutput(0), vector<size_t>{0, 2, 1});
+ Operation *relu = g.create<ops::ReluOp>(tr2->getOutput(0));
std::stringstream ss;
DumpVisitor d(ss);
g.accept(&d);
// Assert transposes are combined
- ASSERT_EQ("i_input.t_tr1new.r_relu.", ss.str());
+ ASSERT_EQ("i_0.t_4.r_3.", ss.str());
auto ax_ord_actual = dynamic_cast<ops::TransposeOp *>(
(*(g.getInputs()[0]->getOutput(0)->getConsumers().begin()))->getNode())
->getAxisOrder();
* \\ //
* [Add]
*/
- Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3, 2});
- Operation *tr1 =
- g.create<ops::TransposeOp>("tr1", input->getOutput(0), vector<size_t>{1, 0, 2, 3});
- Operation *tr2 = g.create<ops::TransposeOp>("tr2", tr1->getOutput(0), vector<size_t>{1, 0, 2, 3});
- Operation *tr3 = g.create<ops::TransposeOp>("tr3", tr1->getOutput(0), vector<size_t>{1, 0, 2, 3});
- Operation *elw = g.create<ops::AddOp>("elewiseAdd", tr2->getOutput(0), tr3->getOutput(0));
+ Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3, 2});
+ Operation *tr1 = g.create<ops::TransposeOp>(input->getOutput(0), vector<size_t>{1, 0, 2, 3});
+ Operation *tr2 = g.create<ops::TransposeOp>(tr1->getOutput(0), vector<size_t>{1, 0, 2, 3});
+ Operation *tr3 = g.create<ops::TransposeOp>(tr1->getOutput(0), vector<size_t>{1, 0, 2, 3});
+ Operation *elw = g.create<ops::AddOp>(tr2->getOutput(0), tr3->getOutput(0));
std::stringstream ss;
DumpVisitor d(ss);
CombineTransposes pass;
pass.run(&g);
g.accept(&d);
- ASSERT_EQ("i_input.b_elewiseAdd.", ss.str());
- ASSERT_EQ(elw->getInput(0)->getProducer()->getNode()->getName(), "input");
- ASSERT_EQ(elw->getInput(1)->getProducer()->getNode()->getName(), "input");
+ ASSERT_EQ("i_0.b_4.", ss.str());
+ ASSERT_EQ(elw->getInput(0)->getProducer()->getNode()->getType(), mir::Operation::Type::input);
+ ASSERT_EQ(elw->getInput(1)->getProducer()->getNode()->getType(), mir::Operation::Type::input);
}
TEST(OptPass, combineTransposesOpOrder)
* \\ //
* [Add]
*/
- Operation *in1 = g.create<ops::InputOp>("inp1", Shape{1, 2, 3});
- Operation *in2 = g.create<ops::InputOp>("inp2", Shape{1, 2, 3});
- Operation *tr0 = g.create<ops::TransposeOp>("tr0", in1->getOutput(0), vector<size_t>{1, 0, 2});
- Operation *tr1 = g.create<ops::TransposeOp>("tr1", in2->getOutput(0), vector<size_t>{2, 1, 0});
- Operation *tr2 = g.create<ops::TransposeOp>("tr2", tr0->getOutput(0), vector<size_t>{1, 0, 2});
- Operation *tr3 = g.create<ops::TransposeOp>("tr3", tr1->getOutput(0), vector<size_t>{2, 1, 0});
- Operation *elw = g.create<ops::AddOp>("elewiseAdd", tr2->getOutput(0), tr3->getOutput(0));
- g.create<ops::OutputOp>("out", elw->getOutput(0));
+ Operation *in1 = g.create<ops::InputOp>(Shape{1, 2, 3});
+ Operation *in2 = g.create<ops::InputOp>(Shape{1, 2, 3});
+ Operation *tr0 = g.create<ops::TransposeOp>(in1->getOutput(0), vector<size_t>{1, 0, 2});
+ Operation *tr1 = g.create<ops::TransposeOp>(in2->getOutput(0), vector<size_t>{2, 1, 0});
+ Operation *tr2 = g.create<ops::TransposeOp>(tr0->getOutput(0), vector<size_t>{1, 0, 2});
+ Operation *tr3 = g.create<ops::TransposeOp>(tr1->getOutput(0), vector<size_t>{2, 1, 0});
+ Operation *elw = g.create<ops::AddOp>(tr2->getOutput(0), tr3->getOutput(0));
+ g.create<ops::OutputOp>(elw->getOutput(0));
int n1 = elw->getInput(0)->getNode()->getInput(0)->getNode()->getInput(0)->getNode()->getId();
int n2 = elw->getInput(1)->getNode()->getInput(0)->getNode()->getInput(0)->getNode()->getId();
CombineTransposes pass;
pass.run(&g);
- ASSERT_EQ(g.getOutputs()[0]->getInput(0)->getProducer()->getNode()->getName(), "elewiseAdd");
+ ASSERT_EQ(g.getOutputs()[0]->getInput(0)->getProducer()->getNode()->getType(),
+ mir::Operation::Type::add);
// Order is preserved
ASSERT_EQ(n1, elw->getInput(0)->getNode()->getId());
ASSERT_EQ(n2, elw->getInput(1)->getNode()->getId());
mir::Graph g;
// Create graph: 'input->conv->bias->scale->scale->scale->bias'
- auto input = g.create<ops::InputOp>("input", Shape{1, 299, 299, 3});
- auto conv_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10, 3, 3, 3}));
+ auto input = g.create<ops::InputOp>(Shape{1, 299, 299, 3});
+ auto conv_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10, 3, 3, 3}));
std::vector<int32_t> padding{0, 0};
- auto conv = g.create<ops::Conv2DOp>("conv", input->getOutput(0), conv_const->getOutput(0),
- Shape{1, 1}, padding, padding);
- auto bias1_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
- auto bias1 = g.create<ops::AddOp>("bias1", conv->getOutput(0), bias1_const->getOutput(0));
- auto scale1_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
- auto scale1 = g.create<ops::MulOp>("scale1", bias1->getOutput(0), scale1_const->getOutput(0));
- auto scale2_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
- auto scale2 = g.create<ops::MulOp>("scale2", scale1->getOutput(0), scale2_const->getOutput(0));
- auto scale3_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
- auto scale3 = g.create<ops::MulOp>("scale3", scale2->getOutput(0), scale3_const->getOutput(0));
- auto bias2_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
- g.create<ops::AddOp>("", scale3->getOutput(0), bias2_const->getOutput(0));
+ auto conv = g.create<ops::Conv2DOp>(input->getOutput(0), conv_const->getOutput(0), Shape{1, 1},
+ padding, padding);
+ auto bias1_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
+ auto bias1 = g.create<ops::AddOp>(conv->getOutput(0), bias1_const->getOutput(0));
+ auto scale1_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
+ auto scale1 = g.create<ops::MulOp>(bias1->getOutput(0), scale1_const->getOutput(0));
+ auto scale2_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
+ auto scale2 = g.create<ops::MulOp>(scale1->getOutput(0), scale2_const->getOutput(0));
+ auto scale3_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
+ auto scale3 = g.create<ops::MulOp>(scale2->getOutput(0), scale3_const->getOutput(0));
+ auto bias2_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
+ g.create<ops::AddOp>(scale3->getOutput(0), bias2_const->getOutput(0));
// Check that the layout is as desired
std::stringstream ss;
pass.run(&g);
g.accept(&d);
// Assert only 'conv->bias' remains
- ASSERT_TRUE("i_input.const_.const_.conv_conv.b_bias1." == ss.str() ||
- "const_.i_input.const_.conv_conv.b_bias1." == ss.str() ||
- "const_.const_.i_input.conv_conv.b_bias1." == ss.str());
+ ASSERT_TRUE("i_0.const_25.const_23.conv_26.b_24." == ss.str() ||
+ "i_0.const_23.const_25.conv_26.b_24." == ss.str() ||
+ "const_25.i_0.const_23.conv_26.b_24." == ss.str() ||
+ "const_23.i_0.const_25.conv_26.b_24." == ss.str() ||
+ "const_25.const_23.i_0.conv_26.b_24." == ss.str() ||
+ "const_23.const_25.i_0.conv_26.b_24." == ss.str());
}
} // unnamed namespace
* ||
* [relu]
*/
- Operation *C0 = g.create<ops::ConstantOp>("C0", TensorVariant(DataType::FLOAT32, {2, 2}));
- Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
- Operation *C1 = g.create<ops::ConstantOp>("C1", TensorVariant(DataType::FLOAT32, {2, 2}));
- Operation *C2 = g.create<ops::ConstantOp>("C2", TensorVariant(DataType::FLOAT32, {2, 2}));
- Operation *relu = g.create<ops::ReluOp>("relu", input->getOutput(0));
+ Operation *C0 = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {2, 2}));
+ Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});
+ Operation *C1 = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {2, 2}));
+ Operation *C2 = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {2, 2}));
+ Operation *relu = g.create<ops::ReluOp>(input->getOutput(0));
std::stringstream ss;
RemoveDeadEnds pass;
* ||
* [tanh]
*/
- Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
- Operation *tr1 = g.create<ops::TransposeOp>("tr1", input->getOutput(0), vector<size_t>{1, 0, 2});
- Operation *relu = g.create<ops::ReluOp>("relu", tr1->getOutput(0));
- Operation *tanh = g.create<ops::TanhOp>("tanh", relu->getOutput(0));
- Operation *out = g.create<ops::OutputOp>("out", tanh->getOutput(0));
+ Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});
+ Operation *tr1 = g.create<ops::TransposeOp>(input->getOutput(0), vector<size_t>{1, 0, 2});
+ Operation *relu = g.create<ops::ReluOp>(tr1->getOutput(0));
+ Operation *tanh = g.create<ops::TanhOp>(relu->getOutput(0));
+ Operation *out = g.create<ops::OutputOp>(tanh->getOutput(0));
(void)out;
// Check that the layout is as desired
pass.run(&g);
// Assert transposes are removed
- ASSERT_EQ(g.getInputs()[0]->getName(), "input");
- ASSERT_EQ(getPrev(g.getOutputs()[0])->getName(), "tanh");
- ASSERT_EQ(getNext(g.getInputs()[0])->getName(), "relu");
- ASSERT_EQ(getPrev(tanh)->getName(), "tr1");
+ ASSERT_EQ(g.getInputs()[0]->getType(), mir::Operation::Type::input);
+ ASSERT_EQ(getPrev(g.getOutputs()[0])->getType(), mir::Operation::Type::tanh);
+ ASSERT_EQ(getNext(g.getInputs()[0])->getType(), mir::Operation::Type::ReLU);
+ ASSERT_EQ(getPrev(tanh)->getType(), mir::Operation::Type::transpose);
}
/* This tests swapping concat and transpose */
* ||
* [TanH]
*/
- Operation *in1 = g.create<ops::InputOp>("inp1", Shape{1, 1, 2, 3});
- Operation *in2 = g.create<ops::InputOp>("inp2", Shape{1, 1, 2, 3});
- Operation *tr1 = g.create<ops::TransposeOp>("tr1", in1->getOutput(0), vector<size_t>{0, 3, 1, 2});
- Operation *tr2 = g.create<ops::TransposeOp>("tr2", in2->getOutput(0), vector<size_t>{0, 3, 1, 2});
- Operation *conc = g.create<ops::ConcatOp>(
- "concat", vector<Operation::Output *>{tr1->getOutput(0), tr2->getOutput(0)}, 1);
- Operation *tanh = g.create<ops::TanhOp>("tanh", conc->getOutput(0));
- Operation *out = g.create<ops::OutputOp>("out", tanh->getOutput(0));
+ Operation *in1 = g.create<ops::InputOp>(Shape{1, 1, 2, 3});
+ Operation *in2 = g.create<ops::InputOp>(Shape{1, 1, 2, 3});
+ Operation *tr1 = g.create<ops::TransposeOp>(in1->getOutput(0), vector<size_t>{0, 3, 1, 2});
+ Operation *tr2 = g.create<ops::TransposeOp>(in2->getOutput(0), vector<size_t>{0, 3, 1, 2});
+ Operation *conc =
+ g.create<ops::ConcatOp>(vector<Operation::Output *>{tr1->getOutput(0), tr2->getOutput(0)}, 1);
+ Operation *tanh = g.create<ops::TanhOp>(conc->getOutput(0));
+ Operation *out = g.create<ops::OutputOp>(tanh->getOutput(0));
(void)out;
// Check that layout is as desired
SinkTranspose pass;
* ||
* [TanH]
*/
- Operation *in1 = g.create<ops::InputOp>("inp1", Shape{1, 1, 2, 3});
- Operation *in2 = g.create<ops::InputOp>("inp2", Shape{1, 1, 2, 3});
- Operation *relu1 = g.create<ops::ReluOp>("relu1", in1->getOutput(0));
- Operation *relu2 = g.create<ops::ReluOp>("relu2", in2->getOutput(0));
+ Operation *in1 = g.create<ops::InputOp>(Shape{1, 1, 2, 3});
+ Operation *in2 = g.create<ops::InputOp>(Shape{1, 1, 2, 3});
+ Operation *relu1 = g.create<ops::ReluOp>(in1->getOutput(0));
+ Operation *relu2 = g.create<ops::ReluOp>(in2->getOutput(0));
Operation *conc = g.create<ops::ConcatOp>(
- "concat", vector<Operation::Output *>{relu1->getOutput(0), relu2->getOutput(0)}, 1);
- Operation *tanh = g.create<ops::TanhOp>("tanh", conc->getOutput(0));
- Operation *out = g.create<ops::OutputOp>("out", tanh->getOutput(0));
+ vector<Operation::Output *>{relu1->getOutput(0), relu2->getOutput(0)}, 1);
+ Operation *tanh = g.create<ops::TanhOp>(conc->getOutput(0));
+ Operation *out = g.create<ops::OutputOp>(tanh->getOutput(0));
(void)out;
// Check that layout is as desired
* ||
* [tanh]
*/
- Operation *input = g.create<ops::InputOp>("input", Shape{1, 4, 4, 3});
- Operation *relu = g.create<ops::ReluOp>("relu", input->getOutput(0));
- Operation *mp = g.create<ops::PoolOp>("pool", relu->getOutput(0), ops::PoolOp::PoolingType::MAX,
+ Operation *input = g.create<ops::InputOp>(Shape{1, 4, 4, 3});
+ Operation *relu = g.create<ops::ReluOp>(input->getOutput(0));
+ Operation *mp = g.create<ops::PoolOp>(relu->getOutput(0), ops::PoolOp::PoolingType::MAX,
Shape{2, 2}, Shape{2, 2}, vector<int32_t>{0, 0},
vector<int32_t>{0, 0}, ops::PoolOp::BorderType::EMPTY);
- Operation *tanh = g.create<ops::TanhOp>("tanh", mp->getOutput(0));
- Operation *out = g.create<ops::OutputOp>("out", tanh->getOutput(0));
+ Operation *tanh = g.create<ops::TanhOp>(mp->getOutput(0));
+ Operation *out = g.create<ops::OutputOp>(tanh->getOutput(0));
(void)out;
SinkRelu pass;
g.accept(&d);
// tanh(relu(pool(input)))
- ASSERT_EQ(getNext(g.getInputs()[0])->getName(), "pool");
- ASSERT_EQ(getPrev(g.getOutputs()[0])->getName(), "tanh");
- ASSERT_EQ("i_input.p_pool.r_relu.th_tanh.", ss.str());
+ ASSERT_EQ(getNext(g.getInputs()[0])->getType(), mir::Operation::Type::pool);
+ ASSERT_EQ(getPrev(g.getOutputs()[0])->getType(), mir::Operation::Type::tanh);
+ ASSERT_EQ("i_0.p_5.r_6.th_3.", ss.str());
}
} // unnamed namespace
public:
explicit DumpVisitor(std::ostream &s) : _s(s) {}
- void visit(mir::ops::InputOp &op) override { _s << "i_" << op.getName() << "."; };
+ void visit(mir::ops::InputOp &op) override { _s << "i_" << std::to_string(op.getId()) << "."; };
- void visit(mir::ops::TanhOp &op) override { _s << "th_" << op.getName() << "."; }
+ void visit(mir::ops::TanhOp &op) override { _s << "th_" << std::to_string(op.getId()) << "."; }
- void visit(mir::ops::MulOp &op) override { _s << "s_" << op.getName() << "."; }
+ void visit(mir::ops::MulOp &op) override { _s << "s_" << std::to_string(op.getId()) << "."; }
- void visit(mir::ops::AddOp &op) override { _s << "b_" << op.getName() << "."; }
+ void visit(mir::ops::AddOp &op) override { _s << "b_" << std::to_string(op.getId()) << "."; }
- void visit(mir::ops::ReluOp &op) override { _s << "r_" << op.getName() << "."; }
+ void visit(mir::ops::ReluOp &op) override { _s << "r_" << std::to_string(op.getId()) << "."; }
- void visit(mir::ops::PoolOp &op) override { _s << "p_" << op.getName() << "."; }
+ void visit(mir::ops::PoolOp &op) override { _s << "p_" << std::to_string(op.getId()) << "."; }
- void visit(mir::ops::TransposeOp &op) override { _s << "t_" << op.getName() << "."; }
+ void visit(mir::ops::TransposeOp &op) override
+ {
+ _s << "t_" << std::to_string(op.getId()) << ".";
+ }
- void visit(mir::ops::Conv2DOp &op) override { _s << "conv_" << op.getName() << "."; }
+ void visit(mir::ops::Conv2DOp &op) override
+ {
+ _s << "conv_" << std::to_string(op.getId()) << ".";
+ }
- void visit(mir::ops::ConstantOp &op) override { _s << "const_" << op.getName() << "."; }
+ void visit(mir::ops::ConstantOp &op) override
+ {
+ _s << "const_" << std::to_string(op.getId()) << ".";
+ }
std::ostream &_s;
};
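
The id-based expectations above follow from creation order: Graph::create()
assigns sequential ids, so nodes that survive a pass keep their ids while
nodes the pass creates get fresh ones. A sketch of how "i_0.t_4.r_3." in
combineTransposesLinear arises (the sequential-id assignment is inferred
from the tests, not stated by the API):

    Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});        // id 0
    Operation *tr1 =
        g.create<ops::TransposeOp>(input->getOutput(0), vector<size_t>{1, 0, 2}); // id 1
    Operation *tr2 =
        g.create<ops::TransposeOp>(tr1->getOutput(0), vector<size_t>{0, 2, 1});   // id 2
    Operation *relu = g.create<ops::ReluOp>(tr2->getOutput(0));       // id 3
    // CombineTransposes fuses tr1 and tr2 into one new TransposeOp -> id 4,
    // so DumpVisitor prints "i_0.t_4.r_3.".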
std::vector<mir::Operation::Output *> inputs;
for (std::size_t i = 0; i < input_ntensors.size(); ++i)
{
- auto input_op =
- g.create<mir::ops::InputOp>("x" + std::to_string(i), input_ntensors[i]->getShape());
+ auto input_op = g.create<mir::ops::InputOp>(input_ntensors[i]->getShape());
input_op->getOutput(0)->setName("x" + std::to_string(i));
inputs.push_back(input_op->getOutput(0));
}
// Create graph outputs.
assert(op->getNumOutputs() == 1);
- g.create<mir::ops::OutputOp>(op->getName(), op->getOutput(0));
- op->setName("");
+ g.create<mir::ops::OutputOp>(op->getOutput(0));
return op;
}
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
auto op_generator = [cap](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::CappedReluOp>("y", inputs[0], cap);
+ return g.create<mir::ops::CappedReluOp>(inputs[0], cap);
};
createAndRunTestGraph(op_generator, cappedRelu, input_ntensors, input_atensor);
fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
auto op_generator = [axis](mir::Graph &g,
const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::ConcatOp>("y", inputs, axis);
+ return g.create<mir::ops::ConcatOp>(inputs, axis);
};
createAndRunTestGraph(op_generator, concat<Tensor, Tensor>, input_ntensors, input_atensors[0],
fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::AddOp>("y", inputs[0], inputs[1]);
+ return g.create<mir::ops::AddOp>(inputs[0], inputs[1]);
};
createAndRunTestGraph(op_generator, ElementWise<Add, Tensor, Tensor>, input_ntensors,
fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::MulOp>("y", inputs[0], inputs[1]);
+ return g.create<mir::ops::MulOp>(inputs[0], inputs[1]);
};
createAndRunTestGraph(opGenerator, ElementWise<Mul, Tensor, Tensor>, input_ntensors,
fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 5.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::DivOp>("y", inputs[0], inputs[1]);
+ return g.create<mir::ops::DivOp>(inputs[0], inputs[1]);
};
createAndRunTestGraph(opGenerator, ElementWise<Div, Tensor, Tensor>, input_ntensors,
fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::AddOp>("y", inputs[0], inputs[1]);
+ return g.create<mir::ops::AddOp>(inputs[0], inputs[1]);
};
createAndRunTestGraph(op_generator, ElementWise<Add, Tensor, Tensor>, input_ntensors,
fillTensors(input_n_tensors[0], input_atensors[0], shape_data, 1.0f);
fillTensors(input_n_tensors[1], input_atensors[1], shape_data, 2.0f);
auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::SubOp>("y", inputs[0], inputs[1]);
+ return g.create<mir::ops::SubOp>(inputs[0], inputs[1]);
};
createAndRunTestGraph(opGenerator, ElementWise<Sub, Tensor, Tensor>, input_n_tensors,
fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::MulOp>("y", inputs[0], inputs[1]);
+ return g.create<mir::ops::MulOp>(inputs[0], inputs[1]);
};
createAndRunTestGraph(op_generator, ElementWise<Mul, Tensor, Tensor>, input_ntensors,
fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::MaxOp>("y", inputs[0], inputs[1]);
+ return g.create<mir::ops::MaxOp>(inputs[0], inputs[1]);
};
createAndRunTestGraph(op_generator, ElementWise<Max, Tensor, Tensor>, input_ntensors,
auto pad_t = mir::ops::PaddingType::Same;
auto op_generator = [&strides, pad_t](
mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::DeConv2DOp>("y", inputs[0], inputs[1], strides, pad_t);
+ return g.create<mir::ops::DeConv2DOp>(inputs[0], inputs[1], strides, pad_t);
};
createAndRunTestGraph(op_generator, convTransposed2d, input_ntensors, input_atensor0,
auto op_generator = [&strides](mir::Graph &g,
const std::vector<mir::Operation::Output *> &inputs) {
std::vector<int32_t> padding{0, 0};
- return g.create<mir::ops::Conv2DOp>("y", inputs[0], inputs[1], strides, padding,
+ return g.create<mir::ops::Conv2DOp>(inputs[0], inputs[1], strides, padding,
padding);
};
auto op_generator = [&strides](mir::Graph &g,
const std::vector<mir::Operation::Output *> &inputs) {
std::vector<int32_t> padding{0, 0};
- return g.create<mir::ops::DepthwiseConv2DOp>("y", inputs[0], inputs[1], strides,
- padding, padding);
+ return g.create<mir::ops::DepthwiseConv2DOp>(inputs[0], inputs[1], strides, padding,
+ padding);
};
createAndRunTestGraph(op_generator, depthwiseConv2d, input_ntensors, input_atensor0,
fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
fillTensors(input_ntensors[1], input_atensor1, weights_shape_data, 1.0f);
auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::FullyConnectedOp>("y", inputs[0], inputs[1]);
+ return g.create<mir::ops::FullyConnectedOp>(inputs[0], inputs[1]);
};
createAndRunTestGraph(op_generator, fullConnect, input_ntensors, input_atensor0, input_atensor1);
auto op_generator = [&res_shape](mir::Graph &g,
const std::vector<mir::Operation::Output *> &inputs) {
return g.create<mir::ops::ResizeOp>(
- "y", inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
+ inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
};
createAndRunTestGraph(op_generator, resize, input_ntensors, input_atensor);
auto op_generator = [&scales](mir::Graph &g,
const std::vector<mir::Operation::Output *> &inputs) {
return g.create<mir::ops::ResizeOp>(
- "y", inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales);
+ inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales);
};
createAndRunTestGraph(op_generator, resize, input_ntensors, input_atensor);
}
mir::Shape &window_shape, mir::Shape &strides, irOps::PoolOp::BorderType border)
{
std::vector<int32_t> padding{0, 0};
- return g.create<mir::ops::PoolOp>("pool", inputs[0], poolT, window_shape, strides, padding,
- padding, border);
+ return g.create<mir::ops::PoolOp>(inputs[0], poolT, window_shape, strides, padding, padding,
+ border);
};
template <irOps::PoolOp::PoolingType poolT, typename Func>
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::ReluOp>("y", inputs[0]);
+ return g.create<mir::ops::ReluOp>(inputs[0]);
};
createAndRunTestGraph(op_generator, relu, input_ntensors, input_atensor);
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::LeakyReluOp>("y", inputs[0], 0.1);
+ return g.create<mir::ops::LeakyReluOp>(inputs[0], 0.1);
};
createAndRunTestGraph(op_generator, leakyRelu, input_ntensors, input_atensor);
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::SigmoidOp>("y", inputs[0]);
+ return g.create<mir::ops::SigmoidOp>(inputs[0]);
};
createAndRunTestGraph(opGenerator, sigmoid, input_ntensors, input_atensor);
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::EluOp>("y", inputs[0], 1);
+ return g.create<mir::ops::EluOp>(inputs[0], 1);
};
createAndRunTestGraph(op_generator, elu, input_ntensors, input_atensor);
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::TanhOp>("y", inputs[0]);
+ return g.create<mir::ops::TanhOp>(inputs[0]);
};
createAndRunTestGraph(op_generator, tanhActivation, input_ntensors, input_atensor);
fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
auto op_generator = [&axis_list, keep_dims](
mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- auto op = g.create<mir::ops::ReduceOp>("y", inputs[0], axis_list, keep_dims,
+ auto op = g.create<mir::ops::ReduceOp>(inputs[0], axis_list, keep_dims,
mir::ops::ReduceOp::FuncType::mean);
return op;
};
vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
auto op_generator = [axis](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::SoftmaxOp>("y", inputs[0], axis);
+ return g.create<mir::ops::SoftmaxOp>(inputs[0], axis);
};
createAndRunTestGraph(op_generator, softmax, input_ntensors, input_atensor);
vector<unique_ptr<mir::TensorVariant>> input_n_tensor(1);
fillTensors(input_n_tensor[0], input_atensor, shape_data, 1.0f);
auto op_gen = [st, sz](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::SliceOp>("y", inputs[0], mir::Shape(st), mir::Shape(sz));
+ return g.create<mir::ops::SliceOp>(inputs[0], mir::Shape(st), mir::Shape(sz));
};
createAndRunTestGraph(op_gen, slice, input_n_tensor, input_atensor);
}
fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
auto op_generator = [&output_nshape](mir::Graph &g,
const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::ReshapeOp>("y", inputs[0], output_nshape);
+ return g.create<mir::ops::ReshapeOp>(inputs[0], output_nshape);
};
createAndRunTestGraph(op_generator, reshape, input_ntensors, input_atensor);
vector<unique_ptr<mir::TensorVariant>> input_ntensor(1);
fillTensors(input_ntensor[0], input_atensor, shape_data, 1.0f);
auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::SqrtOp>("y", inputs[0]);
+ return g.create<mir::ops::SqrtOp>(inputs[0]);
};
createAndRunTestGraph(op_generator, sqrtFN, input_ntensor, input_atensor);
}
auto op_generator = [&padding_before, &padding_after, padding_value](
mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::PadOp>("y", inputs[0], padding_before, padding_after, padding_value);
+ return g.create<mir::ops::PadOp>(inputs[0], padding_before, padding_after, padding_value);
};
createAndRunTestGraph(op_generator, pad, input_ntensor, input_atensor);
{
auto op_generator = [&permute](mir::Graph &g,
const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::TransposeOp>("transpose", inputs[0], permute);
+ return g.create<mir::ops::TransposeOp>(inputs[0], permute);
};
createAndRunTestGraph(op_generator, transpose, input_ntensor_4d, input_atensor_4d);
}
{
auto op_generator = [&permute](mir::Graph &g,
const std::vector<mir::Operation::Output *> &inputs) {
- return g.create<mir::ops::TransposeOp>("transpose", inputs[0], permute);
+ return g.create<mir::ops::TransposeOp>(inputs[0], permute);
};
createAndRunTestGraph(op_generator, transpose, input_ntensor_3d, input_atensor_3d);
}
cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
mir::Graph g;
- Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3, 4});
- input->getOutput(0)->setName("input");
- Operation *output = g.create<ops::ReluOp>("output", input->getOutput(0));
+ Operation::Output *input = g.create<ops::InputOp>(Shape{1, 2, 3, 4})->getOutput(0);
+ input->setName("input");
+ Operation *output = g.create<ops::ReluOp>(input);
// Test that the generator creates the output dir and files
if (isFileExists(TEST_DIR))
* \ /
* [join]
*/
- Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
- Operation *head1 = g.create<ops::ReluOp>("head1", input->getOutput(0));
- Operation *head2 = g.create<ops::ReluOp>("head2", input->getOutput(0));
- Operation *tail1 = g.create<ops::ReluOp>("tail1", head1->getOutput(0));
- Operation *tail2 = g.create<ops::ReluOp>("tail2", head2->getOutput(0));
+ Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});
+ Operation *head1 = g.create<ops::ReluOp>(input->getOutput(0));
+ Operation *head2 = g.create<ops::ReluOp>(input->getOutput(0));
+ Operation *tail1 = g.create<ops::ReluOp>(head1->getOutput(0));
+ Operation *tail2 = g.create<ops::ReluOp>(head2->getOutput(0));
std::vector<mir::Operation::Output *> concat_inputs{tail1->getOutput(0), tail2->getOutput(0)};
- Operation *join = g.create<ops::ConcatOp>("join", concat_inputs, 0);
+ Operation *join = g.create<ops::ConcatOp>(concat_inputs, 0);
input->getOutput(0)->setName("input");
head1->getOutput(0)->setName("head1");
head2->getOutput(0)->setName("head2");