[nnc] Do not set / check names of operations (#6981)
author Sergei Barannikov/AI Tools Lab/SRR/Engineer/Samsung Electronics <s.barannikov@samsung.com>
Wed, 28 Aug 2019 12:47:57 +0000 (21:47 +0900)
committer Alexander Efimov/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Wed, 28 Aug 2019 12:47:57 +0000 (15:47 +0300)
Operation names are going to be removed.
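
For illustration, a minimal before/after sketch of the affected graph-building calls
(the ReLU operation and the "relu" name are hypothetical; the real changes are in the
hunks below). It assumes an existing `mir::Graph g;` and an `input` operation:

    // Before: the operation name was passed as the first argument of Graph::create().
    //   mir::Operation *relu = g.create<mir::ops::ReluOp>("relu", input->getOutput(0));

    // After: create() takes only the operands/attributes; when a test still needs a
    // name (e.g. to identify graph inputs/outputs), it is set on the output instead.
    mir::Operation *relu = g.create<mir::ops::ReluOp>(input->getOutput(0));
    relu->getOutput(0)->setName("relu");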

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
15 files changed:
compiler/nnc/passes/optimizations/CombineTransposes.cpp
compiler/nnc/passes/optimizations/ConstantFoldTranspose.cpp
compiler/nnc/passes/optimizations/FuseArithmeticOps.cpp
compiler/nnc/passes/optimizations/SinkRelu.cpp
compiler/nnc/passes/optimizations/SinkTranspose.cpp
compiler/nnc/tests/soft_backend/CompileCPP.cpp
compiler/nnc/unittests/acl_backend/MIRToDOM.cpp
compiler/nnc/unittests/optimizations/CombineTransposes.cpp
compiler/nnc/unittests/optimizations/FuseArithmeticOps.cpp
compiler/nnc/unittests/optimizations/RemoveDeadEnds.cpp
compiler/nnc/unittests/optimizations/SinkTest.cpp
compiler/nnc/unittests/optimizations/Util.h
compiler/nnc/unittests/soft_backend/CPPOperations.cpp
compiler/nnc/unittests/soft_backend/Generator.cpp
compiler/nnc/unittests/soft_backend/ModelAnalyzer.cpp

index 296100f..12aab94 100644
--- a/compiler/nnc/passes/optimizations/CombineTransposes.cpp
+++ b/compiler/nnc/passes/optimizations/CombineTransposes.cpp
@@ -76,8 +76,7 @@ nnc::PassData nnc::CombineTransposes::run(nnc::PassData data)
 
       if (!isIdentityTranspose(combined_axis_order))
       {
-        auto new_tr_op = g->create<mir::ops::TransposeOp>(top_transpose->getName() + "new",
-                                                          top_transpose->getInput(0)->getProducer(),
+        auto new_tr_op = g->create<mir::ops::TransposeOp>(top_transpose->getInput(0)->getProducer(),
                                                           combined_axis_order);
 
         g->replaceNode(bottom_transpose, new_tr_op);
index 69f2b21..f23d5b2 100644
--- a/compiler/nnc/passes/optimizations/ConstantFoldTranspose.cpp
+++ b/compiler/nnc/passes/optimizations/ConstantFoldTranspose.cpp
@@ -72,7 +72,7 @@ PassData ConstantFoldTranspose::run(PassData data)
       TensorVariant res(DataType::FLOAT32, transpose_op->getOutputShape(0));
       transpose(constant_op->getValue(), res, transpose_op->getAxisOrder());
 
-      auto new_op = graph->create<ops::ConstantOp>("", res);
+      auto new_op = graph->create<ops::ConstantOp>(res);
 
       graph->replaceNode(transpose_op, new_op);
       opt_util::removeNodeIfUnused(graph, constant_op);
index 36d8270..f601d05 100644
--- a/compiler/nnc/passes/optimizations/FuseArithmeticOps.cpp
+++ b/compiler/nnc/passes/optimizations/FuseArithmeticOps.cpp
@@ -128,7 +128,7 @@ Operation *mergeConstantOps(Graph *g, const ops::ConstantOp *const1_op,
     }
   }
 
-  return g->create<ops::ConstantOp>(const1_op->getName(), new_const_val);
+  return g->create<ops::ConstantOp>(new_const_val);
 }
 
 // TODO: support 'DepthwiseConv'->'Mul'
index 5b6ef3b..f0d4280 100644
--- a/compiler/nnc/passes/optimizations/SinkRelu.cpp
+++ b/compiler/nnc/passes/optimizations/SinkRelu.cpp
@@ -66,10 +66,8 @@ PassData SinkRelu::run(PassData data)
       pre_relu.emplace_back(r->getInput(0)->getProducer());
     }
     // create replacement nodes
-    auto new_concat =
-        g->create<ops::ConcatOp>(concat->getName() + "_before_relu", pre_relu, concat->getAxis());
-    auto new_relu =
-        g->create<ops::ReluOp>(relus[0]->getName() + "_after_concat", new_concat->getOutput(0));
+    auto new_concat = g->create<ops::ConcatOp>(pre_relu, concat->getAxis());
+    auto new_relu = g->create<ops::ReluOp>(new_concat->getOutput(0));
 
     // concat is deleted here
     g->replaceNode(concat, new_relu);
index 39389a0..b1c7a0d 100644
--- a/compiler/nnc/passes/optimizations/SinkTranspose.cpp
+++ b/compiler/nnc/passes/optimizations/SinkTranspose.cpp
@@ -65,10 +65,8 @@ PassData SinkTranspose::run(PassData data)
       {
         prev_trans.emplace_back(transpose->getInput(0)->getProducer());
       }
-      auto new_concat = g->create<ops::ConcatOp>(concat->getName() + "_transposed", prev_trans,
-                                                 axis_order[concat->getAxis()]);
-      auto new_transpose = g->create<ops::TransposeOp>(trs[0]->getName() + "_after_concat",
-                                                       new_concat->getOutput(0), axis_order);
+      auto new_concat = g->create<ops::ConcatOp>(prev_trans, axis_order[concat->getAxis()]);
+      auto new_transpose = g->create<ops::TransposeOp>(new_concat->getOutput(0), axis_order);
       // removes old concat
       g->replaceNode(concat, new_transpose);
       for (auto tr : trs)
index 566d391..3540cf9 100644
--- a/compiler/nnc/tests/soft_backend/CompileCPP.cpp
+++ b/compiler/nnc/tests/soft_backend/CompileCPP.cpp
@@ -48,9 +48,9 @@ using namespace mir;
 static void fillGraph(Graph &g)
 {
   Shape input_shape{1, 2, 3};
-  Operation *input_op = g.create<ops::InputOp>("in", input_shape);
-  Operation *relu_op = g.create<ops::ReluOp>("relu", input_op->getOutput(0));
-  Operation *output_op = g.create<ops::OutputOp>("out", relu_op->getOutput(0));
+  Operation *input_op = g.create<ops::InputOp>(input_shape);
+  Operation *relu_op = g.create<ops::ReluOp>(input_op->getOutput(0));
+  Operation *output_op = g.create<ops::OutputOp>(relu_op->getOutput(0));
   input_op->getOutput(0)->setName("in");
   relu_op->getOutput(0)->setName("out");
 }
index e225b14..c1617f1 100644
--- a/compiler/nnc/unittests/acl_backend/MIRToDOM.cpp
+++ b/compiler/nnc/unittests/acl_backend/MIRToDOM.cpp
@@ -74,8 +74,9 @@ void fillGraph(Graph &g, const OpConstructor &op_constr, const vector<Shape> &in
   vector<mir::Operation::Output *> inputs;
   for (std::size_t i = 0; i < input_shapes.size(); ++i)
   {
-    auto input_op = g.create<ops::InputOp>("x" + to_string(i), input_shapes[i]);
-    inputs.push_back(input_op->getOutput(0));
+    auto input = g.create<ops::InputOp>(input_shapes[i])->getOutput(0);
+    input->setName("x" + to_string(i));
+    inputs.push_back(input);
   }
 
   // Create the operation.
@@ -83,7 +84,10 @@ void fillGraph(Graph &g, const OpConstructor &op_constr, const vector<Shape> &in
 
   // Create graph outputs.
   for (std::size_t i = 0; i < op->getNumOutputs(); ++i)
-    g.create<ops::OutputOp>("y" + to_string(i), op->getOutput(i));
+  {
+    op->getOutput(i)->setName("y" + to_string(i));
+    g.create<ops::OutputOp>(op->getOutput(i));
+  }
 }
 
 /**
@@ -208,7 +212,7 @@ TensorVariant createTensorVariant(const Shape &shape)
     data_ptr[i] = i;
   return TensorVariant(DataType::FLOAT32, shape, data_ptr);
 }
-}
+} // namespace
 
 // Actual tests
 
@@ -220,7 +224,7 @@ TEST(acl_backend_mir_to_dom, constant)
   Graph g;
   OpConstructor op_generator = [&constant_data](Graph &g,
                                                 const vector<Operation::Output *> &inputs) {
-    return g.create<mir::ops::ConstantOp>("data", constant_data);
+    return g.create<mir::ops::ConstantOp>(constant_data);
   };
 
   fillGraph(g, op_generator, {});
@@ -237,7 +241,7 @@ TEST(acl_backend_mir_to_dom, concat)
 {
   Graph g;
   OpConstructor op_generator = [](Graph &g, const vector<Operation::Output *> &inputs) {
-    return g.create<mir::ops::ConcatOp>("concat", inputs, 3);
+    return g.create<mir::ops::ConcatOp>(inputs, 3);
   };
   vector<Shape> input_shapes{{2, 3, 5, 1}, {2, 3, 5, 3}};
 
@@ -285,8 +289,8 @@ TEST(acl_backend_mir_to_dom, conv2d)
   OpConstructor op_generator =
       [kernel_tensor, strides](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
         std::vector<int32_t> padding{0, 0};
-        auto kernel = g.create<mir::ops::ConstantOp>("", kernel_tensor)->getOutput(0);
-        return g.create<mir::ops::Conv2DOp>("conv2d", inputs[0], kernel, strides, padding, padding);
+        auto kernel = g.create<mir::ops::ConstantOp>(kernel_tensor)->getOutput(0);
+        return g.create<mir::ops::Conv2DOp>(inputs[0], kernel, strides, padding, padding);
       };
 
   vector<Shape> input_shapes{{1, 10, 10, channels}};
@@ -312,9 +316,8 @@ TEST(acl_backend_mir_to_dom, depthwise_conv)
   OpConstructor op_generator =
       [kernel_tensor, strides](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
         std::vector<int32_t> padding{0, 0};
-        auto kernel = g.create<mir::ops::ConstantOp>("", kernel_tensor)->getOutput(0);
-        return g.create<mir::ops::DepthwiseConv2DOp>("depthwiseConv2d", inputs[0], kernel, strides,
-                                                     padding, padding);
+        auto kernel = g.create<mir::ops::ConstantOp>(kernel_tensor)->getOutput(0);
+        return g.create<mir::ops::DepthwiseConv2DOp>(inputs[0], kernel, strides, padding, padding);
       };
 
   vector<Shape> input_shapes{{1, 10, 10, channels}};
@@ -343,8 +346,8 @@ TEST(acl_backend_mir_to_dom, fully_connected)
   Graph g;
   OpConstructor opGenerator = [weights_tensor](Graph &g,
                                                const vector<Operation::Output *> &inputs) {
-    auto weights = g.create<mir::ops::ConstantOp>("", weights_tensor)->getOutput(0);
-    return g.create<mir::ops::FullyConnectedOp>("fc", inputs[0], weights);
+    auto weights = g.create<mir::ops::ConstantOp>(weights_tensor)->getOutput(0);
+    return g.create<mir::ops::FullyConnectedOp>(inputs[0], weights);
   };
 
   fillGraph(g, opGenerator, {input_shape_data});
@@ -369,8 +372,8 @@ TEST(acl_backend_mir_to_dom, maxpool)
   OpConstructor op_generator =
       [window_shape, strides](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
         std::vector<int32_t> padding{0, 0};
-        return g.create<mir::ops::PoolOp>("maxPool", inputs[0], ops::PoolOp::PoolingType::MAX,
-                                          window_shape, strides, padding, padding,
+        return g.create<mir::ops::PoolOp>(inputs[0], ops::PoolOp::PoolingType::MAX, window_shape,
+                                          strides, padding, padding,
                                           mir::ops::PoolOp::BorderType::EMPTY);
       };
 
@@ -419,7 +422,7 @@ static void testActivationOp(const OpConstructor &op_generator)
 TEST(acl_backend_mir_to_dom, relu)
 {
   OpConstructor op_generator = [](Graph &g, const std::vector<Operation::Output *> &inputs) {
-    return g.create<mir::ops::ReluOp>("relu", inputs[0]);
+    return g.create<mir::ops::ReluOp>(inputs[0]);
   };
 
   testActivationOp(op_generator);
@@ -429,7 +432,7 @@ TEST(acl_backend_mir_to_dom, capped_relu)
 {
   float cap = 6;
   OpConstructor op_generator = [cap](Graph &g, const std::vector<Operation::Output *> &inputs) {
-    return g.create<mir::ops::CappedReluOp>("capped_relu", inputs[0], cap);
+    return g.create<mir::ops::CappedReluOp>(inputs[0], cap);
   };
 
   testActivationOp(op_generator);
@@ -438,7 +441,7 @@ TEST(acl_backend_mir_to_dom, capped_relu)
 TEST(acl_backend_mir_to_dom, sigmoid)
 {
   OpConstructor op_generator = [](Graph &g, const std::vector<Operation::Output *> &inputs) {
-    return g.create<mir::ops::SigmoidOp>("sigmoid", inputs[0]);
+    return g.create<mir::ops::SigmoidOp>(inputs[0]);
   };
 
   testActivationOp(op_generator);
@@ -452,7 +455,7 @@ TEST(acl_backend_mir_to_dom, DISABLED_elu)
 TEST(acl_backend_mir_to_dom, tanh)
 {
   OpConstructor op_generator = [](Graph &g, const std::vector<Operation::Output *> &inputs) {
-    return g.create<mir::ops::TanhOp>("tanh", inputs[0]);
+    return g.create<mir::ops::TanhOp>(inputs[0]);
   };
 
   testActivationOp(op_generator);
@@ -467,7 +470,7 @@ TEST(acl_backend_mir_to_dom, softmax)
 {
   Graph g;
   OpConstructor op_generator = [](Graph &g, const vector<Operation::Output *> &inputs) {
-    return g.create<mir::ops::SoftmaxOp>("softmax", inputs[0], 3);
+    return g.create<mir::ops::SoftmaxOp>(inputs[0], 3);
   };
   vector<Shape> input_shapes{{1, 1, 1, 3}};
 
@@ -496,7 +499,7 @@ TEST(acl_backend_mir_to_dom, reshape)
   Shape output_shape{1, h * w * c};
 
   OpConstructor op_generator = [output_shape](Graph &g, const vector<Operation::Output *> &inputs) {
-    return g.create<mir::ops::ReshapeOp>("reshape", inputs[0], output_shape);
+    return g.create<mir::ops::ReshapeOp>(inputs[0], output_shape);
   };
 
   fillGraph(g, op_generator, {input_shape});
@@ -526,7 +529,7 @@ TEST(acl_backend_mir_to_dom, transpose)
 
   Graph g;
   OpConstructor op_generator = [&perm](Graph &g, const vector<Operation::Output *> &inputs) {
-    return g.create<mir::ops::TransposeOp>("transpose", inputs[0], perm);
+    return g.create<mir::ops::TransposeOp>(inputs[0], perm);
   };
   vector<Shape> input_shapes{{1, 10, 10, channels}};
 
index d0e57b5..517755f 100644
--- a/compiler/nnc/unittests/optimizations/CombineTransposes.cpp
+++ b/compiler/nnc/unittests/optimizations/CombineTransposes.cpp
@@ -40,11 +40,11 @@ TEST(OptPass, eliminateTransposesLinear)
    *        ||
    *      [relu]
    */
-  Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
-  Operation *tr1 = g.create<ops::TransposeOp>("tr", input->getOutput(0), vector<size_t>{1, 0, 2});
-  Operation *tr15 = g.create<ops::TransposeOp>("tr", tr1->getOutput(0), vector<size_t>{1, 0, 2});
-  Operation *tr2 = g.create<ops::TransposeOp>("tr", tr15->getOutput(0), vector<size_t>{1, 0, 2});
-  Operation *relu = g.create<ops::ReluOp>("relu", tr2->getOutput(0));
+  Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});
+  Operation *tr1 = g.create<ops::TransposeOp>(input->getOutput(0), vector<size_t>{1, 0, 2});
+  Operation *tr15 = g.create<ops::TransposeOp>(tr1->getOutput(0), vector<size_t>{1, 0, 2});
+  Operation *tr2 = g.create<ops::TransposeOp>(tr15->getOutput(0), vector<size_t>{1, 0, 2});
+  Operation *relu = g.create<ops::ReluOp>(tr2->getOutput(0));
 
   // Check that layout is desired
   std::stringstream ss;
@@ -53,7 +53,7 @@ TEST(OptPass, eliminateTransposesLinear)
   pass.run(&g);
   g.accept(&d);
   // Assert only 1 transpose remains
-  ASSERT_EQ("i_input.t_tr.r_relu.", ss.str());
+  ASSERT_EQ("i_0.t_1.r_4.", ss.str());
 }
 
 TEST(OptPass, combineTransposesLinear)
@@ -68,10 +68,10 @@ TEST(OptPass, combineTransposesLinear)
    *        ||
    *      [relu]
    */
-  Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
-  Operation *tr1 = g.create<ops::TransposeOp>("tr1", input->getOutput(0), vector<size_t>{1, 0, 2});
-  Operation *tr2 = g.create<ops::TransposeOp>("tr2", tr1->getOutput(0), vector<size_t>{0, 2, 1});
-  Operation *relu = g.create<ops::ReluOp>("relu", tr2->getOutput(0));
+  Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});
+  Operation *tr1 = g.create<ops::TransposeOp>(input->getOutput(0), vector<size_t>{1, 0, 2});
+  Operation *tr2 = g.create<ops::TransposeOp>(tr1->getOutput(0), vector<size_t>{0, 2, 1});
+  Operation *relu = g.create<ops::ReluOp>(tr2->getOutput(0));
 
   std::stringstream ss;
   DumpVisitor d(ss);
@@ -80,7 +80,7 @@ TEST(OptPass, combineTransposesLinear)
   g.accept(&d);
 
   // Assert transposes are combined
-  ASSERT_EQ("i_input.t_tr1new.r_relu.", ss.str());
+  ASSERT_EQ("i_0.t_4.r_3.", ss.str());
   auto ax_ord_actual = dynamic_cast<ops::TransposeOp *>(
                            (*(g.getInputs()[0]->getOutput(0)->getConsumers().begin()))->getNode())
                            ->getAxisOrder();
@@ -100,20 +100,19 @@ TEST(OptPass, combineTransposesBush)
    *       \\       //
    *          [Add]
    */
-  Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3, 2});
-  Operation *tr1 =
-      g.create<ops::TransposeOp>("tr1", input->getOutput(0), vector<size_t>{1, 0, 2, 3});
-  Operation *tr2 = g.create<ops::TransposeOp>("tr2", tr1->getOutput(0), vector<size_t>{1, 0, 2, 3});
-  Operation *tr3 = g.create<ops::TransposeOp>("tr3", tr1->getOutput(0), vector<size_t>{1, 0, 2, 3});
-  Operation *elw = g.create<ops::AddOp>("elewiseAdd", tr2->getOutput(0), tr3->getOutput(0));
+  Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3, 2});
+  Operation *tr1 = g.create<ops::TransposeOp>(input->getOutput(0), vector<size_t>{1, 0, 2, 3});
+  Operation *tr2 = g.create<ops::TransposeOp>(tr1->getOutput(0), vector<size_t>{1, 0, 2, 3});
+  Operation *tr3 = g.create<ops::TransposeOp>(tr1->getOutput(0), vector<size_t>{1, 0, 2, 3});
+  Operation *elw = g.create<ops::AddOp>(tr2->getOutput(0), tr3->getOutput(0));
   std::stringstream ss;
   DumpVisitor d(ss);
   CombineTransposes pass;
   pass.run(&g);
   g.accept(&d);
-  ASSERT_EQ("i_input.b_elewiseAdd.", ss.str());
-  ASSERT_EQ(elw->getInput(0)->getProducer()->getNode()->getName(), "input");
-  ASSERT_EQ(elw->getInput(1)->getProducer()->getNode()->getName(), "input");
+  ASSERT_EQ("i_0.b_4.", ss.str());
+  ASSERT_EQ(elw->getInput(0)->getProducer()->getNode()->getType(), mir::Operation::Type::input);
+  ASSERT_EQ(elw->getInput(1)->getProducer()->getNode()->getType(), mir::Operation::Type::input);
 }
 
 TEST(OptPass, combineTransposesOpOrder)
@@ -128,19 +127,20 @@ TEST(OptPass, combineTransposesOpOrder)
    *       \\       //
    *          [Add]
    */
-  Operation *in1 = g.create<ops::InputOp>("inp1", Shape{1, 2, 3});
-  Operation *in2 = g.create<ops::InputOp>("inp2", Shape{1, 2, 3});
-  Operation *tr0 = g.create<ops::TransposeOp>("tr0", in1->getOutput(0), vector<size_t>{1, 0, 2});
-  Operation *tr1 = g.create<ops::TransposeOp>("tr1", in2->getOutput(0), vector<size_t>{2, 1, 0});
-  Operation *tr2 = g.create<ops::TransposeOp>("tr2", tr0->getOutput(0), vector<size_t>{1, 0, 2});
-  Operation *tr3 = g.create<ops::TransposeOp>("tr3", tr1->getOutput(0), vector<size_t>{2, 1, 0});
-  Operation *elw = g.create<ops::AddOp>("elewiseAdd", tr2->getOutput(0), tr3->getOutput(0));
-  g.create<ops::OutputOp>("out", elw->getOutput(0));
+  Operation *in1 = g.create<ops::InputOp>(Shape{1, 2, 3});
+  Operation *in2 = g.create<ops::InputOp>(Shape{1, 2, 3});
+  Operation *tr0 = g.create<ops::TransposeOp>(in1->getOutput(0), vector<size_t>{1, 0, 2});
+  Operation *tr1 = g.create<ops::TransposeOp>(in2->getOutput(0), vector<size_t>{2, 1, 0});
+  Operation *tr2 = g.create<ops::TransposeOp>(tr0->getOutput(0), vector<size_t>{1, 0, 2});
+  Operation *tr3 = g.create<ops::TransposeOp>(tr1->getOutput(0), vector<size_t>{2, 1, 0});
+  Operation *elw = g.create<ops::AddOp>(tr2->getOutput(0), tr3->getOutput(0));
+  g.create<ops::OutputOp>(elw->getOutput(0));
   int n1 = elw->getInput(0)->getNode()->getInput(0)->getNode()->getInput(0)->getNode()->getId();
   int n2 = elw->getInput(1)->getNode()->getInput(0)->getNode()->getInput(0)->getNode()->getId();
   CombineTransposes pass;
   pass.run(&g);
-  ASSERT_EQ(g.getOutputs()[0]->getInput(0)->getProducer()->getNode()->getName(), "elewiseAdd");
+  ASSERT_EQ(g.getOutputs()[0]->getInput(0)->getProducer()->getNode()->getType(),
+            mir::Operation::Type::add);
   // Order is preserved
   ASSERT_EQ(n1, elw->getInput(0)->getNode()->getId());
   ASSERT_EQ(n2, elw->getInput(1)->getNode()->getId());
index da75592..7b8886c 100644
--- a/compiler/nnc/unittests/optimizations/FuseArithmeticOps.cpp
+++ b/compiler/nnc/unittests/optimizations/FuseArithmeticOps.cpp
@@ -36,21 +36,21 @@ TEST(OptPass, fuseConvBiasScaleScaleBias)
   mir::Graph g;
 
   // Create graph: 'input->conv->bias->scale->scale->bias'
-  auto input = g.create<ops::InputOp>("input", Shape{1, 299, 299, 3});
-  auto conv_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10, 3, 3, 3}));
+  auto input = g.create<ops::InputOp>(Shape{1, 299, 299, 3});
+  auto conv_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10, 3, 3, 3}));
   std::vector<int32_t> padding{0, 0};
-  auto conv = g.create<ops::Conv2DOp>("conv", input->getOutput(0), conv_const->getOutput(0),
-                                      Shape{1, 1}, padding, padding);
-  auto bias1_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
-  auto bias1 = g.create<ops::AddOp>("bias1", conv->getOutput(0), bias1_const->getOutput(0));
-  auto scale1_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
-  auto scale1 = g.create<ops::MulOp>("scale1", bias1->getOutput(0), scale1_const->getOutput(0));
-  auto scale2_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
-  auto scale2 = g.create<ops::MulOp>("scale2", scale1->getOutput(0), scale2_const->getOutput(0));
-  auto scale3_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
-  auto scale3 = g.create<ops::MulOp>("scale3", scale2->getOutput(0), scale3_const->getOutput(0));
-  auto bias2_const = g.create<ops::ConstantOp>("", TensorVariant(DataType::FLOAT32, {10}));
-  g.create<ops::AddOp>("", scale3->getOutput(0), bias2_const->getOutput(0));
+  auto conv = g.create<ops::Conv2DOp>(input->getOutput(0), conv_const->getOutput(0), Shape{1, 1},
+                                      padding, padding);
+  auto bias1_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
+  auto bias1 = g.create<ops::AddOp>(conv->getOutput(0), bias1_const->getOutput(0));
+  auto scale1_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
+  auto scale1 = g.create<ops::MulOp>(bias1->getOutput(0), scale1_const->getOutput(0));
+  auto scale2_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
+  auto scale2 = g.create<ops::MulOp>(scale1->getOutput(0), scale2_const->getOutput(0));
+  auto scale3_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
+  auto scale3 = g.create<ops::MulOp>(scale2->getOutput(0), scale3_const->getOutput(0));
+  auto bias2_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
+  g.create<ops::AddOp>(scale3->getOutput(0), bias2_const->getOutput(0));
 
   // Check that layout is desired
   std::stringstream ss;
@@ -59,9 +59,12 @@ TEST(OptPass, fuseConvBiasScaleScaleBias)
   pass.run(&g);
   g.accept(&d);
   // Assert only 'conv->bias' remains
-  ASSERT_TRUE("i_input.const_.const_.conv_conv.b_bias1." == ss.str() ||
-              "const_.i_input.const_.conv_conv.b_bias1." == ss.str() ||
-              "const_.const_.i_input.conv_conv.b_bias1." == ss.str());
+  ASSERT_TRUE("i_0.const_25.const_23.conv_26.b_24." == ss.str() ||
+              "i_0.const_23.const_25.conv_26.b_24." == ss.str() ||
+              "const_25.i_0.const_23.conv_26.b_24." == ss.str() ||
+              "const_23.i_0.const_25.conv_26.b_24." == ss.str() ||
+              "const_25.const_23.i_0.conv_26.b_24." == ss.str() ||
+              "const_23.const_25.i_0.conv_26.b_24." == ss.str());
 }
 
 } // unnamed namespace
index c9bc868..afed25b 100644
--- a/compiler/nnc/unittests/optimizations/RemoveDeadEnds.cpp
+++ b/compiler/nnc/unittests/optimizations/RemoveDeadEnds.cpp
@@ -34,11 +34,11 @@ TEST(OptPass, removeDeadEndConstants)
    *        ||
    *      [relu]
    */
-  Operation *C0 = g.create<ops::ConstantOp>("C0", TensorVariant(DataType::FLOAT32, {2, 2}));
-  Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
-  Operation *C1 = g.create<ops::ConstantOp>("C1", TensorVariant(DataType::FLOAT32, {2, 2}));
-  Operation *C2 = g.create<ops::ConstantOp>("C2", TensorVariant(DataType::FLOAT32, {2, 2}));
-  Operation *relu = g.create<ops::ReluOp>("relu", input->getOutput(0));
+  Operation *C0 = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {2, 2}));
+  Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});
+  Operation *C1 = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {2, 2}));
+  Operation *C2 = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {2, 2}));
+  Operation *relu = g.create<ops::ReluOp>(input->getOutput(0));
 
   std::stringstream ss;
   RemoveDeadEnds pass;
index d20edf2..6344bf4 100644
--- a/compiler/nnc/unittests/optimizations/SinkTest.cpp
+++ b/compiler/nnc/unittests/optimizations/SinkTest.cpp
@@ -62,11 +62,11 @@ TEST(OptPass, sinkTrReLU)
    *        ||
    *      [tanh]
    */
-  Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
-  Operation *tr1 = g.create<ops::TransposeOp>("tr1", input->getOutput(0), vector<size_t>{1, 0, 2});
-  Operation *relu = g.create<ops::ReluOp>("relu", tr1->getOutput(0));
-  Operation *tanh = g.create<ops::TanhOp>("tanh", relu->getOutput(0));
-  Operation *out = g.create<ops::OutputOp>("out", tanh->getOutput(0));
+  Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});
+  Operation *tr1 = g.create<ops::TransposeOp>(input->getOutput(0), vector<size_t>{1, 0, 2});
+  Operation *relu = g.create<ops::ReluOp>(tr1->getOutput(0));
+  Operation *tanh = g.create<ops::TanhOp>(relu->getOutput(0));
+  Operation *out = g.create<ops::OutputOp>(tanh->getOutput(0));
   (void)out;
 
   // Check that layout is desired
@@ -74,10 +74,10 @@ TEST(OptPass, sinkTrReLU)
   pass.run(&g);
 
   // Assert transposes are removed
-  ASSERT_EQ(g.getInputs()[0]->getName(), "input");
-  ASSERT_EQ(getPrev(g.getOutputs()[0])->getName(), "tanh");
-  ASSERT_EQ(getNext(g.getInputs()[0])->getName(), "relu");
-  ASSERT_EQ(getPrev(tanh)->getName(), "tr1");
+  ASSERT_EQ(g.getInputs()[0]->getType(), mir::Operation::Type::input);
+  ASSERT_EQ(getPrev(g.getOutputs()[0])->getType(), mir::Operation::Type::tanh);
+  ASSERT_EQ(getNext(g.getInputs()[0])->getType(), mir::Operation::Type::ReLU);
+  ASSERT_EQ(getPrev(tanh)->getType(), mir::Operation::Type::transpose);
 }
 
 /* This tests swapping concat and transpose */
@@ -94,14 +94,14 @@ TEST(OptPass, sinkTrConcat)
    *            ||
    *          [TanH]
    */
-  Operation *in1 = g.create<ops::InputOp>("inp1", Shape{1, 1, 2, 3});
-  Operation *in2 = g.create<ops::InputOp>("inp2", Shape{1, 1, 2, 3});
-  Operation *tr1 = g.create<ops::TransposeOp>("tr1", in1->getOutput(0), vector<size_t>{0, 3, 1, 2});
-  Operation *tr2 = g.create<ops::TransposeOp>("tr2", in2->getOutput(0), vector<size_t>{0, 3, 1, 2});
-  Operation *conc = g.create<ops::ConcatOp>(
-      "concat", vector<Operation::Output *>{tr1->getOutput(0), tr2->getOutput(0)}, 1);
-  Operation *tanh = g.create<ops::TanhOp>("tanh", conc->getOutput(0));
-  Operation *out = g.create<ops::OutputOp>("out", tanh->getOutput(0));
+  Operation *in1 = g.create<ops::InputOp>(Shape{1, 1, 2, 3});
+  Operation *in2 = g.create<ops::InputOp>(Shape{1, 1, 2, 3});
+  Operation *tr1 = g.create<ops::TransposeOp>(in1->getOutput(0), vector<size_t>{0, 3, 1, 2});
+  Operation *tr2 = g.create<ops::TransposeOp>(in2->getOutput(0), vector<size_t>{0, 3, 1, 2});
+  Operation *conc =
+      g.create<ops::ConcatOp>(vector<Operation::Output *>{tr1->getOutput(0), tr2->getOutput(0)}, 1);
+  Operation *tanh = g.create<ops::TanhOp>(conc->getOutput(0));
+  Operation *out = g.create<ops::OutputOp>(tanh->getOutput(0));
   (void)out;
   // Check that layout is as desired
   SinkTranspose pass;
@@ -129,14 +129,14 @@ TEST(OptPass, sinkReluConcat)
    *            ||
    *          [TanH]
    */
-  Operation *in1 = g.create<ops::InputOp>("inp1", Shape{1, 1, 2, 3});
-  Operation *in2 = g.create<ops::InputOp>("inp2", Shape{1, 1, 2, 3});
-  Operation *relu1 = g.create<ops::ReluOp>("relu1", in1->getOutput(0));
-  Operation *relu2 = g.create<ops::ReluOp>("relu2", in2->getOutput(0));
+  Operation *in1 = g.create<ops::InputOp>(Shape{1, 1, 2, 3});
+  Operation *in2 = g.create<ops::InputOp>(Shape{1, 1, 2, 3});
+  Operation *relu1 = g.create<ops::ReluOp>(in1->getOutput(0));
+  Operation *relu2 = g.create<ops::ReluOp>(in2->getOutput(0));
   Operation *conc = g.create<ops::ConcatOp>(
-      "concat", vector<Operation::Output *>{relu1->getOutput(0), relu2->getOutput(0)}, 1);
-  Operation *tanh = g.create<ops::TanhOp>("tanh", conc->getOutput(0));
-  Operation *out = g.create<ops::OutputOp>("out", tanh->getOutput(0));
+      vector<Operation::Output *>{relu1->getOutput(0), relu2->getOutput(0)}, 1);
+  Operation *tanh = g.create<ops::TanhOp>(conc->getOutput(0));
+  Operation *out = g.create<ops::OutputOp>(tanh->getOutput(0));
   (void)out;
 
   // Check that layout is as desired
@@ -163,13 +163,13 @@ TEST(OptPass, sinkPoolReLU)
    *        ||
    *      [tanh]
    */
-  Operation *input = g.create<ops::InputOp>("input", Shape{1, 4, 4, 3});
-  Operation *relu = g.create<ops::ReluOp>("relu", input->getOutput(0));
-  Operation *mp = g.create<ops::PoolOp>("pool", relu->getOutput(0), ops::PoolOp::PoolingType::MAX,
+  Operation *input = g.create<ops::InputOp>(Shape{1, 4, 4, 3});
+  Operation *relu = g.create<ops::ReluOp>(input->getOutput(0));
+  Operation *mp = g.create<ops::PoolOp>(relu->getOutput(0), ops::PoolOp::PoolingType::MAX,
                                         Shape{2, 2}, Shape{2, 2}, vector<int32_t>{0, 0},
                                         vector<int32_t>{0, 0}, ops::PoolOp::BorderType::EMPTY);
-  Operation *tanh = g.create<ops::TanhOp>("tanh", mp->getOutput(0));
-  Operation *out = g.create<ops::OutputOp>("out", tanh->getOutput(0));
+  Operation *tanh = g.create<ops::TanhOp>(mp->getOutput(0));
+  Operation *out = g.create<ops::OutputOp>(tanh->getOutput(0));
   (void)out;
 
   SinkRelu pass;
@@ -179,8 +179,8 @@ TEST(OptPass, sinkPoolReLU)
   g.accept(&d);
 
   // tanh(relu(pool(input)))
-  ASSERT_EQ(getNext(g.getInputs()[0])->getName(), "pool");
-  ASSERT_EQ(getPrev(g.getOutputs()[0])->getName(), "tanh");
-  ASSERT_EQ("i_input.p_pool.r_relu.th_tanh.", ss.str());
+  ASSERT_EQ(getNext(g.getInputs()[0])->getType(), mir::Operation::Type::pool);
+  ASSERT_EQ(getPrev(g.getOutputs()[0])->getType(), mir::Operation::Type::tanh);
+  ASSERT_EQ("i_0.p_5.r_6.th_3.", ss.str());
 }
 } // unnamed namespace
index 6004702..de1a80c 100644
--- a/compiler/nnc/unittests/optimizations/Util.h
+++ b/compiler/nnc/unittests/optimizations/Util.h
@@ -36,23 +36,32 @@ class DumpVisitor : public mir::Visitor
 public:
   explicit DumpVisitor(std::ostream &s) : _s(s) {}
 
-  void visit(mir::ops::InputOp &op) override { _s << "i_" << op.getName() << "."; };
+  void visit(mir::ops::InputOp &op) override { _s << "i_" << std::to_string(op.getId()) << "."; };
 
-  void visit(mir::ops::TanhOp &op) override { _s << "th_" << op.getName() << "."; }
+  void visit(mir::ops::TanhOp &op) override { _s << "th_" << std::to_string(op.getId()) << "."; }
 
-  void visit(mir::ops::MulOp &op) override { _s << "s_" << op.getName() << "."; }
+  void visit(mir::ops::MulOp &op) override { _s << "s_" << std::to_string(op.getId()) << "."; }
 
-  void visit(mir::ops::AddOp &op) override { _s << "b_" << op.getName() << "."; }
+  void visit(mir::ops::AddOp &op) override { _s << "b_" << std::to_string(op.getId()) << "."; }
 
-  void visit(mir::ops::ReluOp &op) override { _s << "r_" << op.getName() << "."; }
+  void visit(mir::ops::ReluOp &op) override { _s << "r_" << std::to_string(op.getId()) << "."; }
 
-  void visit(mir::ops::PoolOp &op) override { _s << "p_" << op.getName() << "."; }
+  void visit(mir::ops::PoolOp &op) override { _s << "p_" << std::to_string(op.getId()) << "."; }
 
-  void visit(mir::ops::TransposeOp &op) override { _s << "t_" << op.getName() << "."; }
+  void visit(mir::ops::TransposeOp &op) override
+  {
+    _s << "t_" << std::to_string(op.getId()) << ".";
+  }
 
-  void visit(mir::ops::Conv2DOp &op) override { _s << "conv_" << op.getName() << "."; }
+  void visit(mir::ops::Conv2DOp &op) override
+  {
+    _s << "conv_" << std::to_string(op.getId()) << ".";
+  }
 
-  void visit(mir::ops::ConstantOp &op) override { _s << "const_" << op.getName() << "."; }
+  void visit(mir::ops::ConstantOp &op) override
+  {
+    _s << "const_" << std::to_string(op.getId()) << ".";
+  }
 
   std::ostream &_s;
 };
index 7407731..eb6c7c2 100644
--- a/compiler/nnc/unittests/soft_backend/CPPOperations.cpp
+++ b/compiler/nnc/unittests/soft_backend/CPPOperations.cpp
@@ -127,8 +127,7 @@ fillGraph(mir::Graph &g,
   std::vector<mir::Operation::Output *> inputs;
   for (std::size_t i = 0; i < input_ntensors.size(); ++i)
   {
-    auto input_op =
-        g.create<mir::ops::InputOp>("x" + std::to_string(i), input_ntensors[i]->getShape());
+    auto input_op = g.create<mir::ops::InputOp>(input_ntensors[i]->getShape());
     input_op->getOutput(0)->setName("x" + std::to_string(i));
     inputs.push_back(input_op->getOutput(0));
   }
@@ -138,8 +137,7 @@ fillGraph(mir::Graph &g,
 
   // Create graph outputs.
   assert(op->getNumOutputs() == 1);
-  g.create<mir::ops::OutputOp>(op->getName(), op->getOutput(0));
-  op->setName("");
+  g.create<mir::ops::OutputOp>(op->getOutput(0));
 
   return op;
 }
@@ -352,7 +350,7 @@ TEST(cpp_operations_test, capped_relu)
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
   auto op_generator = [cap](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-    return g.create<mir::ops::CappedReluOp>("y", inputs[0], cap);
+    return g.create<mir::ops::CappedReluOp>(inputs[0], cap);
   };
 
   createAndRunTestGraph(op_generator, cappedRelu, input_ntensors, input_atensor);
@@ -376,7 +374,7 @@ TEST(cpp_operations_test, concat)
       fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
       auto op_generator = [axis](mir::Graph &g,
                                  const std::vector<mir::Operation::Output *> &inputs) {
-        return g.create<mir::ops::ConcatOp>("y", inputs, axis);
+        return g.create<mir::ops::ConcatOp>(inputs, axis);
       };
 
       createAndRunTestGraph(op_generator, concat<Tensor, Tensor>, input_ntensors, input_atensors[0],
@@ -398,7 +396,7 @@ TEST(cpp_operations_test, addbc)
     fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
     fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
     auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-      return g.create<mir::ops::AddOp>("y", inputs[0], inputs[1]);
+      return g.create<mir::ops::AddOp>(inputs[0], inputs[1]);
     };
 
     createAndRunTestGraph(op_generator, ElementWise<Add, Tensor, Tensor>, input_ntensors,
@@ -420,7 +418,7 @@ TEST(cpp_operations_test, mulbc)
     fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 1.0f);
     fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
     auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-      return g.create<mir::ops::MulOp>("y", inputs[0], inputs[1]);
+      return g.create<mir::ops::MulOp>(inputs[0], inputs[1]);
     };
 
     createAndRunTestGraph(opGenerator, ElementWise<Mul, Tensor, Tensor>, input_ntensors,
@@ -442,7 +440,7 @@ TEST(cpp_operations_test, divbc)
     fillTensors(input_ntensors[0], input_atensors[0], shape_data1, 5.0f);
     fillTensors(input_ntensors[1], input_atensors[1], shape_data2, 2.0f);
     auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-      return g.create<mir::ops::DivOp>("y", inputs[0], inputs[1]);
+      return g.create<mir::ops::DivOp>(inputs[0], inputs[1]);
     };
 
     createAndRunTestGraph(opGenerator, ElementWise<Div, Tensor, Tensor>, input_ntensors,
@@ -462,7 +460,7 @@ TEST(cpp_operations_test, add)
     fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
     fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
     auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-      return g.create<mir::ops::AddOp>("y", inputs[0], inputs[1]);
+      return g.create<mir::ops::AddOp>(inputs[0], inputs[1]);
     };
 
     createAndRunTestGraph(op_generator, ElementWise<Add, Tensor, Tensor>, input_ntensors,
@@ -482,7 +480,7 @@ TEST(cpp_operations_test, sub)
     fillTensors(input_n_tensors[0], input_atensors[0], shape_data, 1.0f);
     fillTensors(input_n_tensors[1], input_atensors[1], shape_data, 2.0f);
     auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-      return g.create<mir::ops::SubOp>("y", inputs[0], inputs[1]);
+      return g.create<mir::ops::SubOp>(inputs[0], inputs[1]);
     };
 
     createAndRunTestGraph(opGenerator, ElementWise<Sub, Tensor, Tensor>, input_n_tensors,
@@ -502,7 +500,7 @@ TEST(cpp_operations_test, mul)
     fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
     fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
     auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-      return g.create<mir::ops::MulOp>("y", inputs[0], inputs[1]);
+      return g.create<mir::ops::MulOp>(inputs[0], inputs[1]);
     };
 
     createAndRunTestGraph(op_generator, ElementWise<Mul, Tensor, Tensor>, input_ntensors,
@@ -522,7 +520,7 @@ TEST(cpp_operations_test, max)
     fillTensors(input_ntensors[0], input_atensors[0], shape_data, 1.0f);
     fillTensors(input_ntensors[1], input_atensors[1], shape_data, 2.0f);
     auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-      return g.create<mir::ops::MaxOp>("y", inputs[0], inputs[1]);
+      return g.create<mir::ops::MaxOp>(inputs[0], inputs[1]);
     };
 
     createAndRunTestGraph(op_generator, ElementWise<Max, Tensor, Tensor>, input_ntensors,
@@ -557,7 +555,7 @@ TEST(cpp_operations_test, convTransposed2d)
               auto pad_t = mir::ops::PaddingType::Same;
               auto op_generator = [&strides, pad_t](
                   mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-                return g.create<mir::ops::DeConv2DOp>("y", inputs[0], inputs[1], strides, pad_t);
+                return g.create<mir::ops::DeConv2DOp>(inputs[0], inputs[1], strides, pad_t);
               };
 
               createAndRunTestGraph(op_generator, convTransposed2d, input_ntensors, input_atensor0,
@@ -592,7 +590,7 @@ TEST(cpp_operations_test, conv2d)
               auto op_generator = [&strides](mir::Graph &g,
                                              const std::vector<mir::Operation::Output *> &inputs) {
                 std::vector<int32_t> padding{0, 0};
-                return g.create<mir::ops::Conv2DOp>("y", inputs[0], inputs[1], strides, padding,
+                return g.create<mir::ops::Conv2DOp>(inputs[0], inputs[1], strides, padding,
                                                     padding);
               };
 
@@ -628,8 +626,8 @@ TEST(cpp_operations_test, depthwise_conv)
               auto op_generator = [&strides](mir::Graph &g,
                                              const std::vector<mir::Operation::Output *> &inputs) {
                 std::vector<int32_t> padding{0, 0};
-                return g.create<mir::ops::DepthwiseConv2DOp>("y", inputs[0], inputs[1], strides,
-                                                             padding, padding);
+                return g.create<mir::ops::DepthwiseConv2DOp>(inputs[0], inputs[1], strides, padding,
+                                                             padding);
               };
 
               createAndRunTestGraph(op_generator, depthwiseConv2d, input_ntensors, input_atensor0,
@@ -647,7 +645,7 @@ TEST(cpp_operations_test, fully_connected)
   fillTensors(input_ntensors[0], input_atensor0, input_shape_data, 1.0f);
   fillTensors(input_ntensors[1], input_atensor1, weights_shape_data, 1.0f);
   auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-    return g.create<mir::ops::FullyConnectedOp>("y", inputs[0], inputs[1]);
+    return g.create<mir::ops::FullyConnectedOp>(inputs[0], inputs[1]);
   };
 
   createAndRunTestGraph(op_generator, fullConnect, input_ntensors, input_atensor0, input_atensor1);
@@ -666,7 +664,7 @@ TEST(cpp_operations_test, resize_NN_test)
     auto op_generator = [&res_shape](mir::Graph &g,
                                      const std::vector<mir::Operation::Output *> &inputs) {
       return g.create<mir::ops::ResizeOp>(
-          "y", inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
+          inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
     };
 
     createAndRunTestGraph(op_generator, resize, input_ntensors, input_atensor);
@@ -687,7 +685,7 @@ TEST(cpp_operations_test, resize_NN_test_scales)
     auto op_generator = [&scales](mir::Graph &g,
                                   const std::vector<mir::Operation::Output *> &inputs) {
       return g.create<mir::ops::ResizeOp>(
-          "y", inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales);
+          inputs[0], mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales);
     };
     createAndRunTestGraph(op_generator, resize, input_ntensors, input_atensor);
   }
@@ -699,8 +697,8 @@ createPool(mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs,
            mir::Shape &window_shape, mir::Shape &strides, irOps::PoolOp::BorderType border)
 {
   std::vector<int32_t> padding{0, 0};
-  return g.create<mir::ops::PoolOp>("pool", inputs[0], poolT, window_shape, strides, padding,
-                                    padding, border);
+  return g.create<mir::ops::PoolOp>(inputs[0], poolT, window_shape, strides, padding, padding,
+                                    border);
 };
 
 template <irOps::PoolOp::PoolingType poolT, typename Func>
@@ -758,7 +756,7 @@ TEST(cpp_operations_test, relu)
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
   auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-    return g.create<mir::ops::ReluOp>("y", inputs[0]);
+    return g.create<mir::ops::ReluOp>(inputs[0]);
   };
 
   createAndRunTestGraph(op_generator, relu, input_ntensors, input_atensor);
@@ -772,7 +770,7 @@ TEST(cpp_operations_test, leaky_relu)
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
   auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-    return g.create<mir::ops::LeakyReluOp>("y", inputs[0], 0.1);
+    return g.create<mir::ops::LeakyReluOp>(inputs[0], 0.1);
   };
 
   createAndRunTestGraph(op_generator, leakyRelu, input_ntensors, input_atensor);
@@ -786,7 +784,7 @@ TEST(cpp_operations_test, sigmoid)
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
   auto opGenerator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-    return g.create<mir::ops::SigmoidOp>("y", inputs[0]);
+    return g.create<mir::ops::SigmoidOp>(inputs[0]);
   };
 
   createAndRunTestGraph(opGenerator, sigmoid, input_ntensors, input_atensor);
@@ -800,7 +798,7 @@ TEST(cpp_operations_test, elu)
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
   auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-    return g.create<mir::ops::EluOp>("y", inputs[0], 1);
+    return g.create<mir::ops::EluOp>(inputs[0], 1);
   };
 
   createAndRunTestGraph(op_generator, elu, input_ntensors, input_atensor);
@@ -814,7 +812,7 @@ TEST(cpp_operations_test, tanh)
   vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
   fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
   auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-    return g.create<mir::ops::TanhOp>("y", inputs[0]);
+    return g.create<mir::ops::TanhOp>(inputs[0]);
   };
 
   createAndRunTestGraph(op_generator, tanhActivation, input_ntensors, input_atensor);
@@ -835,7 +833,7 @@ TEST(cpp_operations_test, reduceMeanTst)
       fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
       auto op_generator = [&axis_list, keep_dims](
           mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-        auto op = g.create<mir::ops::ReduceOp>("y", inputs[0], axis_list, keep_dims,
+        auto op = g.create<mir::ops::ReduceOp>(inputs[0], axis_list, keep_dims,
                                                mir::ops::ReduceOp::FuncType::mean);
         return op;
       };
@@ -858,7 +856,7 @@ TEST(cpp_operations_test, softmax)
     vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
     fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
     auto op_generator = [axis](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-      return g.create<mir::ops::SoftmaxOp>("y", inputs[0], axis);
+      return g.create<mir::ops::SoftmaxOp>(inputs[0], axis);
     };
 
     createAndRunTestGraph(op_generator, softmax, input_ntensors, input_atensor);
@@ -880,7 +878,7 @@ TEST(cpp_operations_test, slice4d)
       vector<unique_ptr<mir::TensorVariant>> input_n_tensor(1);
       fillTensors(input_n_tensor[0], input_atensor, shape_data, 1.0f);
       auto op_gen = [st, sz](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-        return g.create<mir::ops::SliceOp>("y", inputs[0], mir::Shape(st), mir::Shape(sz));
+        return g.create<mir::ops::SliceOp>(inputs[0], mir::Shape(st), mir::Shape(sz));
       };
       createAndRunTestGraph(op_gen, slice, input_n_tensor, input_atensor);
     }
@@ -899,7 +897,7 @@ TEST(cpp_operations_test, reshape)
   fillTensors(input_ntensors[0], input_atensor, input_shape_data, 1.0f);
   auto op_generator = [&output_nshape](mir::Graph &g,
                                        const std::vector<mir::Operation::Output *> &inputs) {
-    return g.create<mir::ops::ReshapeOp>("y", inputs[0], output_nshape);
+    return g.create<mir::ops::ReshapeOp>(inputs[0], output_nshape);
   };
 
   createAndRunTestGraph(op_generator, reshape, input_ntensors, input_atensor);
@@ -913,7 +911,7 @@ TEST(cpp_operations_test, sqrtTest)
   vector<unique_ptr<mir::TensorVariant>> input_ntensor(1);
   fillTensors(input_ntensor[0], input_atensor, shape_data, 1.0f);
   auto op_generator = [](mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-    return g.create<mir::ops::SqrtOp>("y", inputs[0]);
+    return g.create<mir::ops::SqrtOp>(inputs[0]);
   };
   createAndRunTestGraph(op_generator, sqrtFN, input_ntensor, input_atensor);
 }
@@ -934,7 +932,7 @@ TEST(cpp_operations_test, pad)
 
   auto op_generator = [&padding_before, &padding_after, padding_value](
       mir::Graph &g, const std::vector<mir::Operation::Output *> &inputs) {
-    return g.create<mir::ops::PadOp>("y", inputs[0], padding_before, padding_after, padding_value);
+    return g.create<mir::ops::PadOp>(inputs[0], padding_before, padding_after, padding_value);
   };
 
   createAndRunTestGraph(op_generator, pad, input_ntensor, input_atensor);
@@ -953,7 +951,7 @@ TEST(cpp_operations_test, transpose)
   {
     auto op_generator = [&permute](mir::Graph &g,
                                    const std::vector<mir::Operation::Output *> &inputs) {
-      return g.create<mir::ops::TransposeOp>("transpose", inputs[0], permute);
+      return g.create<mir::ops::TransposeOp>(inputs[0], permute);
     };
     createAndRunTestGraph(op_generator, transpose, input_ntensor_4d, input_atensor_4d);
   }
@@ -968,7 +966,7 @@ TEST(cpp_operations_test, transpose)
   {
     auto op_generator = [&permute](mir::Graph &g,
                                    const std::vector<mir::Operation::Output *> &inputs) {
-      return g.create<mir::ops::TransposeOp>("transpose", inputs[0], permute);
+      return g.create<mir::ops::TransposeOp>(inputs[0], permute);
     };
     createAndRunTestGraph(op_generator, transpose, input_ntensor_3d, input_atensor_3d);
   }
index 41ba502..446f427 100644
--- a/compiler/nnc/unittests/soft_backend/Generator.cpp
+++ b/compiler/nnc/unittests/soft_backend/Generator.cpp
@@ -82,9 +82,9 @@ TEST(Generator, check_generator_call)
   cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
 
   mir::Graph g;
-  Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3, 4});
-  input->getOutput(0)->setName("input");
-  Operation *output = g.create<ops::ReluOp>("output", input->getOutput(0));
+  Operation::Output *input = g.create<ops::InputOp>(Shape{1, 2, 3, 4})->getOutput(0);
+  input->setName("input");
+  Operation *output = g.create<ops::ReluOp>(input);
 
   // test that generator creates output dir and files
   if (isFileExists(TEST_DIR))
index f7d5ef6..9718a1e 100644
--- a/compiler/nnc/unittests/soft_backend/ModelAnalyzer.cpp
+++ b/compiler/nnc/unittests/soft_backend/ModelAnalyzer.cpp
@@ -52,13 +52,13 @@ TEST(ModelAnalyzer, linearization)
    *      \     /
    *      [join]
    */
-  Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3});
-  Operation *head1 = g.create<ops::ReluOp>("head1", input->getOutput(0));
-  Operation *head2 = g.create<ops::ReluOp>("head2", input->getOutput(0));
-  Operation *tail1 = g.create<ops::ReluOp>("tail1", head1->getOutput(0));
-  Operation *tail2 = g.create<ops::ReluOp>("tail2", head2->getOutput(0));
+  Operation *input = g.create<ops::InputOp>(Shape{1, 2, 3});
+  Operation *head1 = g.create<ops::ReluOp>(input->getOutput(0));
+  Operation *head2 = g.create<ops::ReluOp>(input->getOutput(0));
+  Operation *tail1 = g.create<ops::ReluOp>(head1->getOutput(0));
+  Operation *tail2 = g.create<ops::ReluOp>(head2->getOutput(0));
   std::vector<mir::Operation::Output *> concat_inputs{tail1->getOutput(0), tail2->getOutput(0)};
-  Operation *join = g.create<ops::ConcatOp>("join", concat_inputs, 0);
+  Operation *join = g.create<ops::ConcatOp>(concat_inputs, 0);
   input->getOutput(0)->setName("input");
   head1->getOutput(0)->setName("head1");
   head2->getOutput(0)->setName("head2");