[nnc] Refactor TFLite importer to support operators with multiple outputs (#2816)
author Сергей Баранников/AI Tools Lab /SRR/Engineer/삼성전자 <s.barannikov@samsung.com>
Tue, 15 Jan 2019 10:43:29 +0000 (13:43 +0300)
committer Efimov Alexander/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Tue, 15 Jan 2019 10:43:29 +0000 (13:43 +0300)
Make operator conversion methods accept and return vectors of `IODescriptor`s.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
contrib/nnc/passes/tflite_frontend/tflite_importer.h
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.h

index f73e12b..f1e6c86 100644 (file)
@@ -113,7 +113,7 @@ void TfliteImporter::processUnsupportedOp(const Operator* op) {
       break;
     default:
       if (opcode <= BuiltinOperator_MAX) {
-        _problemsOpSet.insert(std::string(EnumNamesBuiltinOperator()[opcode])
+        _problemsOpSet.insert(std::string(EnumNameBuiltinOperator(opcode))
                               + ": unsupported operator");
       } else {
         _problemsOpSet.insert(std::to_string(opcode)
@@ -147,13 +147,13 @@ void TfliteImporter::walkSubGraph(const SubGraph* s) {
 
   for (auto i : *s->inputs()) {
     const Tensor* t = (*s->tensors())[i];
-    Shape inputShape = ShapeHelper::createShape(*t->shape(), t->shape()->size());
+    Shape input_shape = ShapeHelper::createShape(*t->shape(), t->shape()->size());
 
     // TODO Remove this limitation.
-    assert(inputShape.dim(0) == 1);
+    assert(input_shape.dim(0) == 1);
 
-    auto node = _graph->create<mir::ops::VariableOp>(t->name()->c_str(), inputShape);
-    _opsForTensorsTheyOutput[i] = node;
+    auto input = _graph->create<mir::ops::VariableOp>(t->name()->c_str(), input_shape);
+    _tensorMap[i] = input->getOutput(0);
   }
 
   for (auto op: *(s->operators()))
@@ -161,136 +161,130 @@ void TfliteImporter::walkSubGraph(const SubGraph* s) {
 }
 
 void TfliteImporter::walkOperator(const Operator* op) {
-  auto inputs = getPrecedingMIROps(op);
-  auto params = createOpParams(op);
+  std::vector<mir::IODescriptor> inputs = getMIRInputsForOperator(op);
+  std::vector<mir::TensorVariant> params = createOpParams(op);
+  std::vector<mir::IODescriptor> outputs;
 
-  std::vector<mir::Operation*> outputs;
-
-  unsigned int opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
+  BuiltinOperator opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
   switch (opcode) {
     case BuiltinOperator_CONV_2D:
       outputs = _opCreator->convertConv2D(inputs, params, op->builtin_options_as<Conv2DOptions>());
       break;
     case BuiltinOperator_DEPTHWISE_CONV_2D:
       outputs = _opCreator->convertDepthwiseConv2D(
-        inputs, params,
-        op->builtin_options_as<DepthwiseConv2DOptions>());
+          inputs, params, op->builtin_options_as<DepthwiseConv2DOptions>());
       break;
     case BuiltinOperator_MAX_POOL_2D:
-      outputs = _opCreator->convertMaxPool2D(inputs, params,
-                                             op->builtin_options_as<Pool2DOptions>());
+      outputs = _opCreator->convertMaxPool2D(inputs, op->builtin_options_as<Pool2DOptions>());
       break;
     case BuiltinOperator_AVERAGE_POOL_2D:
-      outputs = _opCreator->convertAveragePool2D(inputs, params,
-                                                 op->builtin_options_as<Pool2DOptions>());
+      outputs = _opCreator->convertAveragePool2D(inputs, op->builtin_options_as<Pool2DOptions>());
       break;
     case BuiltinOperator_CONCATENATION:
-      outputs = _opCreator->convertConcatenation(inputs, params,
+      outputs = _opCreator->convertConcatenation(inputs,
                                                  op->builtin_options_as<ConcatenationOptions>());
       break;
     case BuiltinOperator_RESHAPE:
-      outputs = _opCreator->convertReshape(
-        inputs, params, op->builtin_options_as<ReshapeOptions>());
+      outputs = _opCreator->convertReshape(inputs, op->builtin_options_as<ReshapeOptions>());
       break;
     case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
       outputs = _opCreator->convertResizeNN(inputs, params,
-        op->builtin_options_as<ResizeNearestNeighborOptions>());
+                                            op->builtin_options_as<ResizeNearestNeighborOptions>());
       break;
     case BuiltinOperator_MEAN:
-      outputs = _opCreator->convertReducer(inputs, params,ops::ReduceFOp::FuncType::mean,
-       op->builtin_options_as<ReducerOptions>());
+      outputs = _opCreator->convertReducer(inputs, params, ops::ReduceFOp::FuncType::mean,
+                                           op->builtin_options_as<ReducerOptions>());
       break;
     case BuiltinOperator_FULLY_CONNECTED:
-      outputs = _opCreator->convertFullyConnected(
-        inputs, params, op->builtin_options_as<FullyConnectedOptions>());
+      outputs = _opCreator->convertFullyConnected(inputs, params,
+                                                  op->builtin_options_as<FullyConnectedOptions>());
       break;
     case BuiltinOperator_SOFTMAX:
-      outputs = _opCreator->createSoftmax(
-        inputs, params, op->builtin_options_as<SoftmaxOptions>());
+      outputs = _opCreator->convertSoftmax(inputs, op->builtin_options_as<SoftmaxOptions>());
       break;
     case BuiltinOperator_SLICE:
-      outputs = _opCreator->createSlice(inputs, params, op->builtin_options_as_SliceOptions());
+      outputs = _opCreator->convertSlice(inputs, params, op->builtin_options_as<SliceOptions>());
       break;
     case BuiltinOperator_SQUEEZE:
-      outputs = _opCreator->createSqueeze(
-        inputs, params, op->builtin_options_as<SqueezeOptions>());
+      outputs = _opCreator->convertSqueeze(inputs, op->builtin_options_as<SqueezeOptions>());
       break;
     case BuiltinOperator_LOGISTIC:
-      outputs = _opCreator->createLogistic(inputs, params);
+      outputs = _opCreator->convertLogistic(inputs);
       break;
     case BuiltinOperator_SQRT:
-      outputs = _opCreator->createSqrt(inputs, params);
+      outputs = _opCreator->convertSqrt(inputs);
       break;
     case BuiltinOperator_ADD:
       outputs = _opCreator->createElementwise(
-        inputs, params, ops::ElementwiseOp::OpType::add,
-        op->builtin_options_as_AddOptions()->fused_activation_function());
+          inputs, params, ops::ElementwiseOp::OpType::add,
+          op->builtin_options_as_AddOptions()->fused_activation_function());
+      break;
+    case BuiltinOperator_SUB:
+      outputs = _opCreator->createElementwise(
+          inputs, params, ops::ElementwiseOp::OpType::sub,
+          op->builtin_options_as_SubOptions()->fused_activation_function());
       break;
     case BuiltinOperator_MUL:
       outputs = _opCreator->createElementwise(
-        inputs, params, ops::ElementwiseOp::OpType::mul,
-        op->builtin_options_as_MulOptions()->fused_activation_function());
+          inputs, params, ops::ElementwiseOp::OpType::mul,
+          op->builtin_options_as_MulOptions()->fused_activation_function());
       break;
     case BuiltinOperator_DIV:
       outputs = _opCreator->createElementwise(
-        inputs, params, ops::ElementwiseOp::OpType::div,
-        op->builtin_options_as_DivOptions()->fused_activation_function());
+          inputs, params, ops::ElementwiseOp::OpType::div,
+          op->builtin_options_as_DivOptions()->fused_activation_function());
       break;
     case BuiltinOperator_MAXIMUM:
-      outputs = _opCreator->createElementwise(
-        inputs, params, ops::ElementwiseOp::OpType::max,
-        ActivationFunctionType_NONE); // no activation
-      break;
-    case BuiltinOperator_SUB:
-      outputs = _opCreator->createElementwise(
-        inputs, params, ops::ElementwiseOp::OpType::sub,
-        op->builtin_options_as_SubOptions()->fused_activation_function());
+      outputs = _opCreator->createElementwise(inputs, params, ops::ElementwiseOp::OpType::max,
+                                              ActivationFunctionType_NONE); // no activation
       break;
     case BuiltinOperator_SQUARED_DIFFERENCE:
-      outputs = _opCreator->createSquaredDifference(inputs, params); // no activation
+      outputs = _opCreator->convertSquaredDifference(inputs, params);
       break;
     case BuiltinOperator_TRANSPOSE_CONV: {
       auto tensor = (*_tensors)[op->outputs()->Get(0)];
       auto out_shape = ShapeHelper::createShape(*tensor->shape(), tensor->shape()->size());
-      outputs = _opCreator->createTransposeConv(
-        inputs, params, op->builtin_options_as<TransposeConvOptions>(), out_shape);
+      outputs = _opCreator->convertTransposeConv(inputs, params,
+                                                 op->builtin_options_as<TransposeConvOptions>(),
+                                                 out_shape);
       break;
     }
     case BuiltinOperator_PAD:
-      outputs = _opCreator->createPad(inputs, params, op->builtin_options_as<PadOptions>());
+      outputs = _opCreator->convertPad(inputs, params, op->builtin_options_as<PadOptions>());
       break;
     case BuiltinOperator_TANH:
-      outputs = _opCreator->createActivation(inputs, params, ActivationFunctionType_TANH);
+      outputs = _opCreator->convertTanh(inputs);
       break;
     case BuiltinOperator_RELU:
-      outputs = _opCreator->createActivation(inputs, params, ActivationFunctionType_RELU);
+      outputs = _opCreator->convertReLU(inputs);
       break;
     case BuiltinOperator_RELU6:
-      outputs = _opCreator->createActivation(inputs, params, ActivationFunctionType_RELU6);
+      outputs = _opCreator->convertReLU6(inputs);
       break;
     case BuiltinOperator_TRANSPOSE:
-      outputs = _opCreator->createTranspose(
-          inputs, params, op->builtin_options_as<TransposeOptions>());
+      outputs = _opCreator->convertTranspose(inputs, params,
+                                             op->builtin_options_as<TransposeOptions>());
       break;
     case BuiltinOperator_STRIDED_SLICE:
-      outputs = _opCreator->createStridedSlice(
-        inputs, params, op->builtin_options_as<StridedSliceOptions>());
+      outputs = _opCreator->convertStridedSlice(inputs, params,
+                                                op->builtin_options_as<StridedSliceOptions>());
       break;
     case BuiltinOperator_LEAKY_RELU:
-      outputs = _opCreator->createLeakyRelu(inputs, params,
-                                            op->builtin_options_as<LeakyReluOptions>());
+      outputs = _opCreator->convertLeakyReLU(inputs, op->builtin_options_as<LeakyReluOptions>());
       break;
     default:
       assert(false && "All unsupported types should have been found before this pass.");
   }
 
-  assert(op->outputs()->size() == outputs.size());
-  for (size_t i = 0; i < op->outputs()->size(); ++i)
-    _opsForTensorsTheyOutput[(*(op->outputs()))[i]] = outputs[i];
+  assert(outputs.size() == op->outputs()->size());
+  for (size_t i = 0; i < op->outputs()->size(); ++i) {
+    int32_t tensor_index = (*op->outputs())[i];
+    _tensorMap[tensor_index] = outputs[i];
+  }
 }
 
-std::vector<mir::Operation*> TfliteImporter::getPrecedingMIROps(const Operator* op) {
-  std::vector<mir::Operation*> inputsForOp;
+std::vector<mir::IODescriptor> TfliteImporter::getMIRInputsForOperator(const Operator* op) {
+  std::vector<mir::IODescriptor> inputs;
 
   try {
     for (auto i : *(op->inputs())) {
@@ -299,16 +293,15 @@ std::vector<mir::Operation*> TfliteImporter::getPrecedingMIROps(const Operator*
         // By this point every input for the operation "op" should have corresponding
         // Model IR operations that output its inputs. This assumption is provided by the fact
         // that TFLite format specifies all operations in the execution order.
-        inputsForOp.push_back(_opsForTensorsTheyOutput.at(i));
+        inputs.push_back(_tensorMap.at(i));
       }
     }
-  }
-  catch (const std::out_of_range& e) {
+  } catch (const std::out_of_range& e) {
     throw PassException("Found a TFLite operator with an input tensor for which "
                         "a corresponding Model IR node that outputs it was not created.");
   }
 
-  return inputsForOp;
+  return inputs;
 }
 
 std::vector<mir::TensorVariant> TfliteImporter::createOpParams(const Operator* op) {
@@ -320,7 +313,7 @@ std::vector<mir::TensorVariant> TfliteImporter::createOpParams(const Operator* o
     if (b->data() != nullptr) {
       auto tensor = createTensor(t, b);
 
-      unsigned int opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
+      BuiltinOperator opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
 
       if ((opcode == BuiltinOperator_CONV_2D || opcode == BuiltinOperator_DEPTHWISE_CONV_2D)
           && t->shape()->size() == 4) {
@@ -386,15 +379,16 @@ mir::TensorVariant TfliteImporter::createTensor(const Tensor* t, const Buffer* b
 void TfliteImporter::setGraphOutputs() {
   // Marking nodes as output nodes.
   for (auto output_idx : _graphOutputs)
-    _graph->markOutput(_opsForTensorsTheyOutput[output_idx]);
+    _graph->markOutput(_tensorMap[output_idx].op);
 }
 
 void TfliteImporter::setIrNodeNames() {
   // Setting names of the nodes.
   // Note: we change the computation graph, (for example, TFLite Conv2D
   // turns into IR Conv2D->BiasAdd->ReLU), so not all of the nodes will have names.
-  for (auto& item : _opsForTensorsTheyOutput)
-    item.second->setName((*_tensors)[item.first]->name()->c_str());
+  for (auto iter : _tensorMap) {
+    iter.second.op->setName((*_tensors)[iter.first]->name()->c_str());
+  }
 }
 
 void TfliteImporter ::cleanup() {
index fd10aae..07204b2 100644 (file)
@@ -74,7 +74,7 @@ private:
 
   // This map maps indices of TFLite tensors to MIR operations/nodes
   // that correspond to operations having these tensors as output.
-  std::map<int, mir::Operation*> _opsForTensorsTheyOutput;
+  std::map<int, mir::IODescriptor> _tensorMap;
   // set of strings describing incorrect parts of network and parts of network unsupported by NNC
   std::set<std::string> _problemsOpSet;
 
@@ -115,7 +115,7 @@ private:
   /**
   * @brief Return MIR ops, preceding given tflite operator
   */
-  std::vector<mir::Operation*> getPrecedingMIROps(const ::tflite::Operator* op);
+  std::vector<mir::IODescriptor> getMIRInputsForOperator(const ::tflite::Operator* op);
 
   mir::TensorVariant createTensor(const ::tflite::Tensor* t,
                                   const ::tflite::Buffer* b);
index f7ddd7f..c23d62c 100644 (file)
 #include "core/modelIR/ShapeRange.h"
 #include "core/modelIR/Tensor.h"
 
-using namespace nnc::mir;
 using namespace ::tflite;
 
-
 namespace nnc {
 
 static void calculatePadding(tflite::Padding padding,
@@ -85,10 +83,11 @@ void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::convertConv2D(InputOps& inputs, const InputParams& params,
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertConv2D(const std::vector<mir::IODescriptor>& inputs,
+                               const std::vector<mir::TensorVariant>& params,
                                const Conv2DOptions* opts) {
-  const auto& input_shape = inputs[0]->getOutputShape(0);
+  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
   const auto& kernel_shape = params[0].getShape();
   Shape strides{opts->stride_h(), opts->stride_w()};
   std::vector<int32_t> padding_before(2);
@@ -97,10 +96,10 @@ TFLiteOpCreator::convertConv2D(InputOps& inputs, const InputParams& params,
   calculatePadding(opts->padding(), input_shape, kernel_shape, strides, padding_before,
                    padding_after);
 
-  auto outputs = createOp<ops::Conv2DOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
-                                         params[0], strides, padding_before, padding_after);
-  return createOp<ops::BiasAddOp>(opts->fused_activation_function(), outputs[0]->getOutput(0),
-                                  params[1]);
+  auto result = createOp<ops::Conv2DOp>(inputs[0], params[0],
+                                        strides, padding_before, padding_after);
+  result = createOp<ops::BiasAddOp>(result->getOutput(0), params[1]);
+  return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
 }
 
 void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
@@ -108,10 +107,11 @@ void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::convertDepthwiseConv2D(InputOps& inputs, const InputParams& params,
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertDepthwiseConv2D(const std::vector<mir::IODescriptor>& inputs,
+                                        const std::vector<mir::TensorVariant>& params,
                                         const DepthwiseConv2DOptions* opts) {
-  const auto& input_shape = inputs[0]->getOutputShape(0);
+  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
   const auto& kernel_shape = params[0].getShape();
   Shape strides{opts->stride_h(), opts->stride_w()};
   std::vector<int32_t> padding_before(2);
@@ -120,11 +120,10 @@ TFLiteOpCreator::convertDepthwiseConv2D(InputOps& inputs, const InputParams& par
   calculatePadding(opts->padding(), input_shape, kernel_shape, strides, padding_before,
                    padding_after);
 
-  auto outputs = createOp<ops::DepthwiseConv2DOp>(ActivationFunctionType_NONE,
-                                                  inputs[0]->getOutput(0), params[0], strides,
-                                                  padding_before, padding_after);
-  return createOp<ops::BiasAddOp>(opts->fused_activation_function(), outputs[0]->getOutput(0),
-                                  params[1]);
+  auto result = createOp<ops::DepthwiseConv2DOp>(inputs[0], params[0],
+                                                 strides, padding_before, padding_after);
+  result = createOp<ops::BiasAddOp>(result->getOutput(0), params[1]);
+  return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
 }
 
 void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts,
@@ -132,14 +131,11 @@ void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::convertConcatenation(InputOps& inputs, const InputParams& params,
-                                      const ConcatenationOptions* opts) {
-  std::vector<IODescriptor> descriptors;
-  for (auto i : inputs)
-    descriptors.push_back(i->getOutput(0));
-
-  return createOp<ops::ConcatOp>(opts->fused_activation_function(), descriptors, opts->axis());
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertConcatenation(const std::vector<mir::IODescriptor>& inputs,
+                                      const ::tflite::ConcatenationOptions* opts) {
+  auto result = createOp<ops::ConcatOp>(inputs, opts->axis());
+  return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
 }
 
 void TFLiteOpCreator::checkPool2D(const Pool2DOptions* opts,
@@ -147,10 +143,10 @@ void TFLiteOpCreator::checkPool2D(const Pool2DOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::convertMaxPool2D(InputOps& inputs, const InputParams& params,
-                                  const Pool2DOptions* opts) {
-  auto& input_shape = inputs[0]->getOutputShape(0);
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertMaxPool2D(const std::vector<mir::IODescriptor>& inputs,
+                                  const ::tflite::Pool2DOptions* opts) {
+  auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
   Shape window_shape{opts->filter_height(), opts->filter_width()};
   Shape strides{opts->stride_h(), opts->stride_w()};
   std::vector<int32_t> padding_before(2);
@@ -159,16 +155,17 @@ TFLiteOpCreator::convertMaxPool2D(InputOps& inputs, const InputParams& params,
   calculatePadding(opts->padding(), input_shape, window_shape, strides, padding_before,
                    padding_after);
 
-  return createOp<ops::PoolOp>(opts->fused_activation_function(), inputs[0]->getOutput(0),
-                               ops::PoolOp::PoolingType::MAX, window_shape, strides, padding_before,
-                               padding_after, ops::PoolOp::BorderType::EMPTY,
-                               ops::PoolOp::RoundMode::floor);
+  auto result = createOp<ops::PoolOp>(inputs[0], ops::PoolOp::PoolingType::MAX,
+                                      window_shape, strides, padding_before, padding_after,
+                                      ops::PoolOp::BorderType::EMPTY,
+                                      ops::PoolOp::RoundMode::floor);
+  return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertAveragePool2D(InputOps& inputs,
-                                                                   const InputParams& params,
-                                                                   const Pool2DOptions* opts) {
-  auto& input_shape = inputs[0]->getOutputShape(0);
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertAveragePool2D(const std::vector<mir::IODescriptor>& inputs,
+                                      const ::tflite::Pool2DOptions* opts) {
+  auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
   Shape window_shape{opts->filter_height(), opts->filter_width()};
   Shape strides{opts->stride_h(), opts->stride_w()};
   std::vector<int32_t> padding_before(2);
@@ -177,19 +174,21 @@ std::vector<mir::Operation*> TFLiteOpCreator::convertAveragePool2D(InputOps& inp
   calculatePadding(opts->padding(), input_shape, window_shape, strides, padding_before,
                    padding_after);
 
-  return createOp<ops::PoolOp>(opts->fused_activation_function(), inputs[0]->getOutput(0),
-                               ops::PoolOp::PoolingType::AVG, window_shape, strides, padding_before,
-                               padding_after, ops::PoolOp::BorderType::EMPTY,
-                               ops::PoolOp::RoundMode::floor);
+  auto result = createOp<ops::PoolOp>(inputs[0], ops::PoolOp::PoolingType::AVG,
+                                      window_shape, strides, padding_before, padding_after,
+                                      ops::PoolOp::BorderType::EMPTY,
+                                      ops::PoolOp::RoundMode::floor);
+  return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createSoftmax(InputOps& inputs, const InputParams& params,
-                               const SoftmaxOptions* opts) {
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertSoftmax(const std::vector<mir::IODescriptor>& inputs,
+                                const ::tflite::SoftmaxOptions* opts) {
   // Softmax in TFLite is always 2-D.
-  assert(inputs[0]->getOutputShape(0).rank() == 2);
-  int32_t axis = 1;
-  return createOp<ops::SoftmaxOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0), axis);
+  assert(inputs[0].op->getOutputShape(inputs[0].index).rank() == 2);
+  const int32_t axis = 1;
+  auto result = createOp<ops::SoftmaxOp>(inputs[0], axis);
+  return {result->getOutput(0)};
 }
 
 Shape shapeFromTensor(mir::Tensor<int32_t>&& t) {
@@ -201,95 +200,97 @@ Shape shapeFromTensor(mir::Tensor<int32_t>&& t) {
   return temporary_shape;
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createSlice(InputOps& inputs, const InputParams& params,
-                             const ::tflite::SliceOptions*) {
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertSlice(const std::vector<mir::IODescriptor>& inputs,
+                              const std::vector<mir::TensorVariant>& params,
+                              const ::tflite::SliceOptions* opts) {
   auto starts = shapeFromTensor(mir::Tensor<int32_t>(params[0]));
   auto sizes = shapeFromTensor(mir::Tensor<int32_t>(params[1]));
-  assert(starts.rank() == inputs[0]->getOutputShape(0).rank() &&
+  assert(starts.rank() == inputs[0].op->getOutputShape(inputs[0].index).rank() &&
          starts.rank() == sizes.rank());
-  return createOp<ops::SliceOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
-                                starts, sizes);
+  auto result = createOp<ops::SliceOp>(inputs[0], starts, sizes);
+  return {result->getOutput(0)};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::convertReshape(InputOps& inputs, const InputParams& params,
-                                const ReshapeOptions* opts) {
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertReshape(const std::vector<mir::IODescriptor>& inputs,
+                                const ::tflite::ReshapeOptions* opts) {
   // TODO: we should also support "-1" values in new_shape, which means that correct
   // shape values must be calculated. Better do it in the shape inference module.
   Shape new_shape = ShapeHelper::createShape(*opts->new_shape(), opts->new_shape()->size());
-  auto outputs = createOp<ops::ReshapeOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
-                                          new_shape);
-  return outputs;
+  auto result = createOp<ops::ReshapeOp>(inputs[0], new_shape);
+  return {result->getOutput(0)};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createTransposeConv(InputOps& inputs, const InputParams& params,
-                                     const ::tflite::TransposeConvOptions* opts,
-                                     const Shape& output_shape) {
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertTransposeConv(const std::vector<mir::IODescriptor>& inputs,
+                                      const std::vector<mir::TensorVariant>& params,
+                                      const ::tflite::TransposeConvOptions* opts,
+                                      const Shape& output_shape) {
   Shape strides{opts->stride_h(), opts->stride_w()};
 
-  return createOp<ops::DeConv2DOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0), params[1],
-                                   strides, paddingMap[opts->padding()], output_shape);
+  auto result = createOp<ops::DeConv2DOp>(inputs[0], params[1],
+                                          strides, paddingMap[opts->padding()], output_shape);
+  return {result->getOutput(0)};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::convertResizeNN(InputOps& inputs, const InputParams& params,
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertResizeNN(const std::vector<mir::IODescriptor>& inputs,
+                                 const std::vector<mir::TensorVariant>& params,
                                  const ::tflite::ResizeNearestNeighborOptions* opts) {
   // TODO support aligned corners
   assert(!opts->align_corners() && "Aligned corners not currently supported");
 
-  const auto& input_shape = inputs[0]->getOutputShape(0);
+  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
   assert(input_shape.rank() == 4);
   mir::Tensor<int> out_shapes(params[0]);
   Shape res_shape(4);
   res_shape.dim(0) = input_shape.dim(0);
-  res_shape.dim(1) = out_shapes.at(Index{0});
-  res_shape.dim(2) = out_shapes.at(Index{1});
+  res_shape.dim(1) = out_shapes.at(mir::Index{0});
+  res_shape.dim(2) = out_shapes.at(mir::Index{1});
   res_shape.dim(3) = input_shape.dim(3);
-  return createOp<ops::ResizeOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
-                                 ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
+  auto result = createOp<ops::ResizeOp>(inputs[0], ops::ResizeOp::ResizeMethod::nearestNeighbor,
+                                        res_shape);
+  return {result->getOutput(0)};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createElementwise(const InputOps& inputs, const InputParams& params,
-                                   ops::ElementwiseOp::OpType opType,
-                                   const ::tflite::ActivationFunctionType activation) {
-  std::vector<IODescriptor> descriptors;
-
-  for (auto i : inputs)
-    descriptors.push_back(i->getOutput(0));
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::createElementwise(const std::vector<mir::IODescriptor>& inputs,
+                                   const std::vector<mir::TensorVariant>& params,
+                                   ops::ElementwiseOp::OpType op_type,
+                                   ::tflite::ActivationFunctionType activation) {
+  std::vector<mir::IODescriptor> descriptors = inputs;
 
   for (const auto& param : params) {
-    auto weights_tensor = createOp<ops::ConstantOp>(ActivationFunctionType_NONE, param);
-    descriptors.push_back(weights_tensor[0]->getOutput(0));
+    auto weights_tensor = createOp<ops::ConstantOp>(param);
+    descriptors.push_back(weights_tensor->getOutput(0));
   }
-  return createOp<ops::ElementwiseOp>(activation, descriptors, opType);
-}
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createSquaredDifference(const InputOps& inputs, const InputParams& params) {
-  std::vector<IODescriptor> descriptors;
+  auto result = createOp<ops::ElementwiseOp>(descriptors, op_type);
+  return {addFusedActivation(result->getOutput(0), activation)};
+}
 
-  for (auto i : inputs)
-    descriptors.push_back(i->getOutput(0));
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::IODescriptor>& inputs,
+                                          const std::vector<mir::TensorVariant>& params) {
+  std::vector<mir::IODescriptor> descriptors = inputs;
 
   for (const auto& param : params) {
-    auto weights_tensor = createOp<ops::ConstantOp>(ActivationFunctionType_NONE, param);
-    descriptors.push_back(weights_tensor[0]->getOutput(0));
+    auto weights_tensor = createOp<ops::ConstantOp>(param);
+    descriptors.push_back(weights_tensor->getOutput(0));
   }
 
-  auto sub_result = createOp<ops::ElementwiseOp>(ActivationFunctionType_NONE, descriptors,
-                                             ops::ElementwiseOp::OpType::sub);
-
-  return createOp<ops::ElementwiseOp>(ActivationFunctionType_NONE,
-                                      std::vector<IODescriptor>{sub_result[0]->getOutput(0),
-                                                                sub_result[0]->getOutput(0)},
-                                      ops::ElementwiseOp::OpType::mul);
+  auto result = createOp<ops::ElementwiseOp>(descriptors, ops::ElementwiseOp::OpType::sub);
+  result = createOp<ops::ElementwiseOp>(std::vector<mir::IODescriptor>{
+                                            result->getOutput(0),
+                                            result->getOutput(0)},
+                                        ops::ElementwiseOp::OpType::mul);
+  return {result->getOutput(0)};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::convertReducer(InputOps& inputs, const InputParams& params,
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertReducer(const std::vector<mir::IODescriptor>& inputs,
+                                const std::vector<mir::TensorVariant>& params,
                                 ops::ReduceFOp::FuncType ft,
                                 const ::tflite::ReducerOptions* opts) {
   assert(params.at(0).getShape().rank() <= 1 && "Must be 1-dim or 0-dim tensor");
@@ -302,9 +303,8 @@ TFLiteOpCreator::convertReducer(InputOps& inputs, const InputParams& params,
 
   std::sort(axes.begin(), axes.end());
 
-  return createOp<ops::ReduceFOp>(
-    ActivationFunctionType_NONE, inputs[0]->getOutput(0),
-    axes, opts->keep_dims(), ft);
+  auto result = createOp<ops::ReduceFOp>(inputs[0], axes, opts->keep_dims(), ft);
+  return {result->getOutput(0)};
 }
 
 void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions* opts,
@@ -312,19 +312,17 @@ void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::convertFullyConnected(InputOps& inputs,
-                                       const InputParams& params,
-                                       const FullyConnectedOptions* opts) {
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertFullyConnected(const std::vector<mir::IODescriptor>& inputs,
+                                       const std::vector<mir::TensorVariant>& params,
+                                       const ::tflite::FullyConnectedOptions* opts) {
   // Add Reshape operation to make sure the input for FC operation has shape [1, fc_input_size]
   int32_t fc_input_size = params[0].getShape().dim(0);
-  auto outputs = createOp<ops::ReshapeOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
-                                          Shape{1, fc_input_size});
+  auto flatten = createOp<ops::ReshapeOp>(inputs[0], Shape{1, fc_input_size});
 
-  auto fc_outputs = createOp<ops::FullyConnectedOp>(ActivationFunctionType_NONE,
-                                                    outputs[0]->getOutput(0), params[0]);
-  return createOp<ops::BiasAddOp>(opts->fused_activation_function(), fc_outputs[0]->getOutput(0),
-                                  params[1]);
+  auto result = createOp<ops::FullyConnectedOp>(flatten->getOutput(0), params[0]);
+  result = createOp<ops::BiasAddOp>(result->getOutput(0), params[1]);
+  return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
 }
 
 void TFLiteOpCreator::checkActivationType(ActivationFunctionType activation_type,
@@ -334,48 +332,40 @@ void TFLiteOpCreator::checkActivationType(ActivationFunctionType activation_type
       activation_type != ActivationFunctionType_RELU6 &&
       activation_type != ActivationFunctionType_TANH)
     problems_op_set.insert(std::string("Unsupported activation type: ")
-                           + EnumNamesActivationFunctionType()[activation_type]);
-}
-
-mir::Operation* TFLiteOpCreator::addFusedActivation(mir::Operation* input,
-                                                    ActivationFunctionType activation_type) {
-  mir::Operation* activation;
-
-  if (activation_type != ActivationFunctionType_NONE) {
-    // TODO: process other activation types
-    assert(input->getNumOutputs() == 1);
-    switch (activation_type) {
-      case ActivationFunctionType_RELU:
-        activation = _graph->create<ops::ReluOp>("", input->getOutput(0));
-        break;
-      case ActivationFunctionType_RELU6:
-        activation = _graph->create<ops::CappedReluOp>("", input->getOutput(0), 6);
-        break;
-      case ActivationFunctionType_TANH:
-        activation = _graph->create<ops::TanhOp>("", input->getOutput(0));
-        break;
-      default:
-        assert(false && "Unsupported activation types must be detected before this pass");
-    }
-    return activation;
-  } else {
-    return input;
-  }
+                           + EnumNameActivationFunctionType(activation_type));
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::createSqueeze(
-  InputOps& inputs, const InputParams& params, const ::tflite::SqueezeOptions* opts) {
+mir::IODescriptor TFLiteOpCreator::addFusedActivation(mir::IODescriptor input,
+                                                      ActivationFunctionType activation_type) {
+  // TODO Support other activation function types.
+  switch (activation_type) {
+    case ActivationFunctionType_NONE:
+      return input;
+    case ActivationFunctionType_RELU:
+      return createOp<ops::ReluOp>(input)->getOutput(0);
+    case ActivationFunctionType_RELU6:
+      return createOp<ops::CappedReluOp>(input, 6)->getOutput(0);
+    case ActivationFunctionType_TANH:
+      return createOp<ops::TanhOp>(input)->getOutput(0);
+    default:
+      assert(false && "Unsupported activation types must be detected before this pass");
+  }
+}
 
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertSqueeze(const std::vector<mir::IODescriptor>& inputs,
+                                const ::tflite::SqueezeOptions* opts) {
   std::vector<int32_t> squeeze_dims{opts->squeeze_dims()->begin(), opts->squeeze_dims()->end()};
 
-  return createOp<ops::SqueezeOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
-                                  squeeze_dims);
+  auto result = createOp<ops::SqueezeOp>(inputs[0], squeeze_dims);
+  return {result->getOutput(0)};
 }
 
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createPad(InputOps& inputs, const InputParams& params,
-                           const ::tflite::PadOptions *opts) {
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertPad(const std::vector<mir::IODescriptor>& inputs,
+                            const std::vector<mir::TensorVariant>& params,
+                            const ::tflite::PadOptions* opts) {
 
   assert(params.size() == 1); // support pad with one param
   std::vector<std::pair<int32_t, int32_t>> paddings;
@@ -389,54 +379,70 @@ TFLiteOpCreator::createPad(InputOps& inputs, const InputParams& params,
   paddings.reserve(static_cast<size_t>(num_dims));
   // create structure with paddings
   for (int i = 0; i < num_dims; i++)
-    paddings.emplace_back(paddings_tensor.at(Index({i, 0})), paddings_tensor.at(Index({i, 1})));
+    paddings.emplace_back(paddings_tensor.at(mir::Index({i, 0})),
+                          paddings_tensor.at(mir::Index({i, 1})));
   // create const value, it's float because we can't see input type
   float const_value = 0.0; // constant values other than zero are not supported
   // create scalar with constant value
-  Scalar constant_value(reinterpret_cast<char*>(&const_value), DTYPE::FLOAT32, sizeof(float));
+  mir::Scalar constant_value(reinterpret_cast<char*>(&const_value),
+                             mir::DTYPE::FLOAT32, sizeof(float));
 
-  return createOp<ops::PadOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
-                              num_dims, paddings, constant_value);
+  auto result = createOp<ops::PadOp>(inputs[0], num_dims, paddings, constant_value);
+  return {result->getOutput(0)};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createActivation(InputOps& inputs, const InputParams&,
-                                  const ::tflite::ActivationFunctionType activationType) {
-  assert(inputs.size() == 1);
-  return {addFusedActivation(inputs[0], activationType)};
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertTanh(const std::vector<mir::IODescriptor>& inputs) {
+  auto result = createOp<ops::TanhOp>(inputs[0]);
+  return {result->getOutput(0)};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createSqrt(InputOps& inputs, const InputParams&) {
-  return createOp<ops::SqrtOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0));
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertReLU(const std::vector<mir::IODescriptor>& inputs) {
+  auto result = createOp<ops::ReluOp>(inputs[0]);
+  return {result->getOutput(0)};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createLogistic(InputOps& inputs, const InputParams&) {
-  return createOp<ops::SigmoidOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0));
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertReLU6(const std::vector<mir::IODescriptor>& inputs) {
+  auto result = createOp<ops::CappedReluOp>(inputs[0], 6);
+  return {result->getOutput(0)};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createTranspose(InputOps& inputs, const InputParams& params,
-                                 const ::tflite::TransposeOptions*) {
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertSqrt(const std::vector<mir::IODescriptor>& inputs) {
+  auto result = createOp<ops::SqrtOp>(inputs[0]);
+  return {result->getOutput(0)};
+}
 
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertLogistic(const std::vector<mir::IODescriptor>& inputs) {
+  auto result = createOp<ops::SigmoidOp>(inputs[0]);
+  return {result->getOutput(0)};
+}
+
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertTranspose(const std::vector<mir::IODescriptor>& inputs,
+                                  const std::vector<mir::TensorVariant>& params,
+                                  const ::tflite::TransposeOptions* opts) {
   assert(params.size() == 1);
   std::vector<std::size_t> axis_order;
 
   mir::Tensor<int32_t> permutation_tensor(params[0]);
 
-  ShapeRange range(permutation_tensor.getShape());
+  mir::ShapeRange range(permutation_tensor.getShape());
   for (const auto& index : range) {
     axis_order.push_back(permutation_tensor.at(index));
   }
 
-  return createOp<ops::TransposeOp>(ActivationFunctionType_NONE,
-                                    inputs[0]->getOutput(0), axis_order);
+  auto result = createOp<ops::TransposeOp>(inputs[0], axis_order);
+  return {result->getOutput(0)};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createStridedSlice(InputOps& inputs, const InputParams& params,
-                                    const ::tflite::StridedSliceOptions* opts) {
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertStridedSlice(const std::vector<mir::IODescriptor>& inputs,
+                                     const std::vector<mir::TensorVariant>& params,
+                                     const ::tflite::StridedSliceOptions* opts) {
   assert(params.size() == 3);
 
   int32_t begin_mask = opts->begin_mask();
@@ -454,12 +460,12 @@ TFLiteOpCreator::createStridedSlice(InputOps& inputs, const InputParams& params,
 
   int32_t num_dims = begin.getShape().numElements();
 
-  assert(num_dims == inputs[0]->getOutputShape(0).rank() &&
+  assert(num_dims == inputs[0].op->getOutputShape(inputs[0].index).rank() &&
          num_dims == end.getShape().numElements());
 
   mir::Tensor<int32_t> strides(params[2]);
   // support only strides == 1
-  ShapeRange strides_range(strides.getShape());
+  mir::ShapeRange strides_range(strides.getShape());
   for (const auto& index: strides_range) {
     assert(strides.at(index) == 1 && "Strides not equal 1 not supported");
   }
@@ -471,29 +477,27 @@ TFLiteOpCreator::createStridedSlice(InputOps& inputs, const InputParams& params,
     if (begin_mask & (1 << axis))
       start.dim(axis) = 0;
     else
-      start.dim(axis) = begin.at(Index{axis});
+      start.dim(axis) = begin.at(mir::Index{axis});
 
     if (end_mask & (1 << axis))
-      size.dim(axis) = inputs[0]->getOutputShape(0).dim(axis) - start.dim(axis);
+      size.dim(axis) = inputs[0].op->getOutputShape(inputs[0].index).dim(axis) - start.dim(axis);
     else
-      size.dim(axis) = end.at(Index{axis}) - start.dim(axis);
+      size.dim(axis) = end.at(mir::Index{axis}) - start.dim(axis);
     
     if (shrink_axis_mask & (1 << axis))
       squeeze_dims.push_back(axis);
   }
 
-  auto slice_outputs = createOp<ops::SliceOp>(ActivationFunctionType_NONE,
-                                              inputs[0]->getOutput(0), start, size);
-  return createOp<ops::SqueezeOp>(ActivationFunctionType_NONE,
-                                  slice_outputs[0]->getOutput(0), squeeze_dims);
+  auto result = createOp<ops::SliceOp>(inputs[0], start, size);
+  result = createOp<ops::SqueezeOp>(result->getOutput(0), squeeze_dims);
+  return {result->getOutput(0)};
 }
 
-std::vector<mir::Operation*>
-TFLiteOpCreator::createLeakyRelu(TFLiteOpCreator::InputOps& inputs, const TFLiteOpCreator::InputParams&,
-                                 const ::tflite::LeakyReluOptions* opts) {
-  float alpha = opts->alpha();
-
-  return createOp<ops::LeakyReluOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0), alpha);
+std::vector<mir::IODescriptor>
+TFLiteOpCreator::convertLeakyReLU(const std::vector<mir::IODescriptor>& inputs,
+                                  const ::tflite::LeakyReluOptions* opts) {
+  auto result = createOp<ops::LeakyReluOp>(inputs[0], opts->alpha());
+  return {result->getOutput(0)};
 }
 
 } // namespace nnc
index 6ba6f92..de29b58 100644 (file)
@@ -43,70 +43,98 @@ using mir::Shape;
 
 class TFLiteOpCreator {
 public:
-  using InputOps = std::vector<mir::Operation*>;
-  using InputParams = std::vector<mir::TensorVariant>;
-
   explicit TFLiteOpCreator(Graph* g) : _graph(g) {}
 
-  std::vector<mir::Operation*> convertConv2D(InputOps&, const InputParams&,
-                                             const ::tflite::Conv2DOptions*);
+  std::vector<mir::IODescriptor>
+  convertConv2D(const std::vector<mir::IODescriptor>& inputs,
+                const std::vector<mir::TensorVariant>& params,
+                const ::tflite::Conv2DOptions* opts);
+
+  std::vector<mir::IODescriptor>
+  convertDepthwiseConv2D(const std::vector<mir::IODescriptor>& inputs,
+                         const std::vector<mir::TensorVariant>& params,
+                         const ::tflite::DepthwiseConv2DOptions* opts);
 
-  std::vector<mir::Operation*> convertDepthwiseConv2D(InputOps&, const InputParams&,
-                                                      const ::tflite::DepthwiseConv2DOptions*);
+  std::vector<mir::IODescriptor>
+  convertConcatenation(const std::vector<mir::IODescriptor>& inputs,
+                       const ::tflite::ConcatenationOptions* opts);
 
-  std::vector<mir::Operation*> convertConcatenation(InputOps&, const InputParams&,
-                                                    const ::tflite::ConcatenationOptions*);
+  std::vector<mir::IODescriptor>
+  convertMaxPool2D(const std::vector<mir::IODescriptor>& inputs,
+                   const ::tflite::Pool2DOptions* opts);
 
-  std::vector<mir::Operation*> convertMaxPool2D(InputOps&, const InputParams&,
-                                                const ::tflite::Pool2DOptions*);
+  std::vector<mir::IODescriptor>
+  convertAveragePool2D(const std::vector<mir::IODescriptor>& inputs,
+                       const ::tflite::Pool2DOptions* opts);
 
-  std::vector<mir::Operation*> convertAveragePool2D(InputOps&, const InputParams&,
-                                                    const ::tflite::Pool2DOptions*);
+  std::vector<mir::IODescriptor>
+  convertReducer(const std::vector<mir::IODescriptor>& inputs,
+                 const std::vector<mir::TensorVariant>& params,
+                 ops::ReduceFOp::FuncType ft,
+                 const ::tflite::ReducerOptions* opts);
 
-  std::vector<mir::Operation*> convertReducer(InputOps&, const InputParams&,
-                                              ops::ReduceFOp::FuncType,
-                                              const ::tflite::ReducerOptions*);
+  std::vector<mir::IODescriptor>
+  convertSoftmax(const std::vector<mir::IODescriptor>& inputs,
+                 const ::tflite::SoftmaxOptions* opts);
 
-  std::vector<mir::Operation*> createSoftmax(InputOps&, const InputParams&,
-                                             const ::tflite::SoftmaxOptions*);
+  std::vector<mir::IODescriptor>
+  convertSlice(const std::vector<mir::IODescriptor>& inputs,
+               const std::vector<mir::TensorVariant>& params,
+               const ::tflite::SliceOptions* opts);
 
-  std::vector<mir::Operation*> createSlice(InputOps&, const InputParams&,
-                                           const ::tflite::SliceOptions*);
-  
-  std::vector<mir::Operation*> convertReshape(InputOps&, const InputParams&,
-                                              const ::tflite::ReshapeOptions*);
+  std::vector<mir::IODescriptor>
+  convertReshape(const std::vector<mir::IODescriptor>& inputs,
+                 const ::tflite::ReshapeOptions* opts);
 
-  std::vector<mir::Operation*> convertFullyConnected(InputOps&, const InputParams&,
-                                                     const ::tflite::FullyConnectedOptions*);
+  std::vector<mir::IODescriptor>
+  convertFullyConnected(const std::vector<mir::IODescriptor>& inputs,
+                        const std::vector<mir::TensorVariant>& params,
+                        const ::tflite::FullyConnectedOptions* opts);
 
-  std::vector<mir::Operation*> convertResizeNN(InputOps&, const InputParams&,
-                                               const ::tflite::ResizeNearestNeighborOptions*);
+  std::vector<mir::IODescriptor>
+  convertResizeNN(const std::vector<mir::IODescriptor>& inputs,
+                  const std::vector<mir::TensorVariant>& params,
+                  const ::tflite::ResizeNearestNeighborOptions* opts);
 
-  std::vector<mir::Operation*> createLogistic(InputOps& inputs, const InputParams& params);
+  std::vector<mir::IODescriptor>
+  convertLogistic(const std::vector<mir::IODescriptor>& inputs);
 
-  std::vector<mir::Operation*> createSqrt(InputOps& inputs, const InputParams& params);
+  std::vector<mir::IODescriptor>
+  convertSqrt(const std::vector<mir::IODescriptor>& inputs);
 
-  std::vector<mir::Operation*> createSqueeze(InputOps& inputs, const InputParams& params,
-                                             const ::tflite::SqueezeOptions* opts);
+  std::vector<mir::IODescriptor>
+  convertSqueeze(const std::vector<mir::IODescriptor>& inputs,
+                 const ::tflite::SqueezeOptions* opts);
 
   /** @brief Elementwise Operation */
-  std::vector<mir::Operation*> createElementwise(
-    const InputOps&, const InputParams&, ops::ElementwiseOp::OpType opType,
-    const ::tflite::ActivationFunctionType);
+  std::vector<mir::IODescriptor>
+  createElementwise(const std::vector<mir::IODescriptor>& inputs,
+                    const std::vector<mir::TensorVariant>& params,
+                    ops::ElementwiseOp::OpType op_type,
+                    ::tflite::ActivationFunctionType activation);
+
+  std::vector<mir::IODescriptor>
+  convertSquaredDifference(const std::vector<mir::IODescriptor>& inputs,
+                           const std::vector<mir::TensorVariant>& params);
+
+  std::vector<mir::IODescriptor>
+  convertTanh(const std::vector<mir::IODescriptor>& inputs);
+
+  std::vector<mir::IODescriptor>
+  convertReLU(const std::vector<mir::IODescriptor>& inputs);
 
-  std::vector<mir::Operation*> createSquaredDifference(const InputOps&, const InputParams&);
+  std::vector<mir::IODescriptor>
+  convertReLU6(const std::vector<mir::IODescriptor>& inputs);
 
-  /// @brief Free-standing ( non-fused ) activation function based on tflite activation
-  std::vector<mir::Operation*> createActivation(InputOps&, const InputParams&,
-                                                const ::tflite::ActivationFunctionType);
   /**
  * @brief Creates a Transposed convolution
  * @param params 0 - output shape (unused), 1 - kernel, 2 - input
  */
-  std::vector<mir::Operation*> createTransposeConv(
-    InputOps&, const InputParams&,
-    const ::tflite::TransposeConvOptions*,
-    const Shape&);
+  std::vector<mir::IODescriptor>
+  convertTransposeConv(const std::vector<mir::IODescriptor>& inputs,
+                       const std::vector<mir::TensorVariant>& params,
+                       const ::tflite::TransposeConvOptions* opts,
+                       const Shape& output_shape);
 
   /**
    * @brief Create a Pad operation
@@ -115,8 +143,10 @@ public:
    * @param opts TFLite PadOptions
    * @return Vector of output IODescriptors
    */
-  std::vector<mir::Operation*> createPad(InputOps&, const InputParams&,
-                                         const ::tflite::PadOptions* opts);
+  std::vector<mir::IODescriptor>
+  convertPad(const std::vector<mir::IODescriptor>& inputs,
+             const std::vector<mir::TensorVariant>& params,
+             const ::tflite::PadOptions* opts);
 
   /**
    * @brief Create a Transpose operation
@@ -125,8 +155,10 @@ public:
    * @param opts TFLite TransposeOptions
    * @return Vector of output IODescriptors
    */
-  std::vector<mir::Operation*> createTranspose(InputOps&, const InputParams&,
-                                               const ::tflite::TransposeOptions*);
+  std::vector<mir::IODescriptor>
+  convertTranspose(const std::vector<mir::IODescriptor>& inputs,
+                   const std::vector<mir::TensorVariant>& params,
+                   const ::tflite::TransposeOptions* opts);
 
   /**
    * @brief Create a Strided Slice operation
@@ -135,15 +167,18 @@ public:
    * @param opts TFLite StridedSliceOptions
    * @return Vector of output IODescriptors
    */
-  std::vector<mir::Operation*> createStridedSlice(InputOps&, const InputParams&,
-                                                  const ::tflite::StridedSliceOptions*);
+  std::vector<mir::IODescriptor>
+  convertStridedSlice(const std::vector<mir::IODescriptor>& inputs,
+                      const std::vector<mir::TensorVariant>& params,
+                      const ::tflite::StridedSliceOptions* opts);
 
   /**
    * @brief Create leaky relu activation
    * @return Vector of output IODescriptors
    */
-  std::vector<mir::Operation*> createLeakyRelu(InputOps&, const InputParams&,
-                                               const ::tflite::LeakyReluOptions*);
+  std::vector<mir::IODescriptor>
+  convertLeakyReLU(const std::vector<mir::IODescriptor>& inputs,
+                   const ::tflite::LeakyReluOptions* opts);
 
   void checkPool2D(const ::tflite::Pool2DOptions*, std::set<std::string>&);
 
@@ -164,21 +199,17 @@ private:
 
   void checkActivationType(::tflite::ActivationFunctionType, std::set<std::string>&);
 
-  mir::Operation* addFusedActivation(mir::Operation* input,
-                                     ::tflite::ActivationFunctionType activationType);
+  mir::IODescriptor addFusedActivation(mir::IODescriptor input,
+                                       ::tflite::ActivationFunctionType activation_type);
 
   template<typename OpType, typename... Types>
-  std::vector<mir::Operation*> createOp(::tflite::ActivationFunctionType activation,
-                                        Types&&... args);
+  mir::Operation* createOp(Types&&... args);
 };
 
 template<typename OpType, typename... Types>
-std::vector<mir::Operation*>
-TFLiteOpCreator::createOp(::tflite::ActivationFunctionType activation, Types&& ... args) {
+mir::Operation* TFLiteOpCreator::createOp(Types&& ... args) {
   // TODO: how to name operations? in Tensorflow tensors get names, not operations
-  auto op = _graph->create<OpType>("", std::forward<Types>(args)...);
-  auto fused_op = addFusedActivation(op, activation);
-  return {fused_op};
+  return _graph->create<OpType>("", std::forward<Types>(args)...);
 }
 
 } // namespace nnc