[nnc] Refactor TensorFlow Lite importer (#2890)
author Sergei Barannikov/AI Tools Lab/SRR/Engineer/Samsung Electronics <s.barannikov@samsung.com>
Mon, 21 Jan 2019 15:24:01 +0000 (18:24 +0300)
committer Roman Mikhailovich Rusyaev/AI Tools Lab/SRR/Staff Engineer/Samsung Electronics <r.rusyaev@samsung.com>
Mon, 21 Jan 2019 15:24:01 +0000 (18:24 +0300)
Refactor the TensorFlow Lite importer to process constant and variable inputs to operators identically.
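
Previously, each converter took two parallel collections: "inputs" with
IODescriptors of variable inputs and "params" with TensorVariants of constant
inputs, so every converter had to know which operand lived in which collection.
getMIRInputsForOperator() now wraps every constant buffer in a ConstantOp, so
converters receive a single uniform vector of IODescriptors and recover
constant data on demand; the new IODescriptor::getShape() helper likewise lets
them query shapes uniformly. A minimal sketch of the resulting pattern (it
mirrors the new convertSlice() below; extractTensor() throws a PassException
when the operand is not constant):

    std::vector<mir::IODescriptor>
    TFLiteOpCreator::convertSlice(const ::tflite::SliceOptions* opts,
                                  const std::vector<mir::IODescriptor>& inputs) {
      auto input = inputs.at(0);  // data input, constant or not
      // Constant operands arrive as outputs of ConstantOp nodes created by
      // the importer; extract their contents instead of reading a separate
      // 'params' vector.
      mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
      mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(2)));
      Shape starts(convertIntTensorToVector<int32_t>(begin_tensor));
      Shape sizes(convertIntTensorToVector<int32_t>(size_tensor));
      auto result = createOp<ops::SliceOp>(input, starts, sizes);
      return {result->getOutput(0)};
    }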

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
contrib/nnc/include/core/modelIR/Operation.h
contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
contrib/nnc/passes/tflite_frontend/tflite_importer.h
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.h

diff --git a/contrib/nnc/include/core/modelIR/Operation.h b/contrib/nnc/include/core/modelIR/Operation.h
index f09f458..86c7b90 100644
--- a/contrib/nnc/include/core/modelIR/Operation.h
+++ b/contrib/nnc/include/core/modelIR/Operation.h
@@ -32,6 +32,7 @@ class Operation;
 struct IODescriptor {
   Operation* op;
   std::size_t index;
+  const Shape& getShape() const;
 };
 
 class Operation {
@@ -83,6 +84,10 @@ private:
   std::map<size_t, nnc::mir::Shape> _outputShapes;
 };
 
+inline const Shape& IODescriptor::getShape() const {
+  return op->getOutputShape(index);
+}
+
 } // namespace mir
 } // namespace nnc
 
diff --git a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
index a618fe5..2df49b1 100644
--- a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
+++ b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
@@ -926,7 +926,7 @@ shared_ptr<ArtifactId> AclCppOpGenerator::genTensor(Operation& op, const Shape&
   if (op.getType() == Operation::Type::variable)
     _inputs.insert(&op);
 
-  if (op.getNextNodes().empty())
+  if (op.getNextNodes().empty() && op.getType() != Operation::Type::constant)
     _outputs.insert(&op);
 
   return genTensor(tensorName(&op), ir_shape, !op.getName().empty());
diff --git a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
index 6eef9ad..d7ba484 100644
--- a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
+++ b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
@@ -262,6 +262,12 @@ void ModelAnalyzer::visit(ops::VariableOp& op) {
 
 void ModelAnalyzer::visit(ops::ConstantOp& op) {
   assert(op.getPrevNodes().empty());
+
+  // FIXME This is to work around deserializeTensors not being able to deserialize tensors of type
+  // other than float32.
+  if (op.getNextNodes().empty())
+    return;
+
   addOpDescr(&op, "constant");
 }
 
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
index f1e6c86..bcfdb9a 100644
--- a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
+++ b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
@@ -87,10 +87,17 @@ void TfliteImporter::processUnsupportedOp(const Operator* op) {
       _opCreator->checkFullyConnected(op->builtin_options_as<FullyConnectedOptions>(),
                                       _problemsOpSet);
       break;
+    case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
+      _opCreator->checkResizeNearestNeighbor(op->builtin_options_as<ResizeNearestNeighborOptions>(),
+                                             _problemsOpSet);
+      break;
+    case BuiltinOperator_STRIDED_SLICE:
+      _opCreator->checkStridedSlice(op->builtin_options_as<StridedSliceOptions>(),
+                                    _problemsOpSet);
+      break;
     case BuiltinOperator_SOFTMAX:
     case BuiltinOperator_SLICE:
     case BuiltinOperator_RESHAPE:
-    case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
     case BuiltinOperator_SQUEEZE:
     case BuiltinOperator_LOGISTIC:
     case BuiltinOperator_SQRT:
@@ -107,7 +114,6 @@ void TfliteImporter::processUnsupportedOp(const Operator* op) {
     case BuiltinOperator_RELU:
     case BuiltinOperator_RELU6:
     case BuiltinOperator_TRANSPOSE:
-    case BuiltinOperator_STRIDED_SLICE:
     case BuiltinOperator_LEAKY_RELU:
       // No checks
       break;
@@ -162,51 +168,50 @@ void TfliteImporter::walkSubGraph(const SubGraph* s) {
 
 void TfliteImporter::walkOperator(const Operator* op) {
   std::vector<mir::IODescriptor> inputs = getMIRInputsForOperator(op);
-  std::vector<mir::TensorVariant> params = createOpParams(op);
   std::vector<mir::IODescriptor> outputs;
 
   BuiltinOperator opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
   switch (opcode) {
     case BuiltinOperator_CONV_2D:
-      outputs = _opCreator->convertConv2D(inputs, params, op->builtin_options_as<Conv2DOptions>());
+      outputs = _opCreator->convertConv2D(op->builtin_options_as<Conv2DOptions>(), inputs);
       break;
     case BuiltinOperator_DEPTHWISE_CONV_2D:
-      outputs = _opCreator->convertDepthwiseConv2D(
-          inputs, params, op->builtin_options_as<DepthwiseConv2DOptions>());
+      outputs = _opCreator->convertDepthwiseConv2D(op->builtin_options_as<DepthwiseConv2DOptions>(),
+                                                   inputs);
       break;
     case BuiltinOperator_MAX_POOL_2D:
-      outputs = _opCreator->convertMaxPool2D(inputs, op->builtin_options_as<Pool2DOptions>());
+      outputs = _opCreator->convertMaxPool2D(op->builtin_options_as<Pool2DOptions>(), inputs);
       break;
     case BuiltinOperator_AVERAGE_POOL_2D:
-      outputs = _opCreator->convertAveragePool2D(inputs, op->builtin_options_as<Pool2DOptions>());
+      outputs = _opCreator->convertAveragePool2D(op->builtin_options_as<Pool2DOptions>(), inputs);
       break;
     case BuiltinOperator_CONCATENATION:
-      outputs = _opCreator->convertConcatenation(inputs,
-                                                 op->builtin_options_as<ConcatenationOptions>());
+      outputs = _opCreator->convertConcatenation(
+          op->builtin_options_as<ConcatenationOptions>(), inputs);
       break;
     case BuiltinOperator_RESHAPE:
-      outputs = _opCreator->convertReshape(inputs, op->builtin_options_as<ReshapeOptions>());
+      outputs = _opCreator->convertReshape(op->builtin_options_as<ReshapeOptions>(), inputs);
       break;
     case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
-      outputs = _opCreator->convertResizeNN(inputs, params,
-                                            op->builtin_options_as<ResizeNearestNeighborOptions>());
+      outputs = _opCreator->convertResizeNearestNeighbor(
+          op->builtin_options_as<ResizeNearestNeighborOptions>(), inputs);
       break;
     case BuiltinOperator_MEAN:
-      outputs = _opCreator->convertReducer(inputs, params, ops::ReduceFOp::FuncType::mean,
-                                           op->builtin_options_as<ReducerOptions>());
+      outputs = _opCreator->convertMean(
+          op->builtin_options_as<ReducerOptions>(), inputs);
       break;
     case BuiltinOperator_FULLY_CONNECTED:
-      outputs = _opCreator->convertFullyConnected(inputs, params,
-                                                  op->builtin_options_as<FullyConnectedOptions>());
+      outputs = _opCreator->convertFullyConnected(
+          op->builtin_options_as<FullyConnectedOptions>(), inputs);
       break;
     case BuiltinOperator_SOFTMAX:
-      outputs = _opCreator->convertSoftmax(inputs, op->builtin_options_as<SoftmaxOptions>());
+      outputs = _opCreator->convertSoftmax(op->builtin_options_as<SoftmaxOptions>(), inputs);
       break;
     case BuiltinOperator_SLICE:
-      outputs = _opCreator->convertSlice(inputs, params, op->builtin_options_as<SliceOptions>());
+      outputs = _opCreator->convertSlice(op->builtin_options_as<SliceOptions>(), inputs);
       break;
     case BuiltinOperator_SQUEEZE:
-      outputs = _opCreator->convertSqueeze(inputs, op->builtin_options_as<SqueezeOptions>());
+      outputs = _opCreator->convertSqueeze(op->builtin_options_as<SqueezeOptions>(), inputs);
       break;
     case BuiltinOperator_LOGISTIC:
       outputs = _opCreator->convertLogistic(inputs);
@@ -216,41 +221,37 @@ void TfliteImporter::walkOperator(const Operator* op) {
       break;
     case BuiltinOperator_ADD:
       outputs = _opCreator->createElementwise(
-          inputs, params, ops::ElementwiseOp::OpType::add,
-          op->builtin_options_as_AddOptions()->fused_activation_function());
+          ops::ElementwiseOp::OpType::add,
+          op->builtin_options_as_AddOptions()->fused_activation_function(), inputs);
       break;
     case BuiltinOperator_SUB:
       outputs = _opCreator->createElementwise(
-          inputs, params, ops::ElementwiseOp::OpType::sub,
-          op->builtin_options_as_SubOptions()->fused_activation_function());
+          ops::ElementwiseOp::OpType::sub,
+          op->builtin_options_as_SubOptions()->fused_activation_function(), inputs);
       break;
     case BuiltinOperator_MUL:
       outputs = _opCreator->createElementwise(
-          inputs, params, ops::ElementwiseOp::OpType::mul,
-          op->builtin_options_as_MulOptions()->fused_activation_function());
+          ops::ElementwiseOp::OpType::mul,
+          op->builtin_options_as_MulOptions()->fused_activation_function(), inputs);
       break;
     case BuiltinOperator_DIV:
       outputs = _opCreator->createElementwise(
-          inputs, params, ops::ElementwiseOp::OpType::div,
-          op->builtin_options_as_DivOptions()->fused_activation_function());
+          ops::ElementwiseOp::OpType::div,
+          op->builtin_options_as_DivOptions()->fused_activation_function(), inputs);
       break;
     case BuiltinOperator_MAXIMUM:
-      outputs = _opCreator->createElementwise(inputs, params, ops::ElementwiseOp::OpType::max,
-                                              ActivationFunctionType_NONE); // no activation
+      outputs = _opCreator->createElementwise(ops::ElementwiseOp::OpType::max,
+                                              ActivationFunctionType_NONE, inputs); // no activation
       break;
     case BuiltinOperator_SQUARED_DIFFERENCE:
-      outputs = _opCreator->convertSquaredDifference(inputs, params);
+      outputs = _opCreator->convertSquaredDifference(inputs);
       break;
-    case BuiltinOperator_TRANSPOSE_CONV: {
-      auto tensor = (*_tensors)[op->outputs()->Get(0)];
-      auto out_shape = ShapeHelper::createShape(*tensor->shape(), tensor->shape()->size());
-      outputs = _opCreator->convertTransposeConv(inputs, params,
-                                                 op->builtin_options_as<TransposeConvOptions>(),
-                                                 out_shape);
+    case BuiltinOperator_TRANSPOSE_CONV:
+      outputs = _opCreator->convertTransposeConv(op->builtin_options_as<TransposeConvOptions>(),
+                                                 inputs);
       break;
-    }
     case BuiltinOperator_PAD:
-      outputs = _opCreator->convertPad(inputs, params, op->builtin_options_as<PadOptions>());
+      outputs = _opCreator->convertPad(op->builtin_options_as<PadOptions>(), inputs);
       break;
     case BuiltinOperator_TANH:
       outputs = _opCreator->convertTanh(inputs);
@@ -262,15 +263,15 @@ void TfliteImporter::walkOperator(const Operator* op) {
       outputs = _opCreator->convertReLU6(inputs);
       break;
     case BuiltinOperator_TRANSPOSE:
-      outputs = _opCreator->convertTranspose(inputs, params,
-                                             op->builtin_options_as<TransposeOptions>());
+      outputs = _opCreator->convertTranspose(
+          op->builtin_options_as<TransposeOptions>(), inputs);
       break;
     case BuiltinOperator_STRIDED_SLICE:
-      outputs = _opCreator->convertStridedSlice(inputs, params,
-                                                op->builtin_options_as<StridedSliceOptions>());
+      outputs = _opCreator->convertStridedSlice(
+          op->builtin_options_as<StridedSliceOptions>(), inputs);
       break;
     case BuiltinOperator_LEAKY_RELU:
-      outputs = _opCreator->convertLeakyReLU(inputs, op->builtin_options_as<LeakyReluOptions>());
+      outputs = _opCreator->convertLeakyReLU(op->builtin_options_as<LeakyReluOptions>(), inputs);
       break;
     default:
       assert(false && "All unsupported types should have been found before this pass.");
@@ -288,12 +289,17 @@ std::vector<mir::IODescriptor> TfliteImporter::getMIRInputsForOperator(const Ope
 
   try {
     for (auto i : *(op->inputs())) {
-      int buffer_idx = (*_tensors)[i]->buffer();
-      if ((*_buffers)[buffer_idx]->data() == nullptr) {
+      const Tensor* tensor = (*_tensors)[i];
+      const Buffer* buffer = (*_buffers)[tensor->buffer()];
+      if (buffer->data() != nullptr) {
+        assert(_tensorMap.find(i) == _tensorMap.end());
+        mir::TensorVariant mir_tensor = createTensor(tensor, buffer);
+        inputs.emplace_back(_graph->create<ops::ConstantOp>("", mir_tensor)->getOutput(0));
+      } else {
         // By this point every input for the operation "op" should have corresponding
         // Model IR operations that output its inputs. This assumption is provided by the fact
         // that TFLite format specifies all operations in the execution order.
-        inputs.push_back(_tensorMap.at(i));
+        inputs.emplace_back(_tensorMap.at(i));
       }
     }
   } catch (const std::out_of_range& e) {
@@ -304,38 +310,6 @@ std::vector<mir::IODescriptor> TfliteImporter::getMIRInputsForOperator(const Ope
   return inputs;
 }
 
-std::vector<mir::TensorVariant> TfliteImporter::createOpParams(const Operator* op) {
-  std::vector<mir::TensorVariant> params_for_op;
-
-  for (auto i : *(op->inputs())) {
-    const Tensor* t = (*_tensors)[i];
-    const Buffer* b = (*_buffers)[t->buffer()];
-    if (b->data() != nullptr) {
-      auto tensor = createTensor(t, b);
-
-      BuiltinOperator opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
-
-      if ((opcode == BuiltinOperator_CONV_2D || opcode == BuiltinOperator_DEPTHWISE_CONV_2D)
-          && t->shape()->size() == 4) {
-        // Change dimension indices [0, 1, 2, 3] to [1, 2, 3, 0].
-        // This is needed because TFLite convolution weights are stored as NHWC, and we use HWCN.
-        // TODO: Currently this is only used by the interpreter and shape inference,
-        // don't forget to change this if tensor shape processing architecture changes.
-        params_for_op.emplace_back(mir::transposeTensor<1, 2, 3, 0>(tensor));
-      } else if (opcode == BuiltinOperator_TRANSPOSE_CONV && t->shape()->size() == 4) {
-        //Tflite uses [in, H, W, out] and we expect kernel to be [H, W, in, out]
-        params_for_op.emplace_back(mir::transposeTensor<1, 2, 0, 3>(tensor));
-      } else if (opcode == BuiltinOperator_FULLY_CONNECTED && t->shape()->size() == 2) {
-        params_for_op.emplace_back(mir::transposeTensor<1, 0>(tensor));
-      } else {
-        params_for_op.emplace_back(std::move(tensor));
-      }
-    }
-  }
-
-  return params_for_op;
-}
-
 mir::TensorVariant TfliteImporter::createTensor(const Tensor* t, const Buffer* b) {
   // Create TensorVariant by copying the tensor buffer contents.
   // Another option is to copy the data in a TensorVariant constructor.
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.h b/contrib/nnc/passes/tflite_frontend/tflite_importer.h
index 07204b2..cbfcf7d 100644
--- a/contrib/nnc/passes/tflite_frontend/tflite_importer.h
+++ b/contrib/nnc/passes/tflite_frontend/tflite_importer.h
@@ -108,11 +108,6 @@ private:
   void setIrNodeNames();
 
   /**
-  * @brief Prepare data for creating an MIR node/operation.
-  */
-  std::vector<mir::TensorVariant> createOpParams(const ::tflite::Operator* op);
-
-  /**
   * @brief Return MIR ops, preceding given tflite operator
   */
   std::vector<mir::IODescriptor> getMIRInputsForOperator(const ::tflite::Operator* op);
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
index ef221d4..90b8ed6 100644
--- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
+++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
 #include "core/modelIR/operations/TransposeOp.h"
 
 #include "pass/PassException.h"
+#include "option/Options.h"
 
 #include "core/modelIR/Shape.h"
 #include "core/modelIR/ShapeRange.h"
 #include "core/modelIR/Tensor.h"
+#include "core/modelIR/TensorUtil.h"
 
 using namespace ::tflite;
 
@@ -78,28 +80,49 @@ static void calculatePadding(tflite::Padding padding,
   }
 }
 
+template<typename VectorT>
+static std::vector<VectorT> convertIntTensorToVector(const mir::Tensor<int32_t>& tensor) {
+  std::vector<VectorT> v;
+  for (const auto& i : mir::ShapeRange(tensor.getShape()))
+    v.emplace_back(static_cast<VectorT>(tensor.at(i)));
+  return v;
+}
+
+static const mir::TensorVariant& extractTensor(mir::IODescriptor descr) {
+  auto constant_op = dynamic_cast<ops::ConstantOp*>(descr.op);
+  if (constant_op == nullptr)
+    throw PassException("Non-constant input is not supported.");
+  return constant_op->getValue();
+}
+
 void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts,
                                   std::set<std::string>& problems_op_set) {
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertConv2D(const std::vector<mir::IODescriptor>& inputs,
-                               const std::vector<mir::TensorVariant>& params,
-                               const Conv2DOptions* opts) {
-  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
-  const auto& kernel_shape = params[0].getShape();
+TFLiteOpCreator::convertConv2D(const Conv2DOptions* opts,
+                               const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+  auto kernel = inputs.at(1);
+  auto bias = inputs.at(2);
+
+  // OHWI -> HWIO
+  // TODO Insert TransposeOp instead when ACL backend is ready for that.
+  const auto& kernel_tensor = mir::transposeTensor<1, 2, 3, 0>(extractTensor(kernel));
+  kernel = createOp<ops::ConstantOp>(kernel_tensor)->getOutput(0);
+
   Shape strides{opts->stride_h(), opts->stride_w()};
   std::vector<int32_t> padding_before(2);
   std::vector<int32_t> padding_after(2);
 
-  calculatePadding(opts->padding(), input_shape, kernel_shape, strides, padding_before,
-                   padding_after);
+  const auto& input_shape = input.getShape();
+  const auto& kernel_shape = kernel.getShape();
+  calculatePadding(opts->padding(), input_shape, kernel_shape,
+                   strides, padding_before, padding_after);
 
-  auto kernel = createOp<ops::ConstantOp>(params[0])->getOutput(0);
-  auto result = createOp<ops::Conv2DOp>(inputs[0], kernel, strides, padding_before, padding_after);
-  auto bias = createOp<ops::ConstantOp>(params[1]);
-  result = createOp<ops::BiasAddOp>(result->getOutput(0), bias->getOutput(0));
+  auto result = createOp<ops::Conv2DOp>(input, kernel, strides, padding_before, padding_after);
+  result = createOp<ops::BiasAddOp>(result->getOutput(0), bias);
   return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
 }
 
@@ -109,23 +132,29 @@ void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertDepthwiseConv2D(const std::vector<mir::IODescriptor>& inputs,
-                                        const std::vector<mir::TensorVariant>& params,
-                                        const DepthwiseConv2DOptions* opts) {
-  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
-  const auto& kernel_shape = params[0].getShape();
+TFLiteOpCreator::convertDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
+                                        const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+  auto kernel = inputs.at(1);
+  auto bias = inputs.at(2);
+
+  // OHWI -> HWIO
+  // TODO Insert TransposeOp instead when ACL backend is ready for that.
+  const auto& kernel_tensor = mir::transposeTensor<1, 2, 3, 0>(extractTensor(kernel));
+  kernel = createOp<ops::ConstantOp>(kernel_tensor)->getOutput(0);
+
   Shape strides{opts->stride_h(), opts->stride_w()};
   std::vector<int32_t> padding_before(2);
   std::vector<int32_t> padding_after(2);
 
-  calculatePadding(opts->padding(), input_shape, kernel_shape, strides, padding_before,
-                   padding_after);
+  const auto& input_shape = input.getShape();
+  const auto& kernel_shape = kernel.getShape();
+  calculatePadding(opts->padding(), input_shape, kernel_shape,
+                   strides, padding_before, padding_after);
 
-  auto kernel = createOp<ops::ConstantOp>(params[0])->getOutput(0);
-  auto result = createOp<ops::DepthwiseConv2DOp>(inputs[0], kernel,
+  auto result = createOp<ops::DepthwiseConv2DOp>(input, kernel,
                                                  strides, padding_before, padding_after);
-  auto bias = createOp<ops::ConstantOp>(params[1]);
-  result = createOp<ops::BiasAddOp>(result->getOutput(0), bias->getOutput(0));
+  result = createOp<ops::BiasAddOp>(result->getOutput(0), bias);
   return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
 }
 
@@ -135,8 +164,8 @@ void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts,
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertConcatenation(const std::vector<mir::IODescriptor>& inputs,
-                                      const ::tflite::ConcatenationOptions* opts) {
+TFLiteOpCreator::convertConcatenation(const ::tflite::ConcatenationOptions* opts,
+                                      const std::vector<mir::IODescriptor>& inputs) {
   auto result = createOp<ops::ConcatOp>(inputs, opts->axis());
   return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
 }
@@ -147,18 +176,20 @@ void TFLiteOpCreator::checkPool2D(const Pool2DOptions* opts,
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertMaxPool2D(const std::vector<mir::IODescriptor>& inputs,
-                                  const ::tflite::Pool2DOptions* opts) {
-  auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
+TFLiteOpCreator::convertMaxPool2D(const ::tflite::Pool2DOptions* opts,
+                                  const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+
+  const auto& input_shape = input.getShape();
   Shape window_shape{opts->filter_height(), opts->filter_width()};
   Shape strides{opts->stride_h(), opts->stride_w()};
   std::vector<int32_t> padding_before(2);
   std::vector<int32_t> padding_after(2);
 
-  calculatePadding(opts->padding(), input_shape, window_shape, strides, padding_before,
-                   padding_after);
+  calculatePadding(opts->padding(), input_shape, window_shape,
+                   strides, padding_before, padding_after);
 
-  auto result = createOp<ops::PoolOp>(inputs[0], ops::PoolOp::PoolingType::MAX,
+  auto result = createOp<ops::PoolOp>(input, ops::PoolOp::PoolingType::MAX,
                                       window_shape, strides, padding_before, padding_after,
                                       ops::PoolOp::BorderType::EMPTY,
                                       ops::PoolOp::RoundMode::floor);
@@ -166,18 +197,20 @@ TFLiteOpCreator::convertMaxPool2D(const std::vector<mir::IODescriptor>& inputs,
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertAveragePool2D(const std::vector<mir::IODescriptor>& inputs,
-                                      const ::tflite::Pool2DOptions* opts) {
-  auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
+TFLiteOpCreator::convertAveragePool2D(const ::tflite::Pool2DOptions* opts,
+                                      const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+
+  const auto& input_shape = input.getShape();
   Shape window_shape{opts->filter_height(), opts->filter_width()};
   Shape strides{opts->stride_h(), opts->stride_w()};
   std::vector<int32_t> padding_before(2);
   std::vector<int32_t> padding_after(2);
 
-  calculatePadding(opts->padding(), input_shape, window_shape, strides, padding_before,
-                   padding_after);
+  calculatePadding(opts->padding(), input_shape, window_shape,
+                   strides, padding_before, padding_after);
 
-  auto result = createOp<ops::PoolOp>(inputs[0], ops::PoolOp::PoolingType::AVG,
+  auto result = createOp<ops::PoolOp>(input, ops::PoolOp::PoolingType::AVG,
                                       window_shape, strides, padding_before, padding_after,
                                       ops::PoolOp::BorderType::EMPTY,
                                       ops::PoolOp::RoundMode::floor);
@@ -185,106 +218,95 @@ TFLiteOpCreator::convertAveragePool2D(const std::vector<mir::IODescriptor>& inpu
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertSoftmax(const std::vector<mir::IODescriptor>& inputs,
-                                const ::tflite::SoftmaxOptions* opts) {
+TFLiteOpCreator::convertSoftmax(const ::tflite::SoftmaxOptions* opts,
+                                const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+
   // Softmax in TFLite is always 2-D.
-  assert(inputs[0].op->getOutputShape(inputs[0].index).rank() == 2);
+  assert(input.getShape().rank() == 2);
   const int32_t axis = 1;
-  auto result = createOp<ops::SoftmaxOp>(inputs[0], axis);
+  auto result = createOp<ops::SoftmaxOp>(input, axis);
   return {result->getOutput(0)};
 }
 
-Shape shapeFromTensor(mir::Tensor<int32_t>&& t) {
-  Shape temporary_shape(4);
-  int j = 0;
-  for (auto i : mir::ShapeRange(t.getShape())) {
-    temporary_shape.dim(j++) = t.at(i);
-  }
-  return temporary_shape;
-}
-
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertSlice(const std::vector<mir::IODescriptor>& inputs,
-                              const std::vector<mir::TensorVariant>& params,
-                              const ::tflite::SliceOptions* opts) {
-  auto starts = shapeFromTensor(mir::Tensor<int32_t>(params[0]));
-  auto sizes = shapeFromTensor(mir::Tensor<int32_t>(params[1]));
-  assert(starts.rank() == inputs[0].op->getOutputShape(inputs[0].index).rank() &&
-         starts.rank() == sizes.rank());
-  auto result = createOp<ops::SliceOp>(inputs[0], starts, sizes);
+TFLiteOpCreator::convertSlice(const ::tflite::SliceOptions* opts,
+                              const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+  mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
+  mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(2)));
+
+  Shape starts(convertIntTensorToVector<int32_t>(begin_tensor));
+  Shape sizes(convertIntTensorToVector<int32_t>(size_tensor));
+  auto result = createOp<ops::SliceOp>(input, starts, sizes);
   return {result->getOutput(0)};
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertReshape(const std::vector<mir::IODescriptor>& inputs,
-                                const ::tflite::ReshapeOptions* opts) {
+TFLiteOpCreator::convertReshape(const ::tflite::ReshapeOptions* opts,
+                                const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+
   // TODO: we should also support "-1" values in new_shape, which means that correct
   // shape values must be calculated. Better do it in the shape inference module.
   Shape new_shape = ShapeHelper::createShape(*opts->new_shape(), opts->new_shape()->size());
-  auto result = createOp<ops::ReshapeOp>(inputs[0], new_shape);
+  auto result = createOp<ops::ReshapeOp>(input, new_shape);
   return {result->getOutput(0)};
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertTransposeConv(const std::vector<mir::IODescriptor>& inputs,
-                                      const std::vector<mir::TensorVariant>& params,
-                                      const ::tflite::TransposeConvOptions* opts,
-                                      const Shape& output_shape) {
+TFLiteOpCreator::convertTransposeConv(const ::tflite::TransposeConvOptions* opts,
+                                      const std::vector<mir::IODescriptor>& inputs) {
+  mir::Tensor<int32_t> output_shape_tensor(extractTensor(inputs.at(0)));
+  auto kernel = inputs.at(1);
+  auto input = inputs.at(2);
+
   Shape strides{opts->stride_h(), opts->stride_w()};
+  Shape output_shape(convertIntTensorToVector<int32_t>(output_shape_tensor));
 
-  auto kernel = createOp<ops::ConstantOp>(params[1])->getOutput(0);
-  auto result = createOp<ops::DeConv2DOp>(inputs[0], kernel,
+  // OHWI -> HWOI
+  // TODO Insert TransposeOp instead when ACL backend is ready for that.
+  const auto& kernel_tensor = mir::transposeTensor<1, 2, 0, 3>(extractTensor(kernel));
+  kernel = createOp<ops::ConstantOp>(kernel_tensor)->getOutput(0);
+
+  auto result = createOp<ops::DeConv2DOp>(input, kernel,
                                           strides, paddingMap[opts->padding()], output_shape);
   return {result->getOutput(0)};
 }
 
+void TFLiteOpCreator::checkResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions* opts,
+                                                 std::set<std::string>& problems_op_set) {
+  if (opts->align_corners())
+    problems_op_set.insert("'align_corners' is not currently supported");
+}
+
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertResizeNN(const std::vector<mir::IODescriptor>& inputs,
-                                 const std::vector<mir::TensorVariant>& params,
-                                 const ::tflite::ResizeNearestNeighborOptions* opts) {
-  // TODO support aligned corners
-  assert(!opts->align_corners() && "Aligned corners not currently supported");
-
-  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
-  assert(input_shape.rank() == 4);
-  mir::Tensor<int> out_shapes(params[0]);
-  Shape res_shape(4);
-  res_shape.dim(0) = input_shape.dim(0);
-  res_shape.dim(1) = out_shapes.at(mir::Index{0});
-  res_shape.dim(2) = out_shapes.at(mir::Index{1});
-  res_shape.dim(3) = input_shape.dim(3);
-  auto result = createOp<ops::ResizeOp>(inputs[0], ops::ResizeOp::ResizeMethod::nearestNeighbor,
+TFLiteOpCreator::convertResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions* opts,
+                                              const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+  mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(1)));
+
+  const auto& input_shape = input.getShape();
+  Shape res_shape{input_shape.dim(0),
+                  size_tensor.at(mir::Index{0}),
+                  size_tensor.at(mir::Index{1}),
+                  input_shape.dim(3)};
+  auto result = createOp<ops::ResizeOp>(input, ops::ResizeOp::ResizeMethod::nearestNeighbor,
                                         res_shape);
   return {result->getOutput(0)};
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::createElementwise(const std::vector<mir::IODescriptor>& inputs,
-                                   const std::vector<mir::TensorVariant>& params,
-                                   ops::ElementwiseOp::OpType op_type,
-                                   ::tflite::ActivationFunctionType activation) {
-  std::vector<mir::IODescriptor> descriptors = inputs;
-
-  for (const auto& param : params) {
-    auto weights_tensor = createOp<ops::ConstantOp>(param);
-    descriptors.push_back(weights_tensor->getOutput(0));
-  }
-
-  auto result = createOp<ops::ElementwiseOp>(descriptors, op_type);
+TFLiteOpCreator::createElementwise(ops::ElementwiseOp::OpType op_type,
+                                   ::tflite::ActivationFunctionType activation,
+                                   const std::vector<mir::IODescriptor>& inputs) {
+  auto result = createOp<ops::ElementwiseOp>(inputs, op_type);
   return {addFusedActivation(result->getOutput(0), activation)};
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::IODescriptor>& inputs,
-                                          const std::vector<mir::TensorVariant>& params) {
-  std::vector<mir::IODescriptor> descriptors = inputs;
-
-  for (const auto& param : params) {
-    auto weights_tensor = createOp<ops::ConstantOp>(param);
-    descriptors.push_back(weights_tensor->getOutput(0));
-  }
-
-  auto result = createOp<ops::ElementwiseOp>(descriptors, ops::ElementwiseOp::OpType::sub);
+TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::IODescriptor>& inputs) {
+  auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::sub);
   result = createOp<ops::ElementwiseOp>(std::vector<mir::IODescriptor>{
                                             result->getOutput(0),
                                             result->getOutput(0)},
@@ -293,21 +315,14 @@ TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::IODescriptor>&
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertReducer(const std::vector<mir::IODescriptor>& inputs,
-                                const std::vector<mir::TensorVariant>& params,
-                                ops::ReduceFOp::FuncType ft,
-                                const ::tflite::ReducerOptions* opts) {
-  assert(params.at(0).getShape().rank() <= 1 && "Must be 1-dim or 0-dim tensor");
-  mir::Tensor<int> tensor(params.at(0));
-  std::vector<int32_t> axes;
-
-  for (const auto& i : mir::ShapeRange(tensor.getShape())) {
-    axes.emplace_back(tensor.at(i));
-  }
-
-  std::sort(axes.begin(), axes.end());
-
-  auto result = createOp<ops::ReduceFOp>(inputs[0], axes, opts->keep_dims(), ft);
+TFLiteOpCreator::convertMean(const ::tflite::ReducerOptions* opts,
+                             const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+  mir::Tensor<int32_t> axes_tensor(extractTensor(inputs.at(1)));
+
+  std::vector<int32_t> axes = convertIntTensorToVector<int32_t>(axes_tensor);
+  auto result = createOp<ops::ReduceFOp>(input, axes, opts->keep_dims(),
+                                         ops::ReduceFOp::FuncType::mean);
   return {result->getOutput(0)};
 }
 
@@ -317,15 +332,23 @@ void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions* opts,
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertFullyConnected(const std::vector<mir::IODescriptor>& inputs,
-                                       const std::vector<mir::TensorVariant>& params,
-                                       const ::tflite::FullyConnectedOptions* opts) {
-  // Add Reshape operation to make sure the input for FC operation has shape [1, fc_input_size]
-  int32_t fc_input_size = params[0].getShape().dim(0);
-  auto flatten = createOp<ops::ReshapeOp>(inputs[0], Shape{1, fc_input_size});
-  auto weights = createOp<ops::ConstantOp>(params[0])->getOutput(0);
+TFLiteOpCreator::convertFullyConnected(const ::tflite::FullyConnectedOptions* opts,
+                                       const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+  auto weights = inputs.at(1);
+  auto bias = inputs.at(2);
+
+  // Flatten input to 2-D shape.
+  const auto& input_shape = input.getShape();
+  int32_t outer_size = input_shape.dim(0);
+  int32_t inner_size = input_shape.numElements() / outer_size;
+  auto flatten = createOp<ops::ReshapeOp>(input, Shape{outer_size, inner_size});
+
+  // TODO Insert TransposeOp instead when ACL backend is ready for that.
+  const auto& weights_tensor = mir::transposeTensor<1, 0>(extractTensor(weights));
+  weights = createOp<ops::ConstantOp>(weights_tensor)->getOutput(0);
+
   auto result = createOp<ops::FullyConnectedOp>(flatten->getOutput(0), weights);
-  auto bias = createOp<ops::ConstantOp>(params[1])->getOutput(0);
   result = createOp<ops::BiasAddOp>(result->getOutput(0), bias);
   return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
 }
@@ -358,97 +381,110 @@ mir::IODescriptor TFLiteOpCreator::addFusedActivation(mir::IODescriptor input,
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertSqueeze(const std::vector<mir::IODescriptor>& inputs,
-                                const ::tflite::SqueezeOptions* opts) {
-  std::vector<int32_t> squeeze_dims{opts->squeeze_dims()->begin(), opts->squeeze_dims()->end()};
+TFLiteOpCreator::convertSqueeze(const ::tflite::SqueezeOptions* opts,
+                                const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
 
-  auto result = createOp<ops::SqueezeOp>(inputs[0], squeeze_dims);
+  std::vector<int32_t> squeeze_dims(opts->squeeze_dims()->begin(),
+                                    opts->squeeze_dims()->end());
+  auto result = createOp<ops::SqueezeOp>(input, squeeze_dims);
   return {result->getOutput(0)};
 }
 
-
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertPad(const std::vector<mir::IODescriptor>& inputs,
-                            const std::vector<mir::TensorVariant>& params,
-                            const ::tflite::PadOptions* opts) {
-
-  assert(params.size() == 1); // support pad with one param
-  std::vector<std::pair<int32_t, int32_t>> paddings;
+TFLiteOpCreator::convertPad(const ::tflite::PadOptions* opts,
+                            const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+  mir::Tensor<int32_t> paddings_tensor(extractTensor(inputs.at(1)));
 
-  mir::Tensor<int32_t> paddings_tensor(params[0]);
-  // check right paddings structure
-  assert(paddings_tensor.getShape().dim(1) == 2);
+  const auto& input_shape = input.getShape();
+  int32_t num_dims = input_shape.rank();
 
-  int32_t num_dims = paddings_tensor.getShape().dim(0);
+  std::vector<std::pair<int32_t, int32_t>> paddings;
+  for (int axis = 0; axis < num_dims; axis++)
+    paddings.emplace_back(paddings_tensor.at(mir::Index({axis, 0})),
+                          paddings_tensor.at(mir::Index({axis, 1})));
 
-  paddings.reserve(static_cast<size_t>(num_dims));
-  // create strucuture with paddings
-  for (int i = 0; i < num_dims; i++)
-    paddings.emplace_back(paddings_tensor.at(mir::Index({i, 0})),
-                          paddings_tensor.at(mir::Index({i, 1})));
-  // create const value, it's float because we can't see input type
-  float const_value = 0.0; // not support different constant value
-  // create scalar with constant value
-  mir::Scalar constant_value(reinterpret_cast<char*>(&const_value),
-                             mir::DTYPE::FLOAT32, sizeof(float));
+  float filler_value = 0.0;
+  mir::Scalar filler(reinterpret_cast<char*>(&filler_value),
+                     mir::DTYPE::FLOAT32, sizeof(filler_value));
 
-  auto result = createOp<ops::PadOp>(inputs[0], num_dims, paddings, constant_value);
+  // FIXME Do we really need num_dims as an argument? It looks redundant.
+  auto result = createOp<ops::PadOp>(input, num_dims, paddings, filler);
   return {result->getOutput(0)};
 }
 
 std::vector<mir::IODescriptor>
 TFLiteOpCreator::convertTanh(const std::vector<mir::IODescriptor>& inputs) {
-  auto result = createOp<ops::TanhOp>(inputs[0]);
+  auto input = inputs.at(0);
+
+  auto result = createOp<ops::TanhOp>(input);
   return {result->getOutput(0)};
 }
 
 std::vector<mir::IODescriptor>
 TFLiteOpCreator::convertReLU(const std::vector<mir::IODescriptor>& inputs) {
-  auto result = createOp<ops::ReluOp>(inputs[0]);
+  auto input = inputs.at(0);
+
+  auto result = createOp<ops::ReluOp>(input);
   return {result->getOutput(0)};
 }
 
 std::vector<mir::IODescriptor>
 TFLiteOpCreator::convertReLU6(const std::vector<mir::IODescriptor>& inputs) {
-  auto result = createOp<ops::CappedReluOp>(inputs[0], 6);
+  auto input = inputs.at(0);
+
+  auto result = createOp<ops::CappedReluOp>(input, 6);
   return {result->getOutput(0)};
 }
 
 std::vector<mir::IODescriptor>
 TFLiteOpCreator::convertSqrt(const std::vector<mir::IODescriptor>& inputs) {
-  auto result = createOp<ops::SqrtOp>(inputs[0]);
+  auto input = inputs.at(0);
+
+  auto result = createOp<ops::SqrtOp>(input);
   return {result->getOutput(0)};
 }
 
 std::vector<mir::IODescriptor>
 TFLiteOpCreator::convertLogistic(const std::vector<mir::IODescriptor>& inputs) {
-  auto result = createOp<ops::SigmoidOp>(inputs[0]);
+  auto input = inputs.at(0);
+
+  auto result = createOp<ops::SigmoidOp>(input);
   return {result->getOutput(0)};
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertTranspose(const std::vector<mir::IODescriptor>& inputs,
-                                  const std::vector<mir::TensorVariant>& params,
-                                  const ::tflite::TransposeOptions* opts) {
-  assert(params.size() == 1);
-  std::vector<std::size_t> axis_order;
+TFLiteOpCreator::convertTranspose(const ::tflite::TransposeOptions* opts,
+                                  const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+  mir::Tensor<int32_t> perm_tensor(extractTensor(inputs.at(1)));
 
-  mir::Tensor<int32_t> permutation_tensor(params[0]);
+  std::vector<std::size_t> axis_order = convertIntTensorToVector<std::size_t>(perm_tensor);
+  auto result = createOp<ops::TransposeOp>(input, axis_order);
+  return {result->getOutput(0)};
+}
 
-  mir::ShapeRange range(permutation_tensor.getShape());
-  for (const auto& index : range) {
-    axis_order.push_back(permutation_tensor.at(index));
-  }
+void TFLiteOpCreator::checkStridedSlice(const ::tflite::StridedSliceOptions* opts,
+                                        std::set<std::string>& problems_op_set) {
+  if (opts->ellipsis_mask() != 0)
+    problems_op_set.insert("StridedSlice: parameter 'ellipsis_mask' is not supported.");
 
-  auto result = createOp<ops::TransposeOp>(inputs[0], axis_order);
-  return {result->getOutput(0)};
+  if (opts->new_axis_mask() != 0)
+    problems_op_set.insert("StridedSlice: parameter 'new_axis_mask' is not supported.");
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertStridedSlice(const std::vector<mir::IODescriptor>& inputs,
-                                     const std::vector<mir::TensorVariant>& params,
-                                     const ::tflite::StridedSliceOptions* opts) {
-  assert(params.size() == 3);
+TFLiteOpCreator::convertStridedSlice(const ::tflite::StridedSliceOptions* opts,
+                                     const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+  mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
+  mir::Tensor<int32_t> end_tensor(extractTensor(inputs.at(2)));
+  mir::Tensor<int32_t> strides_tensor(extractTensor(inputs.at(3)));
+
+  std::vector<int32_t> begin = convertIntTensorToVector<int32_t>(begin_tensor);
+  std::vector<int32_t> end = convertIntTensorToVector<int32_t>(end_tensor);
+  std::vector<int32_t> strides = convertIntTensorToVector<int32_t>(strides_tensor);
 
   int32_t begin_mask = opts->begin_mask();
   int32_t end_mask = opts->end_mask();
@@ -456,23 +492,12 @@ TFLiteOpCreator::convertStridedSlice(const std::vector<mir::IODescriptor>& input
   int32_t new_axis_mask = opts->new_axis_mask();
   int32_t shrink_axis_mask = opts->shrink_axis_mask();
 
-  // params not used on tflite
-  assert(ellipsis_mask == 0 && "Ellipsis mask not used");
-  assert(new_axis_mask == 0 && "New axis mask not used");
-
-  mir::Tensor<int32_t> begin(params[0]);
-  mir::Tensor<int32_t> end(params[1]);
+  const auto& input_shape = input.getShape();
+  int32_t num_dims = input_shape.rank();
 
-  int32_t num_dims = begin.getShape().numElements();
-
-  assert(num_dims == inputs[0].op->getOutputShape(inputs[0].index).rank() &&
-         num_dims == end.getShape().numElements());
-
-  mir::Tensor<int32_t> strides(params[2]);
-  // support only strides == 1
-  mir::ShapeRange strides_range(strides.getShape());
-  for (const auto& index: strides_range) {
-    assert(strides.at(index) == 1 && "Strides not equal 1 not supported");
+  for (int32_t stride : strides) {
+    if (stride != 1)
+      throw PassException("StridedSlice: parameter 'strides' is not supported");
   }
 
   Shape start(num_dims);
@@ -482,26 +507,28 @@ TFLiteOpCreator::convertStridedSlice(const std::vector<mir::IODescriptor>& input
     if (begin_mask & (1 << axis))
       start.dim(axis) = 0;
     else
-      start.dim(axis) = begin.at(mir::Index{axis});
+      start.dim(axis) = begin.at(axis);
 
     if (end_mask & (1 << axis))
-      size.dim(axis) = inputs[0].op->getOutputShape(inputs[0].index).dim(axis) - start.dim(axis);
+      size.dim(axis) = input_shape.dim(axis) - start.dim(axis);
     else
-      size.dim(axis) = end.at(mir::Index{axis}) - start.dim(axis);
+      size.dim(axis) = end.at(axis) - start.dim(axis);
     
     if (shrink_axis_mask & (1 << axis))
       squeeze_dims.push_back(axis);
   }
 
-  auto result = createOp<ops::SliceOp>(inputs[0], start, size);
+  auto result = createOp<ops::SliceOp>(input, start, size);
   result = createOp<ops::SqueezeOp>(result->getOutput(0), squeeze_dims);
   return {result->getOutput(0)};
 }
 
 std::vector<mir::IODescriptor>
-TFLiteOpCreator::convertLeakyReLU(const std::vector<mir::IODescriptor>& inputs,
-                                  const ::tflite::LeakyReluOptions* opts) {
-  auto result = createOp<ops::LeakyReluOp>(inputs[0], opts->alpha());
+TFLiteOpCreator::convertLeakyReLU(const ::tflite::LeakyReluOptions* opts,
+                                  const std::vector<mir::IODescriptor>& inputs) {
+  auto input = inputs.at(0);
+
+  auto result = createOp<ops::LeakyReluOp>(input, opts->alpha());
   return {result->getOutput(0)};
 }
 
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
index de29b58..ef40c9c 100644
--- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
+++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
@@ -46,55 +46,48 @@ public:
   explicit TFLiteOpCreator(Graph* g) : _graph(g) {}
 
   std::vector<mir::IODescriptor>
-  convertConv2D(const std::vector<mir::IODescriptor>& inputs,
-                const std::vector<mir::TensorVariant>& params,
-                const ::tflite::Conv2DOptions* opts);
+  convertConv2D(const ::tflite::Conv2DOptions* opts,
+                const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertDepthwiseConv2D(const std::vector<mir::IODescriptor>& inputs,
-                         const std::vector<mir::TensorVariant>& params,
-                         const ::tflite::DepthwiseConv2DOptions* opts);
+  convertDepthwiseConv2D(const ::tflite::DepthwiseConv2DOptions* opts,
+                         const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertConcatenation(const std::vector<mir::IODescriptor>& inputs,
-                       const ::tflite::ConcatenationOptions* opts);
+  convertConcatenation(const ::tflite::ConcatenationOptions* opts,
+                       const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertMaxPool2D(const std::vector<mir::IODescriptor>& inputs,
-                   const ::tflite::Pool2DOptions* opts);
+  convertMaxPool2D(const ::tflite::Pool2DOptions* opts,
+                   const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertAveragePool2D(const std::vector<mir::IODescriptor>& inputs,
-                       const ::tflite::Pool2DOptions* opts);
+  convertAveragePool2D(const ::tflite::Pool2DOptions* opts,
+                       const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertReducer(const std::vector<mir::IODescriptor>& inputs,
-                 const std::vector<mir::TensorVariant>& params,
-                 ops::ReduceFOp::FuncType ft,
-                 const ::tflite::ReducerOptions* opts);
+  convertMean(const ::tflite::ReducerOptions* opts,
+              const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertSoftmax(const std::vector<mir::IODescriptor>& inputs,
-                 const ::tflite::SoftmaxOptions* opts);
+  convertSoftmax(const ::tflite::SoftmaxOptions* opts,
+                 const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertSlice(const std::vector<mir::IODescriptor>& inputs,
-               const std::vector<mir::TensorVariant>& params,
-               const ::tflite::SliceOptions* opts);
+  convertSlice(const ::tflite::SliceOptions* opts,
+               const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertReshape(const std::vector<mir::IODescriptor>& inputs,
-                 const ::tflite::ReshapeOptions* opts);
+  convertReshape(const ::tflite::ReshapeOptions* opts,
+                 const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertFullyConnected(const std::vector<mir::IODescriptor>& inputs,
-                        const std::vector<mir::TensorVariant>& params,
-                        const ::tflite::FullyConnectedOptions* opts);
+  convertFullyConnected(const ::tflite::FullyConnectedOptions* opts,
+                        const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertResizeNN(const std::vector<mir::IODescriptor>& inputs,
-                  const std::vector<mir::TensorVariant>& params,
-                  const ::tflite::ResizeNearestNeighborOptions* opts);
+  convertResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions* opts,
+                               const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
   convertLogistic(const std::vector<mir::IODescriptor>& inputs);
@@ -103,19 +96,16 @@ public:
   convertSqrt(const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertSqueeze(const std::vector<mir::IODescriptor>& inputs,
-                 const ::tflite::SqueezeOptions* opts);
+  convertSqueeze(const ::tflite::SqueezeOptions* opts,
+                 const std::vector<mir::IODescriptor>& inputs);
 
-  /** @brief Elementwise Operation */
   std::vector<mir::IODescriptor>
-  createElementwise(const std::vector<mir::IODescriptor>& inputs,
-                    const std::vector<mir::TensorVariant>& params,
-                    ops::ElementwiseOp::OpType op_type,
-                    ::tflite::ActivationFunctionType activation);
+  createElementwise(ops::ElementwiseOp::OpType op_type,
+                    ::tflite::ActivationFunctionType activation,
+                    const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
-  convertSquaredDifference(const std::vector<mir::IODescriptor>& inputs,
-                           const std::vector<mir::TensorVariant>& params);
+  convertSquaredDifference(const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
   convertTanh(const std::vector<mir::IODescriptor>& inputs);
@@ -126,70 +116,46 @@ public:
   std::vector<mir::IODescriptor>
   convertReLU6(const std::vector<mir::IODescriptor>& inputs);
 
-  /**
- * @brief Creates a Transposed convolution
- * @param params 0 - output shape (unused), 1 - kernel, 2- input
- */
   std::vector<mir::IODescriptor>
-  convertTransposeConv(const std::vector<mir::IODescriptor>& inputs,
-                       const std::vector<mir::TensorVariant>& params,
-                       const ::tflite::TransposeConvOptions* opts,
-                       const Shape& output_shape);
+  convertTransposeConv(const ::tflite::TransposeConvOptions* opts,
+                       const std::vector<mir::IODescriptor>& inputs);
 
-  /**
-   * @brief Create a Pad operation
-   * @param inputs Operations vector
-   * @param params Tensor with paddings for each dimension
-   * @param opts TFLite PadOptions
-   * @return Operations vector
-   */
   std::vector<mir::IODescriptor>
-  convertPad(const std::vector<mir::IODescriptor>& inputs,
-             const std::vector<mir::TensorVariant>& params,
-             const ::tflite::PadOptions* opts);
+  convertPad(const ::tflite::PadOptions* opts,
+             const std::vector<mir::IODescriptor>& inputs);
 
-  /**
-   * @brief Create a Transpose operation
-   * @param inputs Operations vector
-   * @param params Tensor with axis order
-   * @param opts TFLite TransposeOptions
-   * @return Operations vector
-   */
   std::vector<mir::IODescriptor>
-  convertTranspose(const std::vector<mir::IODescriptor>& inputs,
-                   const std::vector<mir::TensorVariant>& params,
-                   const ::tflite::TransposeOptions* opts);
+  convertTranspose(const ::tflite::TransposeOptions* opts,
+                   const std::vector<mir::IODescriptor>& inputs);
 
-  /**
-   * @brief Create a Strided Slice operation
-   * @param inputs Operations vector
-   * @param params Tensors: begin, end, strides
-   * @param opts TFLite StridedSliceOptions
-   * @return Operations vector
-   */
   std::vector<mir::IODescriptor>
-  convertStridedSlice(const std::vector<mir::IODescriptor>& inputs,
-                      const std::vector<mir::TensorVariant>& params,
-                      const ::tflite::StridedSliceOptions* opts);
+  convertStridedSlice(const ::tflite::StridedSliceOptions* opts,
+                      const std::vector<mir::IODescriptor>& inputs);
 
-  /**
-   * @brief Create leaky relu activation
-   * @return
-   */
   std::vector<mir::IODescriptor>
-  convertLeakyReLU(const std::vector<mir::IODescriptor>& inputs,
-                   const ::tflite::LeakyReluOptions* opts);
+  convertLeakyReLU(const ::tflite::LeakyReluOptions* opts,
+                   const std::vector<mir::IODescriptor>& inputs);
+
+  void checkPool2D(const ::tflite::Pool2DOptions* opts,
+                   std::set<std::string>& problem_ops_set);
 
-  void checkPool2D(const ::tflite::Pool2DOptions*, std::set<std::string>&);
+  void checkConcatenation(const ::tflite::ConcatenationOptions* opts,
+                          std::set<std::string>& problem_ops_set);
 
-  void checkConcatenation(const ::tflite::ConcatenationOptions*, std::set<std::string>&);
+  void checkConv2D(const ::tflite::Conv2DOptions* opts,
+                   std::set<std::string>& problem_ops_set);
 
-  void checkConv2D(const ::tflite::Conv2DOptions*, std::set<std::string>&);
+  void checkDepthwiseConv2D(const ::tflite::DepthwiseConv2DOptions* opts,
+                            std::set<std::string>& problem_ops_set);
 
-  void checkDepthwiseConv2D(const ::tflite::DepthwiseConv2DOptions*, std::set<std::string>&);
+  void checkFullyConnected(const ::tflite::FullyConnectedOptions* opts,
+                           std::set<std::string>& problem_ops_set);
 
-  void checkFullyConnected(const ::tflite::FullyConnectedOptions*, std::set<std::string>&);
+  void checkResizeNearestNeighbor(const ::tflite::ResizeNearestNeighborOptions* opts,
+                                  std::set<std::string>& problem_ops_set);
 
+  void checkStridedSlice(const ::tflite::StridedSliceOptions* opts,
+                         std::set<std::string>& problem_ops_set);
 private:
   Graph* _graph;