[nnc] Support Transpose operation on TFLite importer (#2716)
author Pavel Iliutchenko/AI Tools Lab/SRR/Engineer/Samsung Electronics <p.iliutchenk@samsung.com>
Tue, 18 Dec 2018 13:57:06 +0000 (16:57 +0300)
committer Efimov Alexander/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Tue, 18 Dec 2018 13:57:06 +0000 (16:57 +0300)
* Support TransposeOp in the TFLite importer (see the illustrative sketch below)
* Add const qualifiers where needed in the TFLite op creator
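
For reference, a minimal standalone sketch of the semantics the new TransposeOp wiring implements: the permutation tensor that TFLite supplies as a constant parameter gives, for each output axis, the input axis it takes its size from. The helper name permuteShape below is hypothetical and used only for illustration; it is not an nnc or TFLite API.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Given an input shape and an axis order (the TFLite "perm" tensor),
// compute the transposed output shape: output axis i has the size of
// input axis axis_order[i].
static std::vector<int32_t> permuteShape(const std::vector<int32_t>& shape,
                                         const std::vector<std::size_t>& axis_order) {
  assert(shape.size() == axis_order.size());
  std::vector<int32_t> result(shape.size());
  for (std::size_t i = 0; i < axis_order.size(); ++i)
    result[i] = shape[axis_order[i]];
  return result;
}

int main() {
  // Example: NHWC -> NCHW with the permutation {0, 3, 1, 2}.
  std::vector<int32_t> nhwc{1, 224, 224, 3};
  std::vector<std::size_t> perm{0, 3, 1, 2};
  for (int32_t d : permuteShape(nhwc, perm))
    std::cout << d << ' ';  // prints: 1 3 224 224
  std::cout << '\n';
  return 0;
}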

Signed-off-by: Pavel Iliutchenko <p.iliutchenk@samsung.com>
contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.h

contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
index de667b5..218a138 100644 (file)
@@ -107,6 +107,7 @@ void TfliteImporter::processUnsupportedOp(const Operator* op) {
     case BuiltinOperator_TANH:
     case BuiltinOperator_RELU:
     case BuiltinOperator_RELU6:
+    case BuiltinOperator_TRANSPOSE:
       // No checks
       break;
     default:
@@ -186,7 +187,8 @@ void TfliteImporter::walkOperator(const Operator* op) {
                                                  op->builtin_options_as<ConcatenationOptions>());
       break;
     case BuiltinOperator_RESHAPE:
-      outputs = _opCreator->convertReshape(inputs, params, op->builtin_options_as<ReshapeOptions>());
+      outputs = _opCreator->convertReshape(
+        inputs, params, op->builtin_options_as<ReshapeOptions>());
       break;
     case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
       outputs = _opCreator->convertResizeNN(inputs, params,
@@ -197,17 +199,19 @@ void TfliteImporter::walkOperator(const Operator* op) {
        op->builtin_options_as<ReducerOptions>());
       break;
     case BuiltinOperator_FULLY_CONNECTED:
-      outputs = _opCreator->convertFullyConnected(inputs, params,
-                                                  op->builtin_options_as<FullyConnectedOptions>());
+      outputs = _opCreator->convertFullyConnected(
+        inputs, params, op->builtin_options_as<FullyConnectedOptions>());
       break;
     case BuiltinOperator_SOFTMAX:
-      outputs = _opCreator->createSoftmax(inputs, params, op->builtin_options_as<SoftmaxOptions>());
+      outputs = _opCreator->createSoftmax(
+        inputs, params, op->builtin_options_as<SoftmaxOptions>());
       break;
     case BuiltinOperator_SLICE:
       outputs = _opCreator->createSlice(inputs, params, op->builtin_options_as_SliceOptions());
       break;
     case BuiltinOperator_SQUEEZE:
-      outputs = _opCreator->createSqueeze(inputs, params, op->builtin_options_as<SqueezeOptions>());
+      outputs = _opCreator->createSqueeze(
+        inputs, params, op->builtin_options_as<SqueezeOptions>());
       break;
     case BuiltinOperator_LOGISTIC:
       outputs = _opCreator->createLogistic(inputs, params);
@@ -225,7 +229,8 @@ void TfliteImporter::walkOperator(const Operator* op) {
       outputs = _opCreator->createDiv(inputs, params, op->builtin_options_as<DivOptions>());
       break;
     case BuiltinOperator_MAXIMUM:
-      outputs = _opCreator->createMax(inputs, params, op->builtin_options_as<MaximumMinimumOptions>());
+      outputs = _opCreator->createMax(
+        inputs, params, op->builtin_options_as<MaximumMinimumOptions>());
       break;
     case BuiltinOperator_TRANSPOSE_CONV:
       outputs = _opCreator->createTransposeConv(
@@ -243,6 +248,10 @@ void TfliteImporter::walkOperator(const Operator* op) {
     case BuiltinOperator_RELU6:
       outputs = _opCreator->createActivation(inputs, params, ActivationFunctionType_RELU6);
       break;
+    case BuiltinOperator_TRANSPOSE:
+      outputs = _opCreator->createTranspose(
+        inputs, params, op->builtin_options_as<TransposeOptions>());
+      break;
     default:
       assert(false && "All unsupported types should have been found before this pass.");
   }
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
index 488a47b..5ddf15e 100644 (file)
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ResizeOp.h"
-#include "core/modelIR/operations/ScaleOp.h"
 #include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SliceOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
 #include "core/modelIR/operations/SqrtOp.h"
 #include "core/modelIR/operations/SqueezeOp.h"
 #include "core/modelIR/operations/TanhOp.h"
+#include "core/modelIR/operations/TransposeOp.h"
 
 #include "pass/PassException.h"
 
@@ -84,8 +84,9 @@ void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertConv2D(InputOps& inputs, InputParams& params,
-                                                            const Conv2DOptions* opts) {
+std::vector<mir::Operation*>
+TFLiteOpCreator::convertConv2D(InputOps& inputs, const InputParams& params,
+                               const Conv2DOptions* opts) {
   const auto& input_shape = inputs[0]->getOutputShape(0);
   const auto& kernel_shape = params[0].getShape();
   Shape strides{opts->stride_h(), opts->stride_w()};
@@ -107,7 +108,7 @@ void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
 }
 
 std::vector<mir::Operation*>
-TFLiteOpCreator::convertDepthwiseConv2D(InputOps& inputs, InputParams& params,
+TFLiteOpCreator::convertDepthwiseConv2D(InputOps& inputs, const InputParams& params,
                                         const DepthwiseConv2DOptions* opts) {
   const auto& input_shape = inputs[0]->getOutputShape(0);
   const auto& kernel_shape = params[0].getShape();
@@ -130,9 +131,9 @@ void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertConcatenation(InputOps& inputs,
-                                                                   InputParams& params,
-                                                                   const ConcatenationOptions* opts) {
+std::vector<mir::Operation*>
+TFLiteOpCreator::convertConcatenation(InputOps& inputs, const InputParams& params,
+                                      const ConcatenationOptions* opts) {
   std::vector<IODescriptor> descriptors;
   for (auto i : inputs)
     descriptors.push_back(i->getOutput(0));
@@ -145,8 +146,9 @@ void TFLiteOpCreator::checkPool2D(const Pool2DOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertMaxPool2D(InputOps& inputs, InputParams& params,
-                                                               const Pool2DOptions* opts) {
+std::vector<mir::Operation*>
+TFLiteOpCreator::convertMaxPool2D(InputOps& inputs, const InputParams& params,
+                                  const Pool2DOptions* opts) {
   auto& input_shape = inputs[0]->getOutputShape(0);
   Shape window_shape{opts->filter_height(), opts->filter_width()};
   Shape strides{opts->stride_h(), opts->stride_w()};
@@ -163,7 +165,7 @@ std::vector<mir::Operation*> TFLiteOpCreator::convertMaxPool2D(InputOps& inputs,
 }
 
 std::vector<mir::Operation*> TFLiteOpCreator::convertAveragePool2D(InputOps& inputs,
-                                                                   InputParams& params,
+                                                                   const InputParams& params,
                                                                    const Pool2DOptions* opts) {
   auto& input_shape = inputs[0]->getOutputShape(0);
   Shape window_shape{opts->filter_height(), opts->filter_width()};
@@ -180,8 +182,9 @@ std::vector<mir::Operation*> TFLiteOpCreator::convertAveragePool2D(InputOps& inp
                                ops::PoolOp::RoundMode::floor);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::createSoftmax(InputOps& inputs, InputParams& params,
-                                                            const SoftmaxOptions* opts) {
+std::vector<mir::Operation*>
+TFLiteOpCreator::createSoftmax(InputOps& inputs, const InputParams& params,
+                               const SoftmaxOptions* opts) {
   // Softmax in TFLite is always 2-D.
   assert(inputs[0]->getOutputShape(0).rank() == 2);
   int32_t axis = 1;
@@ -197,8 +200,9 @@ Shape shapeFromTensor(mir::Tensor<int32_t>&& t) {
   return temporary_shape;
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::createSlice(InputOps& inputs, InputParams& params,
-                                                          const ::tflite::SliceOptions*) {
+std::vector<mir::Operation*>
+TFLiteOpCreator::createSlice(InputOps& inputs, const InputParams& params,
+                             const ::tflite::SliceOptions*) {
   auto starts = shapeFromTensor(mir::Tensor<int32_t>(params[0]));
   auto sizes = shapeFromTensor(mir::Tensor<int32_t>(params[1]));
   assert(starts.rank() == inputs[0]->getOutputShape(0).rank() &&
@@ -207,8 +211,9 @@ std::vector<mir::Operation*> TFLiteOpCreator::createSlice(InputOps& inputs, Inpu
                                 starts, sizes);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertReshape(InputOps& inputs, InputParams& params,
-                                                             const ReshapeOptions* opts) {
+std::vector<mir::Operation*>
+TFLiteOpCreator::convertReshape(InputOps& inputs, const InputParams& params,
+                                const ReshapeOptions* opts) {
   // TODO: we should also support "-1" values in new_shape, which means that correct
   // shape values must be calculated. Better do it in the shape inference module.
   Shape new_shape = ShapeHelper::createShape(*opts->new_shape(), opts->new_shape()->size());
@@ -218,7 +223,7 @@ std::vector<mir::Operation*> TFLiteOpCreator::convertReshape(InputOps& inputs, I
 }
 
 std::vector<mir::Operation*>
-TFLiteOpCreator::createTransposeConv(InputOps& inputs, InputParams& params,
+TFLiteOpCreator::createTransposeConv(InputOps& inputs, const InputParams& params,
                                      const ::tflite::TransposeConvOptions* opts) {
   Shape strides{opts->stride_h(), opts->stride_w(), 1};
   return createOp<ops::DeConv2DOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0), params[1],
@@ -226,7 +231,7 @@ TFLiteOpCreator::createTransposeConv(InputOps& inputs, InputParams& params,
 }
 
 std::vector<mir::Operation*>
-TFLiteOpCreator::convertResizeNN(InputOps& inputs, InputParams& params,
+TFLiteOpCreator::convertResizeNN(InputOps& inputs, const InputParams& params,
                                  const ::tflite::ResizeNearestNeighborOptions* opts) {
   // TODO support aligned corners
   assert(!opts->align_corners() && "Aligned corners not currently supported");
@@ -278,7 +283,8 @@ TFLiteOpCreator::createMul(InputOps& inputs, const InputParams& params,
 }
 
 std::vector<mir::Operation*>
-TFLiteOpCreator::createDiv(InputOps& inputs, InputParams&, const ::tflite::DivOptions* opts) {
+TFLiteOpCreator::createDiv(InputOps& inputs, const InputParams&,
+                           const ::tflite::DivOptions* opts) {
   std::vector<IODescriptor> descriptors;
   for (auto i : inputs)
     descriptors.push_back(i->getOutput(0));
@@ -287,7 +293,7 @@ TFLiteOpCreator::createDiv(InputOps& inputs, InputParams&, const ::tflite::DivOp
 }
 
 std::vector<mir::Operation*>
-TFLiteOpCreator::createMax(InputOps& inputs, InputParams&,
+TFLiteOpCreator::createMax(InputOps& inputs, const InputParams&,
                            const ::tflite::MaximumMinimumOptions* opts) {
   std::vector<IODescriptor> descriptors;
   for (auto i : inputs)
@@ -296,9 +302,10 @@ TFLiteOpCreator::createMax(InputOps& inputs, InputParams&,
                                       ops::ElementwiseOp::OpType::max);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertReducer(InputOps& inputs, InputParams& params,
-                                                             ops::ReduceFOp::FuncType ft,
-                                                             const ::tflite::ReducerOptions* opts) {
+std::vector<mir::Operation*>
+TFLiteOpCreator::convertReducer(InputOps& inputs, const InputParams& params,
+                                ops::ReduceFOp::FuncType ft,
+                                const ::tflite::ReducerOptions* opts) {
   assert(params.at(0).getShape().rank() <= 1 && "Must be 1-dim or 0-dim tensor");
   mir::Tensor<int> tensor(params.at(0));
   std::vector<int32_t> axes;
@@ -321,7 +328,7 @@ void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions* opts,
 
 std::vector<mir::Operation*>
 TFLiteOpCreator::convertFullyConnected(InputOps& inputs,
-                                       InputParams& params,
+                                       const InputParams& params,
                                        const FullyConnectedOptions* opts) {
   // Add Reshape operation to make sure the input for FC operation has shape [1, fc_input_size]
   int32_t fc_input_size = params[0].getShape().dim(0);
@@ -371,7 +378,7 @@ mir::Operation* TFLiteOpCreator::addFusedActivation(mir::Operation* input,
 }
 
 std::vector<mir::Operation*> TFLiteOpCreator::createSqueeze(
-  InputOps& inputs, InputParams& params, const ::tflite::SqueezeOptions* opts) {
+  InputOps& inputs, const InputParams& params, const ::tflite::SqueezeOptions* opts) {
 
   std::vector<int32_t> squeeze_dims{opts->squeeze_dims()->begin(), opts->squeeze_dims()->end()};
 
@@ -379,8 +386,11 @@ std::vector<mir::Operation*> TFLiteOpCreator::createSqueeze(
                                   squeeze_dims);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::createPad(InputOps& inputs, InputParams& params,
-                                               const ::tflite::PadOptions *opts) {
+
+std::vector<mir::Operation*>
+TFLiteOpCreator::createPad(InputOps& inputs, const InputParams& params,
+                           const ::tflite::PadOptions *opts) {
+
   assert(params.size() == 1); // support pad with one param
   std::vector<std::pair<int32_t, int32_t>> paddings;
 
@@ -404,20 +414,38 @@ std::vector<mir::Operation*> TFLiteOpCreator::createPad(InputOps& inputs, InputP
 }
 
 std::vector<mir::Operation*>
-TFLiteOpCreator::createActivation(InputOps& inputs, InputParams&,
+TFLiteOpCreator::createActivation(InputOps& inputs, const InputParams&,
                                   const ::tflite::ActivationFunctionType activationType) {
   assert(inputs.size() == 1);
   return {addFusedActivation(inputs[0], activationType)};
 }
 
 std::vector<mir::Operation*>
-TFLiteOpCreator::createSqrt(InputOps& inputs, InputParams&) {
+TFLiteOpCreator::createSqrt(InputOps& inputs, const InputParams&) {
   return createOp<ops::SqrtOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0));
 }
 
 std::vector<mir::Operation*>
-TFLiteOpCreator::createLogistic(InputOps& inputs, InputParams&) {
+TFLiteOpCreator::createLogistic(InputOps& inputs, const InputParams&) {
   return createOp<ops::SigmoidOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0));
 }
 
+std::vector<mir::Operation*>
+TFLiteOpCreator::createTranspose(InputOps& inputs, const InputParams& params,
+                                 const ::tflite::TransposeOptions*) {
+
+  assert(params.size() == 1);
+  std::vector<std::size_t> axis_order;
+
+  mir::Tensor<int32_t> permutation_tensor(params[0]);
+
+  ShapeRange range(permutation_tensor.getShape());
+  for (const auto& index : range) {
+    axis_order.push_back(permutation_tensor.at(index));
+  }
+
+  return createOp<ops::TransposeOp>(ActivationFunctionType_NONE,
+                                    inputs[0]->getOutput(0), axis_order);
+}
+
 } // namespace nnc
contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
index 3ae321a..38b4bea 100644 (file)
@@ -47,62 +47,69 @@ public:
 
   explicit TFLiteOpCreator(Graph* g) : _graph(g) {}
 
-  std::vector<mir::Operation*> convertConv2D(InputOps&, InputParams&, const ::tflite::Conv2DOptions*);
+  std::vector<mir::Operation*> convertConv2D(InputOps&, const InputParams&,
+                                             const ::tflite::Conv2DOptions*);
 
-  std::vector<mir::Operation*> convertDepthwiseConv2D(InputOps&, InputParams&,
+  std::vector<mir::Operation*> convertDepthwiseConv2D(InputOps&, const InputParams&,
                                                       const ::tflite::DepthwiseConv2DOptions*);
 
-  std::vector<mir::Operation*> convertConcatenation(InputOps&, InputParams&,
+  std::vector<mir::Operation*> convertConcatenation(InputOps&, const InputParams&,
                                                     const ::tflite::ConcatenationOptions*);
 
-  std::vector<mir::Operation*> convertMaxPool2D(InputOps&, InputParams&,
+  std::vector<mir::Operation*> convertMaxPool2D(InputOps&, const InputParams&,
                                                 const ::tflite::Pool2DOptions*);
 
-  std::vector<mir::Operation*> convertAveragePool2D(InputOps&, InputParams&,
+  std::vector<mir::Operation*> convertAveragePool2D(InputOps&, const InputParams&,
                                                     const ::tflite::Pool2DOptions*);
 
-  std::vector<mir::Operation*> convertReducer(InputOps&, InputParams&, ops::ReduceFOp::FuncType,
-                                        const ::tflite::ReducerOptions*);
+  std::vector<mir::Operation*> convertReducer(InputOps&, const InputParams&,
+                                              ops::ReduceFOp::FuncType,
+                                              const ::tflite::ReducerOptions*);
 
-  std::vector<mir::Operation*> createSoftmax(InputOps&, InputParams&, const ::tflite::SoftmaxOptions*);
+  std::vector<mir::Operation*> createSoftmax(InputOps&, const InputParams&,
+                                             const ::tflite::SoftmaxOptions*);
 
-  std::vector<mir::Operation*> createSlice(InputOps&, InputParams&, const ::tflite::SliceOptions*);
+  std::vector<mir::Operation*> createSlice(InputOps&, const InputParams&,
+                                           const ::tflite::SliceOptions*);
   
-  std::vector<mir::Operation*> convertReshape(InputOps&, InputParams&,
+  std::vector<mir::Operation*> convertReshape(InputOps&, const InputParams&,
                                               const ::tflite::ReshapeOptions*);
 
-  std::vector<mir::Operation*> convertFullyConnected(InputOps&, InputParams&,
+  std::vector<mir::Operation*> convertFullyConnected(InputOps&, const InputParams&,
                                                      const ::tflite::FullyConnectedOptions*);
 
-  std::vector<mir::Operation*> convertResizeNN(InputOps&, InputParams&,
+  std::vector<mir::Operation*> convertResizeNN(InputOps&, const InputParams&,
                                                const ::tflite::ResizeNearestNeighborOptions*);
 
-  std::vector<mir::Operation*> createLogistic(InputOps& inputs, InputParams& params);
+  std::vector<mir::Operation*> createLogistic(InputOps& inputs, const InputParams& params);
 
-  std::vector<mir::Operation*> createSqrt(InputOps& inputs, InputParams& params);
+  std::vector<mir::Operation*> createSqrt(InputOps& inputs, const InputParams& params);
 
-  std::vector<mir::Operation*> createSqueeze(InputOps& inputs, InputParams& params,
+  std::vector<mir::Operation*> createSqueeze(InputOps& inputs, const InputParams& params,
                                              const ::tflite::SqueezeOptions* opts);
 
   /** @brief Elementwise Add  */
-  std::vector<mir::Operation*> createAdd(InputOps&, const InputParams&, const ::tflite::AddOptions*);
+  std::vector<mir::Operation*> createAdd(InputOps&, const InputParams&,
+                                         const ::tflite::AddOptions*);
   /** @brief Elementwise product */
-  std::vector<mir::Operation*> createMul(InputOps&, const InputParams&, const ::tflite::MulOptions*);
+  std::vector<mir::Operation*> createMul(InputOps&, const InputParams&,
+                                         const ::tflite::MulOptions*);
   /** @brief Elementwise maximum  */
-  std::vector<mir::Operation*> createMax(InputOps&, InputParams&, const ::tflite::MaximumMinimumOptions*);
+  std::vector<mir::Operation*> createMax(InputOps&, const InputParams&,
+                                         const ::tflite::MaximumMinimumOptions*);
   /** @brief Elementwise division  */
-  std::vector<mir::Operation*> createDiv(InputOps&, InputParams&, const ::tflite::DivOptions*);
+  std::vector<mir::Operation*> createDiv(InputOps&, const InputParams&,
+                                         const ::tflite::DivOptions*);
 
   /// @brief Free-standing ( non-fused ) activation function based on tflite activation
-  std::vector<mir::Operation*> createActivation(InputOps&, InputParams&,
+  std::vector<mir::Operation*> createActivation(InputOps&, const InputParams&,
                                                 const ::tflite::ActivationFunctionType);
   /**
  * @brief Creates a Transposed convolution
  * @param params 0 - output shape (unused), 1 - kernel, 2- input
  */
-  std::vector<mir::Operation*> createTransposeConv(
-    InputOps&, InputParams&,
-    const ::tflite::TransposeConvOptions*);
+  std::vector<mir::Operation*> createTransposeConv(InputOps&, const InputParams&,
+                                                   const ::tflite::TransposeConvOptions*);
 
   /**
    * @brief Create a Pad operation
@@ -111,9 +118,19 @@ public:
    * @param opts TFLite PadOptions
    * @return Operations vector
    */
-  std::vector<mir::Operation*> createPad(InputOps& inputs, InputParams& params,
+  std::vector<mir::Operation*> createPad(InputOps&, const InputParams&,
                                          const ::tflite::PadOptions* opts);
 
+  /**
+   * @brief Create a Transpose operation
+   * @param inputs Operations vector
+   * @param params Tensor with axis order
+   * @param opts TFLite TransposeOptions
+   * @return Operations vector
+   */
+  std::vector<mir::Operation*> createTranspose(InputOps&, const InputParams&,
+                                               const ::tflite::TransposeOptions*);
+
   void checkPool2D(const ::tflite::Pool2DOptions*, std::set<std::string>&);
 
   void checkConcatenation(const ::tflite::ConcatenationOptions*, std::set<std::string>&);