[nnc] Fix deconv2D import in tflite importer (#2544)
authorVladimir Plazun/AI Tools Lab /SRR/Engineer/삼성전자 <v.plazun@samsung.com>
Thu, 10 Jan 2019 09:30:28 +0000 (12:30 +0300)
committerРоман Михайлович Русяев/AI Tools Lab /SRR/Staff Engineer/삼성전자 <r.rusyaev@samsung.com>
Thu, 10 Jan 2019 09:30:28 +0000 (12:30 +0300)
Use the output shape stored in the tflite model, as it is impossible to compute it from the operation parameters

Signed-off-by: Vladimir Plazun <v.plazun@partner.samsung.com>
contrib/nnc/core/modelIR/operations/DeConv2DOp.cpp
contrib/nnc/include/core/modelIR/operations/Deconv2DOp.h
contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.h

index b7d6dce..95df7c3 100644 (file)
@@ -20,6 +20,23 @@ namespace nnc {
 namespace mir {
 namespace ops {
 
+void DeConv2DOp::inferPaddings() {
+  auto& input_shape = getInputShape(0);
+  auto& kernel_shape = _kernel.getShape();
+  auto output_shape = getOutputShape(0);
+
+  // Oddly enough, there appears to be no difference in padding calculation
+  // between the SAME and VALID padding types (at least for tflite).
+  for (int d = 0; d < 2; ++d) {
+    // See `ComputePadding` in the tflite sources.
+    int pad = (input_shape.dim(d + 1) - 1) * _strides.dim(d)
+              + kernel_shape.dim(d) - output_shape.dim(d + 1);
+
+    _paddingBefore[d] = pad / 2;
+    _paddingAfter[d] = pad - _paddingBefore[d];
+  }
+}
+
 // See https://github.com/tensorflow/tensorflow/issues/2118
 // VALID: output = input * stride + filter - stride
 // SAME: output = input * stride - stride + 1
index a91aec0..3187fa9 100644 (file)
@@ -54,6 +54,22 @@ public:
     inferOutputShapes();
   }
 
+  DeConv2DOp(const IODescriptor& arg,
+             const TensorVariant& kernel,
+             const Shape& strides,
+             PaddingType padding_type,
+             const Shape& output_shape)
+    : Operation(Type::deConv2D, {arg}),
+      _kernel(kernel),
+      _strides(strides),
+      _paddingType(padding_type),
+      _paddingBefore(2),
+      _paddingAfter(2) {
+    assert(_paddingType != PaddingType::Custom);
+    setOutputShape(0, output_shape);
+    inferPaddings();
+  }
+
   const TensorVariant& getKernel() const { return _kernel; }
 
   const Shape& getStrides() const { return _strides; }
@@ -67,6 +83,11 @@ public:
 private:
   void inferOutputShapes();
 
+  /**
+   * @brief Compute paddings based on input shape, kernel shape and strides
+   */
+  void inferPaddings();
+
   const TensorVariant _kernel;
   Shape _strides;
   PaddingType _paddingType;
index de060f0..f270e4e 100644 (file)
@@ -253,10 +253,13 @@ void TfliteImporter::walkOperator(const Operator* op) {
     case BuiltinOperator_SQUARED_DIFFERENCE:
       outputs = _opCreator->createSquaredDifference(inputs, params); // no activation
       break;
-    case BuiltinOperator_TRANSPOSE_CONV:
+    case BuiltinOperator_TRANSPOSE_CONV: {
+      auto tensor = (*_tensors)[op->outputs()->Get(0)];
+      auto out_shape = ShapeHelper::createShape(*tensor->shape(), tensor->shape()->size());
       outputs = _opCreator->createTransposeConv(
-        inputs, params,op->builtin_options_as<TransposeConvOptions>());
+        inputs, params, op->builtin_options_as<TransposeConvOptions>(), out_shape);
       break;
+    }
     case BuiltinOperator_PAD:
       outputs = _opCreator->createPad(inputs, params, op->builtin_options_as<PadOptions>());
       break;
@@ -330,6 +333,9 @@ std::vector<mir::TensorVariant> TfliteImporter::createOpParams(const Operator* o
         // TODO: Currently this is only used by the interpreter and shape inference,
         // don't forget to change this if tensor shape processing architecture changes.
         params_for_op.emplace_back(mir::transposeTensor<1, 2, 3, 0>(tensor));
+      } else if (opcode == BuiltinOperator_TRANSPOSE_CONV && t->shape()->size() == 4) {
+        // Tflite uses [in, H, W, out] and we expect kernel to be [H, W, in, out]
+        params_for_op.emplace_back(mir::transposeTensor<1, 2, 0, 3>(tensor));
       } else if (opcode == BuiltinOperator_FULLY_CONNECTED && t->shape()->size() == 2) {
         params_for_op.emplace_back(mir::transposeTensor<1, 0>(tensor));
       } else {
index 6d039b3..f7ddd7f 100644 (file)
@@ -225,10 +225,12 @@ TFLiteOpCreator::convertReshape(InputOps& inputs, const InputParams& params,
 
 std::vector<mir::Operation*>
 TFLiteOpCreator::createTransposeConv(InputOps& inputs, const InputParams& params,
-                                     const ::tflite::TransposeConvOptions* opts) {
+                                     const ::tflite::TransposeConvOptions* opts,
+                                     const Shape& output_shape) {
   Shape strides{opts->stride_h(), opts->stride_w()};
+
   return createOp<ops::DeConv2DOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0), params[1],
-                                   strides, paddingMap[opts->padding()]);
+                                   strides, paddingMap[opts->padding()], output_shape);
 }
 
 std::vector<mir::Operation*>
index de79a64..6ba6f92 100644 (file)
@@ -103,8 +103,10 @@ public:
  * @brief Creates a Transposed convolution
  * @param params 0 - output shape (unused), 1 - kernel, 2- input
  */
-  std::vector<mir::Operation*> createTransposeConv(InputOps&, const InputParams&,
-                                                   const ::tflite::TransposeConvOptions*);
+  std::vector<mir::Operation*> createTransposeConv(
+    InputOps&, const InputParams&,
+    const ::tflite::TransposeConvOptions*,
+    const Shape&);
 
   /**
    * @brief Create a Pad operation