[exo-tflite] Export DepthwiseConv2D operation (#6720)
author Seongwoo Chae/On-Device Lab(SR)/Engineer/Samsung Electronics <sw4670.chae@samsung.com>
Tue, 20 Aug 2019 10:29:21 +0000 (19:29 +0900)
committer Jonghyun Park/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <jh1302.park@samsung.com>
Tue, 20 Aug 2019 10:29:21 +0000 (19:29 +0900)
This commit introduces the DepthwiseConv2D operation to the exporter.

Signed-off-by: seongwoo <sw4670.chae@samsung.com>
compiler/exo-tflite/src/OperationExporter.cpp

index fc939ec..c322f65 100644
@@ -51,6 +51,7 @@ public:
   void visit(loco::MaxPool2D *) final;
   void visit(loco::AvgPool2D *) final;
   void visit(loco::Conv2D *) final;
+  void visit(loco::DepthwiseConv2D *) final;
   void visit(loco::TensorConcat *) final;
   void visit(loco::TensorSoftmax *) final;
   void visit(loco::BiasEncode *) final;
@@ -174,6 +175,53 @@ void OperationExporter::visit(loco::Conv2D *node)
   gd._operators.push_back(op_offset);
 }
 
+void OperationExporter::visit(loco::DepthwiseConv2D *node)
+{
+  uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_DEPTHWISE_CONV_2D);
+
+  // The third input of tflite DEPTHWISE_CONV_2D should be a bias. We make (and register to gd)
+  // a dummy zero bias: rank 1, with one element per output channel (C*M), and all values zero.
+  auto *ker = dynamic_cast<loco::DepthwiseFilterEncode *>(node->ker());
+  assert(ker);
+
+  int32_t bias_vec_size = ShapeInference::get(ker)._dims[3]; // output_size(C*M)
+  auto bias_vec_shape_offset = builder.CreateVector(std::vector<int32_t>{bias_vec_size});
+
+  size_t raw_bias_vec_size = bias_vec_size * sizeof(int32_t);
+  std::vector<float> bias_vec_data(bias_vec_size);
+  auto bias_vec_offset =
+      builder.CreateVector(reinterpret_cast<uint8_t *>(bias_vec_data.data()), raw_bias_vec_size);
+
+  auto bias_buffer_offset = CreateBuffer(builder, bias_vec_offset);
+
+  const auto bias_buffer_id = static_cast<uint32_t>(gd._buffers.size());
+
+  gd._buffers.push_back(bias_buffer_offset);
+
+  auto bias_tensor_offset =
+      CreateTensor(builder, bias_vec_shape_offset, TensorType_FLOAT32, bias_buffer_id);
+  auto bias_tensor_id = static_cast<int32_t>(gd._tensors.size());
+  gd._tensors.push_back(bias_tensor_offset);
+
+  std::vector<int32_t> inputs_vec{get_tensor_index(node->ifm()), get_tensor_index(node->ker()),
+                                  bias_tensor_id};
+  std::vector<int32_t> outputs_vec{get_tensor_index(static_cast<loco::Node *>(node))};
+  auto inputs = builder.CreateVector(inputs_vec);
+  auto outputs = builder.CreateVector(outputs_vec);
+  tflite::Padding padding = getOpPadding(node->pad());
+
+  int32_t ifm_channel_size = ShapeInference::get(node->ifm())._dims[3];
+  // depth_multiplier = bias_vec_size (output_size) / ifm_channel_size
+  auto options =
+      CreateDepthwiseConv2DOptions(builder, padding, node->stride()->horizontal(),
+                                   node->stride()->vertical(), bias_vec_size / ifm_channel_size);
+
+  auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
+                                  tflite::BuiltinOptions_DepthwiseConv2DOptions, options.Union());
+  gd._operators.push_back(op_offset);
+}
+
 void OperationExporter::visit(loco::TensorSoftmax *node)
 {
   // TODO Support when the input rank of TensorSoftmax is not 2
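For reference, here is a minimal standalone sketch (not part of the commit; the shapes and values are hypothetical) of the two conventions the new visitor relies on: TFLite's DepthwiseConv2DOptions stores the depth multiplier M rather than the output channel count C*M, and the mandatory third input is a rank-1, zero-filled bias with one element per output channel.

#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
  // Hypothetical shapes: the IFM is NHWC [1, 8, 8, 4] and the encoded depthwise
  // filter is [1, 3, 3, 8], i.e. C = 4 input channels and C*M = 8 output channels.
  const int32_t ifm_channel_size = 4; // _dims[3] of the IFM
  const int32_t bias_vec_size = 8;    // _dims[3] of the encoded filter (C*M)

  // DepthwiseConv2DOptions takes the multiplier M, not the output channel count C*M.
  assert(bias_vec_size % ifm_channel_size == 0);
  const int32_t depth_multiplier = bias_vec_size / ifm_channel_size;
  assert(depth_multiplier == 2);

  // The mandatory bias input is rank 1, zero-filled, one element per output channel.
  std::vector<float> bias_vec_data(bias_vec_size, 0.0f);
  assert(bias_vec_data.size() == static_cast<size_t>(bias_vec_size));

  return 0;
}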