[mir-onnx-importer] Use new operations interface in importer. (#8182)
author     Gusev Dmitry/Engineer/AI Tools Lab /SRR/Samsung Electronics <d.gusev@partner.samsung.com>
           Wed, 16 Oct 2019 10:56:40 +0000 (13:56 +0300)
committer  Alexander Efimov/./AI Tools Lab/Samsung Electronics <a.efimov@samsung.com>
           Wed, 16 Oct 2019 10:56:40 +0000 (13:56 +0300)
* Use the new operations interface in the importer.

The changes follow from the new operation constructor signatures, which now
take an attributes struct instead of a long list of positional arguments.

Signed-off-by: Dmitry Gusev <d.gusev@partner.samsung.com>
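
Every converter touched below follows the same pattern: the positional
constructor arguments are gathered into a default-constructed attributes
struct, only the fields that matter for that converter are filled in, and the
struct is passed to a single createOp call (GlobalAveragePool.cpp, for
instance, no longer spells out unit strides and zero padding). A minimal
standalone sketch of this parameter-object style is given here; the types
AvgPool2DAttributes, DataFormat and createAvgPool are simplified stand-ins for
illustration only, not the real mir API (the actual struct in the diff is
mir::AvgPool2DOpAttributes).

  // Illustrative stand-ins only -- not the actual mir API.
  #include <cstdint>
  #include <iostream>
  #include <vector>

  enum class DataFormat { NCHW, NHWC };

  struct AvgPool2DAttributes
  {
    // Defaults cover the common case, so callers set only what they need.
    std::vector<std::int32_t> window{1, 1};
    std::vector<std::int32_t> strides{1, 1};
    std::vector<std::int32_t> padding_before{0, 0};
    std::vector<std::int32_t> padding_after{0, 0};
    bool include_pad = false;
    DataFormat data_format = DataFormat::NHWC;
  };

  void createAvgPool(const AvgPool2DAttributes &attrs)
  {
    std::cout << "window " << attrs.window[0] << "x" << attrs.window[1]
              << ", strides " << attrs.strides[0] << "x" << attrs.strides[1] << "\n";
  }

  int main()
  {
    // GlobalAveragePool-style call: rely on the defaults, set only the
    // window and the data layout.
    AvgPool2DAttributes attributes;
    attributes.window = {7, 7};
    attributes.data_format = DataFormat::NCHW;
    createAvgPool(attributes);
    return 0;
  }
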
compiler/mir-onnx-importer/Op/AveragePool.cpp
compiler/mir-onnx-importer/Op/Conv.cpp
compiler/mir-onnx-importer/Op/ConvTranspose.cpp
compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
compiler/mir-onnx-importer/Op/MaxPool.cpp
compiler/mir-onnx-importer/Op/Pad.cpp

diff --git a/compiler/mir-onnx-importer/Op/AveragePool.cpp b/compiler/mir-onnx-importer/Op/AveragePool.cpp
index cc9e647..37381db 100644
@@ -81,9 +81,14 @@ void AveragePoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
                      padding_after);
   }
 
-  auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, kernel_shape, strides, padding_before,
-                                                padding_after, false, mir::DataFormat::NCHW)
-                    ->getOutput(0);
+  mir::AvgPool2DOpAttributes attributes;
+  attributes.window = kernel_shape;
+  attributes.strides = strides;
+  attributes.padding_before = padding_before;
+  attributes.padding_after = padding_after;
+  attributes.include_pad = false;
+  attributes.data_format = mir::DataFormat::NCHW;
+  auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, attributes)->getOutput(0);
 
   context->setNodeOutputs(onnx_node, {result});
 }
diff --git a/compiler/mir-onnx-importer/Op/Conv.cpp b/compiler/mir-onnx-importer/Op/Conv.cpp
index 06c31c3..ed9cc49 100644
@@ -106,14 +106,17 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
   bool is_depthwise = (group != 1) && (in_group_size == 1) && (out_channels == group);
 
   mir::Operation::Output *result;
+  mir::Conv2DOpAttributes attributes;
+  attributes.strides = strides;
+  attributes.padding_before = padding_before;
+  attributes.padding_after = padding_after;
+  attributes.data_format = mir::DataFormat::NCHW;
   if (is_depthwise)
   {
     // TODO handle properly kernel with layer multiplier
     auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
     kernel = createOp<mir::ops::ConstantOp>(graph, transposed_tensor)->getOutput(0);
-    result = createOp<mir::ops::DepthwiseConv2DOp>(graph, input, kernel, strides, padding_before,
-                                                   padding_after, mir::DataFormat::NCHW)
-                 ->getOutput(0);
+    result = createOp<mir::ops::DepthwiseConv2DOp>(graph, input, kernel, attributes)->getOutput(0);
   }
   else
   {
@@ -123,9 +126,7 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
     // HWIO -> OHWI
     kernel_tensor = mir::transposeTensor<3, 0, 1, 2>(kernel_tensor);
     kernel = createOp<mir::ops::ConstantOp>(graph, kernel_tensor)->getOutput(0);
-    result = createOp<mir::ops::Conv2DOp>(graph, input, kernel, strides, padding_before,
-                                          padding_after, mir::DataFormat::NCHW)
-                 ->getOutput(0);
+    result = createOp<mir::ops::Conv2DOp>(graph, input, kernel, attributes)->getOutput(0);
   }
 
   if (inputs.size() > 2)
diff --git a/compiler/mir-onnx-importer/Op/ConvTranspose.cpp b/compiler/mir-onnx-importer/Op/ConvTranspose.cpp
index 4bd47cc..4162516 100644
@@ -103,9 +103,11 @@ void ConvTransposeNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
       throw std::runtime_error("ConvTranspose: attribute 'output_shape' has incorrect size.");
     const mir::Shape output_shape{input_shape.dim(0), kernel->getShape().dim(2), output_size[0],
                                   output_size[1]};
-    result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, strides,
-                                            mir::ops::PaddingType::SameUpper, output_shape,
-                                            mir::DataFormat::NCHW)
+    mir::Deconv2DOpAttributes attributes;
+    attributes.strides = strides;
+    attributes.data_format = mir::DataFormat::NCHW;
+    attributes.padding_type = mir::ops::PaddingType::SameUpper;
+    result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes, output_shape)
                  ->getOutput(0);
   }
   else
@@ -129,9 +131,12 @@ void ConvTransposeNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
       inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_size, padding_before,
                        padding_after);
     }
-    result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, strides, padding_before,
-                                            padding_after, mir::DataFormat::NCHW)
-                 ->getOutput(0);
+    mir::Deconv2DOpAttributes attributes;
+    attributes.strides = strides;
+    attributes.padding_before = padding_before;
+    attributes.padding_after = padding_after;
+    attributes.data_format = mir::DataFormat::NCHW;
+    result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes)->getOutput(0);
   }
 
   if (inputs.size() > 2)
diff --git a/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp b/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
index fc161e1..9dd429c 100644
@@ -39,13 +39,11 @@ void GlobalAveragePoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
   // GlobalAveragePool is equivalent to AveragePool with kernel size equal
   // to the spatial dimension of input tensor.
   const std::vector<std::int32_t> window_size{input->getShape().dim(2), input->getShape().dim(3)};
-  const std::vector<std::int32_t> strides{1, 1};
-  const std::vector<std::int32_t> padding_before{0, 0};
-  const std::vector<std::int32_t> padding_after{0, 0};
+  mir::AvgPool2DOpAttributes attributes;
+  attributes.window = window_size;
+  attributes.data_format = mir::DataFormat::NCHW;
 
-  auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, window_size, strides, padding_before,
-                                                padding_after, true, mir::DataFormat::NCHW)
-                    ->getOutput(0);
+  auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, attributes)->getOutput(0);
 
   context->setNodeOutputs(onnx_node, {result});
 }
diff --git a/compiler/mir-onnx-importer/Op/MaxPool.cpp b/compiler/mir-onnx-importer/Op/MaxPool.cpp
index a8fa520..2f18028 100644
@@ -81,9 +81,13 @@ void MaxPoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
                      padding_after);
   }
 
-  auto result = createOp<mir::ops::MaxPool2DOp>(graph, input, kernel_shape, strides, padding_before,
-                                                padding_after, mir::DataFormat::NCHW)
-                    ->getOutput(0);
+  mir::MaxPool2DOpAttributes attributes;
+  attributes.window = kernel_shape;
+  attributes.strides = strides;
+  attributes.padding_before = padding_before;
+  attributes.padding_after = padding_after;
+  attributes.data_format = mir::DataFormat::NCHW;
+  auto result = createOp<mir::ops::MaxPool2DOp>(graph, input, attributes)->getOutput(0);
 
   context->setNodeOutputs(onnx_node, {result});
 }
diff --git a/compiler/mir-onnx-importer/Op/Pad.cpp b/compiler/mir-onnx-importer/Op/Pad.cpp
index 5015c41..1608cfe 100644
@@ -55,16 +55,16 @@ void PadNodeConverter::convertPadAttrName(const std::string &pad_attr_name,
 
   const int num_dims = input->getShape().rank();
   assert(pads.size() == num_dims * 2);
-  std::vector<std::int32_t> padding_before(num_dims);
-  std::vector<std::int32_t> padding_after(num_dims);
+  mir::PadOpAttributes attributes(num_dims);
   for (int i = 0; i < num_dims; i++)
   {
-    padding_before[i] = pads[i];
-    padding_after[i] = pads[num_dims + i];
+    attributes.padding_before[i] = pads[i];
+    attributes.padding_after[i] = pads[num_dims + i];
   }
 
-  auto result =
-      createOp<mir::ops::PadOp>(graph, input, padding_before, padding_after, value)->getOutput(0);
+  attributes.padding_value = value;
+
+  auto result = createOp<mir::ops::PadOp>(graph, input, attributes)->getOutput(0);
 
   context->setNodeOutputs(onnx_node, {result});
 }