[mir_onnx] Remove use of converters before operations with NCHW layout (#7044)
author Pavel Iliutchenko/AI Tools Lab/SRR/Engineer/Samsung Electronics <p.iliutchenk@samsung.com>
Tue, 3 Sep 2019 17:02:49 +0000 (20:02 +0300)
committer Alexander Efimov/AI Tools Lab/Samsung Electronics <a.efimov@samsung.com>
Tue, 3 Sep 2019 17:02:49 +0000 (20:02 +0300)
* Removed the convertONNXToMIR/convertMIRToONNX layout converters
* Fixed operation parameters (pass mir::DataFormat::NCHW explicitly)
* Fixed constant shape in BatchNormalization so that broadcasting in
  elementwise operations works correctly (see the sketch below)
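
For context, a minimal sketch of the new BatchNormalization broadcasting pattern, using the same names as the converter changed below (graph, input, mean, and mean_accessor are the already-imported MIR values from that file):

  // mean/scale/bias arrive as 1-D tensors of length C; reshape them to
  // {1, C, 1, 1} so elementwise AddOp/MulOp broadcast over the NCHW input.
  mir::Shape new_shape{1, 1, 1, 1};
  new_shape.dim(1) = mean_accessor.getShape().dim(0); // channel dimension
  auto reshaped_mean = createOp<mir::ops::ReshapeOp>(graph, mean, new_shape)->getOutput(0);
  // Y = (X + C1) * C2 + bias, computed directly in NCHW with no TransposeOp round trip.
  auto result = createOp<mir::ops::AddOp>(graph, input, reshaped_mean)->getOutput(0);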

Signed-off-by: Pavel Iliutchenko <p.iliutchenk@samsung.com>
compiler/mir-onnx-importer/ONNXHelpers.h
compiler/mir-onnx-importer/Op/AveragePool.cpp
compiler/mir-onnx-importer/Op/BatchNormalization.cpp
compiler/mir-onnx-importer/Op/Conv.cpp
compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
compiler/mir-onnx-importer/Op/MaxPool.cpp
compiler/mir-onnx-importer/Op/Upsample.cpp

index 334fe17..1784a4c 100644 (file)
@@ -35,20 +35,6 @@ mir::Operation *createOp(mir::Graph *graph, Types &&... args)
   return graph->create<OpType>(std::forward<Types>(args)...);
 }
 
-inline mir::Operation::Output *convertONNXToMIR(mir::Graph *graph, mir::Operation::Output *arg)
-{
-  // NCHW -> NHWC
-  return createOp<mir::ops::TransposeOp>(graph, arg, std::vector<std::size_t>{0, 2, 3, 1})
-      ->getOutput(0);
-}
-
-inline mir::Operation::Output *convertMIRToONNX(mir::Graph *graph, mir::Operation::Output *arg)
-{
-  // NHWC -> NCHW
-  return createOp<mir::ops::TransposeOp>(graph, arg, std::vector<std::size_t>{0, 3, 1, 2})
-      ->getOutput(0);
-}
-
 } // namespace mir_onnx
 
 #endif // __MIR_ONNX_HELPERS_H__
index bd4c10a..cc9e647 100644 (file)
@@ -81,11 +81,9 @@ void AveragePoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
                      padding_after);
   }
 
-  input = convertONNXToMIR(graph, input);
   auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, kernel_shape, strides, padding_before,
-                                                padding_after, false, mir::DataFormat::NHWC)
+                                                padding_after, false, mir::DataFormat::NCHW)
                     ->getOutput(0);
-  result = convertMIRToONNX(graph, result);
 
   context->setNodeOutputs(onnx_node, {result});
 }
index cce1da8..67b7203 100644 (file)
@@ -25,6 +25,7 @@
 #include "mir/ops/AddOp.h"
 #include "mir/ops/ConstantOp.h"
 #include "mir/ops/MulOp.h"
+#include "mir/ops/ReshapeOp.h"
 
 #include <cmath>
 
@@ -114,12 +115,19 @@ void BatchNormalizationNodeConverter::convertV9(const onnx::NodeProto &onnx_node
   for (const auto &idx : mir::ShapeRange(scale_accessor.getShape()))
     scale_accessor.at(idx) /= std::sqrt(var_accessor.at(idx) + epsilon);
 
+  assert(mean_accessor.getShape().rank() == 1);
+  assert(input->getShape().rank() == 4 && "Supported only 4D input");
+  mir::Shape new_shape{1, 1, 1, 1};
+  new_shape.dim(1) = mean_accessor.getShape().dim(0); // set channel dim
+
+  auto reshaped_mean = createOp<mir::ops::ReshapeOp>(graph, mean, new_shape)->getOutput(0);
+  auto reshaped_scale = createOp<mir::ops::ReshapeOp>(graph, scale, new_shape)->getOutput(0);
+  auto reshaped_bias = createOp<mir::ops::ReshapeOp>(graph, bias, new_shape)->getOutput(0);
+
   // Y = (X + C1) * C2 + bias
-  input = convertONNXToMIR(graph, input);
-  auto result = createOp<mir::ops::AddOp>(graph, input, mean)->getOutput(0);
-  result = createOp<mir::ops::MulOp>(graph, result, scale)->getOutput(0);
-  result = createOp<mir::ops::AddOp>(graph, result, bias)->getOutput(0);
-  result = convertMIRToONNX(graph, result);
+  auto result = createOp<mir::ops::AddOp>(graph, input, reshaped_mean)->getOutput(0);
+  result = createOp<mir::ops::MulOp>(graph, result, reshaped_scale)->getOutput(0);
+  result = createOp<mir::ops::AddOp>(graph, result, reshaped_bias)->getOutput(0);
 
   context->setNodeOutputs(onnx_node, {result});
 }
index 7042fbe..ea29c3f 100644 (file)
@@ -105,15 +105,15 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
   bool is_depthwise = (group != 1) && (in_group_size == 1) && (out_channels == group);
 
   mir::Operation::Output *result;
-  input = convertONNXToMIR(graph, input);
   if (is_depthwise)
   {
     // TODO handle properly kernel with layer multiplier
     auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
     kernel = createOp<mir::ops::ConstantOp>(graph, transposed_tensor)->getOutput(0);
-    result = createOp<mir::ops::DepthwiseConv2DOp>(graph, input, kernel, mir::Shape(strides),
-                                                   padding_before, padding_after)
-                 ->getOutput(0);
+    result =
+        createOp<mir::ops::DepthwiseConv2DOp>(graph, input, kernel, mir::Shape(strides),
+                                              padding_before, padding_after, mir::DataFormat::NCHW)
+            ->getOutput(0);
   }
   else
   {
@@ -123,7 +123,7 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
     kernel_tensor = mir::transposeTensor<3, 0, 1, 2>(kernel_tensor);
     kernel = createOp<mir::ops::ConstantOp>(graph, kernel_tensor)->getOutput(0);
     result = createOp<mir::ops::Conv2DOp>(graph, input, kernel, mir::Shape(strides), padding_before,
-                                          padding_after)
+                                          padding_after, mir::DataFormat::NCHW)
                  ->getOutput(0);
   }
 
@@ -133,8 +133,6 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
     result = createOp<mir::ops::AddOp>(graph, result, bias)->getOutput(0);
   }
 
-  result = convertMIRToONNX(graph, result);
-
   context->setNodeOutputs(onnx_node, {result});
 }
 
index 4b2e43b..fc161e1 100644 (file)
@@ -43,11 +43,9 @@ void GlobalAveragePoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
   const std::vector<std::int32_t> padding_before{0, 0};
   const std::vector<std::int32_t> padding_after{0, 0};
 
-  input = convertONNXToMIR(graph, input);
   auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, window_size, strides, padding_before,
-                                                padding_after, true, mir::DataFormat::NHWC)
+                                                padding_after, true, mir::DataFormat::NCHW)
                     ->getOutput(0);
-  result = convertMIRToONNX(graph, result);
 
   context->setNodeOutputs(onnx_node, {result});
 }
index 0ba4524..a8fa520 100644 (file)
@@ -81,11 +81,9 @@ void MaxPoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
                      padding_after);
   }
 
-  input = convertONNXToMIR(graph, input);
   auto result = createOp<mir::ops::MaxPool2DOp>(graph, input, kernel_shape, strides, padding_before,
-                                                padding_after, mir::DataFormat::NHWC)
+                                                padding_after, mir::DataFormat::NCHW)
                     ->getOutput(0);
-  result = convertMIRToONNX(graph, result);
 
   context->setNodeOutputs(onnx_node, {result});
 }
index 4bc8326..3c8e314 100644 (file)
@@ -60,17 +60,16 @@ void UpsampleNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
 
   assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
   std::vector<float> scales_vector(4);
-  // NHWC
+  // NCHW
   scales_vector.at(0) = 1.0f;
-  scales_vector.at(1) = h_scale;
-  scales_vector.at(2) = w_scale;
-  scales_vector.at(3) = 1.0f;
+  scales_vector.at(1) = 1.0f;
+  scales_vector.at(2) = h_scale;
+  scales_vector.at(3) = w_scale;
 
   auto result =
-      createOp<mir::ops::ResizeOp>(graph, convertONNXToMIR(graph, inputs[0]),
+      createOp<mir::ops::ResizeOp>(graph, inputs[0],
                                    mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
           ->getOutput(0);
-  result = convertMIRToONNX(graph, result);
 
   context->setNodeOutputs(onnx_node, {result});
 }
@@ -95,17 +94,16 @@ void UpsampleNodeConverter::convertV7(const onnx::NodeProto &onnx_node,
 
   assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
   std::vector<float> scales_vector(4);
-  // NHWC
+  // NCHW
   scales_vector.at(0) = scales_attr->floats(0);
-  scales_vector.at(1) = scales_attr->floats(2);
-  scales_vector.at(2) = scales_attr->floats(3);
-  scales_vector.at(3) = scales_attr->floats(1);
+  scales_vector.at(1) = scales_attr->floats(1);
+  scales_vector.at(2) = scales_attr->floats(2);
+  scales_vector.at(3) = scales_attr->floats(3);
 
   auto result =
-      createOp<mir::ops::ResizeOp>(graph, convertONNXToMIR(graph, inputs[0]),
+      createOp<mir::ops::ResizeOp>(graph, inputs[0],
                                    mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
           ->getOutput(0);
-  result = convertMIRToONNX(graph, result);
 
   context->setNodeOutputs(onnx_node, {result});
 }
@@ -130,16 +128,14 @@ void UpsampleNodeConverter::convertV9(const onnx::NodeProto &onnx_node,
          "The number of elements of 'scales' should be the same as the rank of input 'X'");
   assert(rank == 4 && "Only rank 4 is supported");
   std::vector<float> scales_vector(4);
-  const int onnx2mir[] = {0, 3, 1, 2};
   assert(scales_tensor.getShape().rank() == 1 && "Scales are a 1d tensor");
   for (int i = 0; i < scales_tensor.getShape().numElements(); i++)
-    scales_vector[onnx2mir[i]] = scales_tensor.atOffset(i);
+    scales_vector[i] = scales_tensor.atOffset(i);
 
   auto result =
-      createOp<mir::ops::ResizeOp>(graph, convertONNXToMIR(graph, inputs[0]),
+      createOp<mir::ops::ResizeOp>(graph, inputs[0],
                                    mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
           ->getOutput(0);
-  result = convertMIRToONNX(graph, result);
 
   context->setNodeOutputs(onnx_node, {result});
 }