[mir_onnx] Add support for auto_pad attribute (#6943)
author: Сергей Баранников/AI Tools Lab /SRR/Engineer/삼성전자 <s.barannikov@samsung.com>
Wed, 28 Aug 2019 07:53:49 +0000 (16:53 +0900)
committer: Alexander Efimov/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Wed, 28 Aug 2019 07:53:49 +0000 (10:53 +0300)
Add support for `auto_pad` attribute to `Conv`, `AveragePool`, `MaxPool` operator converters.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
compiler/mir-onnx-importer/AttributeHelpers.h
compiler/mir-onnx-importer/CMakeLists.txt
compiler/mir-onnx-importer/ConvPoolHelpers.cpp [new file with mode: 0644]
compiler/mir-onnx-importer/ConvPoolHelpers.h [new file with mode: 0644]
compiler/mir-onnx-importer/ONNXHelpers.cpp
compiler/mir-onnx-importer/ONNXHelpers.h
compiler/mir-onnx-importer/Op/AveragePool.cpp
compiler/mir-onnx-importer/Op/Conv.cpp
compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp
compiler/mir-onnx-importer/Op/MaxPool.cpp

index 24e14d4..d5cc150 100644 (file)
@@ -57,6 +57,14 @@ template <> inline onnx::TensorProto getAttributeValue(const onnx::AttributeProt
 }
 
 template <>
+inline std::vector<std::int32_t> getAttributeValue(const onnx::AttributeProto &attribute)
+{
+  assert(attribute.type() == onnx::AttributeProto::INTS);
+  // TODO Check that values fit.
+  return {attribute.ints().cbegin(), attribute.ints().cend()};
+}
+
+template <>
 inline std::vector<std::int64_t> getAttributeValue(const onnx::AttributeProto &attribute)
 {
   assert(attribute.type() == onnx::AttributeProto::INTS);
index e319621..20b7c34 100644 (file)
@@ -21,6 +21,8 @@ target_link_libraries(mir_onnx_proto PUBLIC libprotobuf)
 
 set(MIR_ONNX_IMPORTER_SOURCES
         AttributeHelpers.h
+        ConvPoolHelpers.cpp
+        ConvPoolHelpers.h
         ONNXHelpers.cpp
         ONNXHelpers.h
         ONNXImporterImpl.cpp
diff --git a/compiler/mir-onnx-importer/ConvPoolHelpers.cpp b/compiler/mir-onnx-importer/ConvPoolHelpers.cpp
new file mode 100644 (file)
index 0000000..367aeff
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConvPoolHelpers.h"
+
+#include <algorithm>
+#include <cassert>
+
+namespace mir_onnx
+{
+
+void inferAutoPadding(const std::string &pad_type, const mir::Shape &input_shape,
+                      const std::vector<std::int32_t> &dilations,
+                      const std::vector<std::int32_t> &strides,
+                      const std::vector<std::int32_t> &window_size,
+                      std::vector<std::int32_t> &padding_before,
+                      std::vector<std::int32_t> &padding_after)
+{
+  constexpr int num_spatial_dims = 2;
+
+  if (pad_type == "NOTSET")
+  {
+    // Do nothing.
+  }
+  else if (pad_type == "VALID")
+  {
+    padding_before.assign(num_spatial_dims, 0);
+    padding_after.assign(num_spatial_dims, 0);
+  }
+  else
+  {
+    padding_before.resize(num_spatial_dims);
+    padding_after.resize(num_spatial_dims);
+
+    assert(dilations.size() == num_spatial_dims);
+    assert(strides.size() == num_spatial_dims);
+    assert(window_size.size() == num_spatial_dims);
+
+    for (int i = 0; i < num_spatial_dims; ++i)
+    {
+      const std::int32_t eff_window_size = (window_size[i] - 1) * dilations[i] + 1;
+      // Assuming input has NCHW format.
+      const std::int32_t residual = input_shape.dim(2 + i) % strides[i];
+      const std::int32_t total_pad = std::max(
+          INT32_C(0), residual == 0 ? eff_window_size - strides[i] : eff_window_size - residual);
+      if (pad_type == "SAME_UPPER")
+      {
+        padding_before[i] = total_pad / 2;
+        padding_after[i] = (total_pad + 1) / 2;
+      }
+      else
+      {
+        assert(pad_type == "SAME_LOWER");
+        padding_before[i] = (total_pad + 1) / 2;
+        padding_after[i] = total_pad / 2;
+      }
+    }
+  }
+}
+
+} // namespace mir_onnx
diff --git a/compiler/mir-onnx-importer/ConvPoolHelpers.h b/compiler/mir-onnx-importer/ConvPoolHelpers.h
new file mode 100644 (file)
index 0000000..fbe9133
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIR_ONNX_CONV_POOL_HELPERS_H
+#define MIR_ONNX_CONV_POOL_HELPERS_H
+
+#include "mir/Shape.h"
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace mir_onnx
+{
+
+void inferAutoPadding(const std::string &pad_type, const mir::Shape &input_shape,
+                      const std::vector<std::int32_t> &dilations,
+                      const std::vector<std::int32_t> &strides,
+                      const std::vector<std::int32_t> &window_size,
+                      std::vector<std::int32_t> &padding_before,
+                      std::vector<std::int32_t> &padding_after);
+
+} // namespace mir_onnx
+
+#endif // MIR_ONNX_CONV_POOL_HELPERS_H
index 2035918..0b042a9 100644 (file)
@@ -127,35 +127,4 @@ mir::TensorVariant createTensor(const onnx::TensorProto *tensor)
   return mir::TensorVariant(type, shape, src_data);
 }
 
-void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata)
-{
-  const auto kernel_shape = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "kernel_shape");
-  assert(!kernel_shape.empty());
-  const auto strides = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "strides");
-  assert(!strides.empty());
-  const auto *pads_attribute = findAttribute(onnx_node, "pads");
-
-  cdata.kernel_shape = mir::Shape(kernel_shape.size());
-  for (std::size_t i = 0; i < kernel_shape.size(); ++i)
-  {
-    cdata.kernel_shape.dim(i) = kernel_shape[i];
-  }
-
-  cdata.strides_shape = mir::Shape(strides.size());
-  for (std::size_t i = 0; i < strides.size(); ++i)
-  {
-    cdata.strides_shape.dim(i) = strides[i];
-  }
-
-  if (pads_attribute != nullptr)
-  {
-    const auto pads = getAttributeValue<std::vector<std::int64_t>>(*pads_attribute);
-    assert(pads.size() == 4);
-    cdata.padding_before[0] = pads[0];
-    cdata.padding_before[1] = pads[1];
-    cdata.padding_after[0] = pads[2];
-    cdata.padding_after[1] = pads[3];
-  }
-}
-
 } // namespace mir_onnx
index 8ccf1fc..73c3a2d 100644 (file)
@@ -29,16 +29,6 @@ namespace mir_onnx
 mir::TensorVariant fixGroupedKernel(int groups, const mir::TensorVariant &folded_kernel);
 mir::TensorVariant createTensor(const onnx::TensorProto *tensor);
 
-struct KernelStridesPadding
-{
-  mir::Shape kernel_shape;
-  mir::Shape strides_shape;
-  std::vector<int32_t> padding_before{0, 0};
-  std::vector<int32_t> padding_after{0, 0};
-};
-
-void getKernelStridesPadding(const onnx::NodeProto &onnx_node, KernelStridesPadding &cdata);
-
 template <typename OpType, typename... Types>
 mir::Operation *createOp(mir::Graph *graph, Types &&... args)
 {
index e1ea2a1..962961e 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "ONNXHelpers.h"
 #include "AttributeHelpers.h"
+#include "ConvPoolHelpers.h"
 
 #include "mir/ops/PoolOp.h"
 
@@ -41,26 +42,50 @@ void AveragePoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
 void AveragePoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
                                          ConverterContext *context) const
 {
-  const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
-  // auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
-  if (auto_pad != "NOTSET")
-    throw std::runtime_error("Supported only explicit padding!");
-
   std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
   mir::Graph *graph = context->getGraph();
 
-  mir::ops::PoolOp::BorderType border_type = mir::ops::PoolOp::BorderType::EMPTY;
-  mir::ops::PoolOp::PoolingType pool_type = mir::ops::PoolOp::PoolingType::AVG;
-
-  KernelStridesPadding cdata;
-  // Transpose ONNX NCHW to MIR NHWC
-  auto t_input = convertONNXToMIR(graph, inputs[0]);
-
-  getKernelStridesPadding(onnx_node, cdata);
-
+  assert(inputs.size() == 1);
+  auto input = inputs[0];
+
+  const auto &input_shape = input->getShape();
+  if (input_shape.rank() != 4)
+    throw std::runtime_error("AveragePool: only 2-D input is supported.");
+
+  constexpr int num_spatial_dims = 2;
+
+  const auto strides =
+      getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
+  if (strides.size() != num_spatial_dims)
+    throw std::runtime_error("AveragePool: attribute 'strides' has incorrect size.");
+
+  const auto kernel_shape = getAttributeValue<std::vector<std::int32_t>>(onnx_node, "kernel_shape");
+  if (kernel_shape.size() != num_spatial_dims)
+    throw std::runtime_error("AveragePool: attribute 'kernel_shape' has incorrect size.");
+
+  std::vector<std::int32_t> padding_before(num_spatial_dims, 0);
+  std::vector<std::int32_t> padding_after(num_spatial_dims, 0);
+  if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
+  {
+    const auto pads = getAttributeValue<std::vector<std::int32_t>>(*pads_attr);
+    if (pads.size() != num_spatial_dims * 2)
+      throw std::runtime_error("AveragePool: attribute 'pads' has incorrect size.");
+    padding_before.assign(pads.cbegin(), std::next(pads.cbegin(), num_spatial_dims));
+    padding_after.assign(std::next(pads.cbegin(), num_spatial_dims), pads.cend());
+  }
+  else
+  {
+    const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
+    const std::vector<std::int32_t> dilations(num_spatial_dims, 1);
+    inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_shape, padding_before,
+                     padding_after);
+  }
+
+  input = convertONNXToMIR(graph, input);
   auto result =
-      createOp<mir::ops::PoolOp>(graph, t_input, pool_type, cdata.kernel_shape, cdata.strides_shape,
-                                 cdata.padding_before, cdata.padding_after, border_type)
+      createOp<mir::ops::PoolOp>(graph, input, mir::ops::PoolOp::PoolingType::AVG,
+                                 mir::Shape(kernel_shape), mir::Shape(strides), padding_before,
+                                 padding_after, mir::ops::PoolOp::BorderType::EMPTY)
           ->getOutput(0);
   result = convertMIRToONNX(graph, result);
 
index af942fd..7042fbe 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "ONNXHelpers.h"
 #include "AttributeHelpers.h"
+#include "ConvPoolHelpers.h"
 
 #include "mir/TensorUtil.h"
 
@@ -40,28 +41,58 @@ void ConvNodeConverter::convert(const onnx::NodeProto &onnx_node, ConverterConte
 
 void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterContext *context) const
 {
-  const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
-  // auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
-  if (auto_pad != "NOTSET")
-    throw std::runtime_error("Supported only explicit padding!");
-
-  const auto *dilations = findAttribute(onnx_node, "dilations");
-  if (dilations != nullptr)
-  {
-    // check default (=1) dilations on each spatial axis
-    for (auto index = 0; index < dilations->ints_size(); index++)
-      if (dilations->ints(index) != 1)
-        throw std::runtime_error("Not supported dilations in Conv operation!");
-  }
-
   std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
   mir::Graph *graph = context->getGraph();
+
   assert(inputs.size() >= 2);
+  auto input = inputs[0];
+  auto kernel = inputs[1];
+
+  const auto &input_shape = input->getShape();
+  if (input_shape.rank() != 4)
+    throw std::runtime_error("Conv: only 2-D input is supported.");
+
+  constexpr int num_spatial_dims = 2;
+
+  const auto dilations =
+      getAttributeValue(onnx_node, "dilations", std::vector<std::int32_t>(num_spatial_dims, 1));
+  if (dilations.size() != num_spatial_dims)
+    throw std::runtime_error("Conv: attribute 'dilations' has incorrect size.");
+  if (!std::all_of(dilations.cbegin(), dilations.cend(), [](std::int32_t x) { return x == 1; }))
+    throw std::runtime_error("Conv: attribute 'dilations' has unsupported value.");
+
+  const auto strides =
+      getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
+  if (strides.size() != num_spatial_dims)
+    throw std::runtime_error("Conv: attribute 'strides' has incorrect size.");
+
+  // Assuming kernel has OIHW format.
+  assert(kernel->getShape().rank() == 4);
+  const auto kernel_shape = getAttributeValue(
+      onnx_node, "kernel_shape",
+      std::vector<std::int32_t>{kernel->getShape().dim(2), kernel->getShape().dim(3)});
+  if (kernel_shape.size() != num_spatial_dims)
+    throw std::runtime_error("Conv: attribute 'kernel_shape' has incorrect size.");
+
+  std::vector<std::int32_t> padding_before(num_spatial_dims, 0);
+  std::vector<std::int32_t> padding_after(num_spatial_dims, 0);
+  if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
+  {
+    const auto pads = getAttributeValue<std::vector<std::int32_t>>(*pads_attr);
+    if (pads.size() != num_spatial_dims * 2)
+      throw std::runtime_error("Conv: attribute 'pads' has incorrect size.");
+    padding_before.assign(pads.cbegin(), std::next(pads.cbegin(), num_spatial_dims));
+    padding_after.assign(std::next(pads.cbegin(), num_spatial_dims), pads.cend());
+  }
+  else
+  {
+    const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
+    inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_shape, padding_before,
+                     padding_after);
+  }
 
-  KernelStridesPadding cdata;
-  getKernelStridesPadding(onnx_node, cdata);
   // FIXME: It can be non-constant value.
-  auto *in_weights = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
+  auto *in_weights = dynamic_cast<mir::ops::ConstantOp *>(kernel->getNode());
   assert(in_weights && "Weights could be a constant tensor only");
   const auto &in_weights_tensor = in_weights->getValue();
   // We should transpose ONNX MC(IO)HW to HWOI
@@ -74,16 +105,15 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
   bool is_depthwise = (group != 1) && (in_group_size == 1) && (out_channels == group);
 
   mir::Operation::Output *result;
-  auto transposed_input = convertONNXToMIR(graph, inputs[0]);
+  input = convertONNXToMIR(graph, input);
   if (is_depthwise)
   {
     // TODO handle properly kernel with layer multiplier
     auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
-    auto kernel = createOp<mir::ops::ConstantOp>(graph, transposed_tensor)->getOutput(0);
-    result =
-        createOp<mir::ops::DepthwiseConv2DOp>(graph, transposed_input, kernel, cdata.strides_shape,
-                                              cdata.padding_before, cdata.padding_after)
-            ->getOutput(0);
+    kernel = createOp<mir::ops::ConstantOp>(graph, transposed_tensor)->getOutput(0);
+    result = createOp<mir::ops::DepthwiseConv2DOp>(graph, input, kernel, mir::Shape(strides),
+                                                   padding_before, padding_after)
+                 ->getOutput(0);
   }
   else
   {
@@ -91,15 +121,16 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon
     if (group != 1)
       kernel_tensor = fixGroupedKernel(group, kernel_tensor);
     kernel_tensor = mir::transposeTensor<3, 0, 1, 2>(kernel_tensor);
-    auto kernel = createOp<mir::ops::ConstantOp>(graph, kernel_tensor)->getOutput(0);
-    result = createOp<mir::ops::Conv2DOp>(graph, transposed_input, kernel, cdata.strides_shape,
-                                          cdata.padding_before, cdata.padding_after)
+    kernel = createOp<mir::ops::ConstantOp>(graph, kernel_tensor)->getOutput(0);
+    result = createOp<mir::ops::Conv2DOp>(graph, input, kernel, mir::Shape(strides), padding_before,
+                                          padding_after)
                  ->getOutput(0);
   }
 
   if (inputs.size() > 2)
   {
-    result = createOp<mir::ops::AddOp>(graph, result, inputs[2])->getOutput(0);
+    auto bias = inputs[2];
+    result = createOp<mir::ops::AddOp>(graph, result, bias)->getOutput(0);
   }
 
   result = convertMIRToONNX(graph, result);
index ff5e1f8..048c5af 100644 (file)
@@ -28,21 +28,26 @@ void GlobalAveragePoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
 {
   std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
   mir::Graph *graph = context->getGraph();
-  mir::ops::PoolOp::BorderType border_type = mir::ops::PoolOp::BorderType::ZEROFILLED;
-  mir::ops::PoolOp::PoolingType pool_type = mir::ops::PoolOp::PoolingType::AVG;
 
-  KernelStridesPadding cdata;
-  // Transpose ONNX NCHW to MIR NHWC
-  auto t_input = convertONNXToMIR(graph, inputs[0]);
+  assert(inputs.size() == 1);
+  auto input = inputs[0];
+
+  const auto &input_shape = input->getShape();
+  if (input_shape.rank() != 4)
+    throw std::runtime_error("GlobalAveragePool: only 2-D input is supported.");
 
   // GlobalAveragePool is equivalent to AveragePool with kernel size equal
-  // to the spatial dimension of input tensor
-  cdata.kernel_shape = {t_input->getShape().dim(1), t_input->getShape().dim(2)};
-  cdata.strides_shape = {1, 1};
+  // to the spatial dimension of input tensor.
+  const std::vector<std::int32_t> window_size{input->getShape().dim(2), input->getShape().dim(3)};
+  const std::vector<std::int32_t> strides{1, 1};
+  const std::vector<std::int32_t> padding_before{0, 0};
+  const std::vector<std::int32_t> padding_after{0, 0};
 
+  input = convertONNXToMIR(graph, input);
   auto result =
-      createOp<mir::ops::PoolOp>(graph, t_input, pool_type, cdata.kernel_shape, cdata.strides_shape,
-                                 cdata.padding_before, cdata.padding_after, border_type)
+      createOp<mir::ops::PoolOp>(graph, input, mir::ops::PoolOp::PoolingType::AVG,
+                                 mir::Shape(window_size), mir::Shape(strides), padding_before,
+                                 padding_after, mir::ops::PoolOp::BorderType::ZEROFILLED)
           ->getOutput(0);
   result = convertMIRToONNX(graph, result);
 
index 53d6b85..7633018 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "ONNXHelpers.h"
 #include "AttributeHelpers.h"
+#include "ConvPoolHelpers.h"
 
 #include "mir/ops/PoolOp.h"
 
@@ -41,26 +42,50 @@ void MaxPoolNodeConverter::convert(const onnx::NodeProto &onnx_node,
 void MaxPoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node,
                                      ConverterContext *context) const
 {
-  const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
-  // auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID.
-  if (auto_pad != "NOTSET")
-    throw std::runtime_error("Supported only explicit padding!");
-
   std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
   mir::Graph *graph = context->getGraph();
 
-  mir::ops::PoolOp::BorderType border_type = mir::ops::PoolOp::BorderType::EMPTY;
-  mir::ops::PoolOp::PoolingType pool_type = mir::ops::PoolOp::PoolingType::MAX;
+  assert(inputs.size() == 1);
+  auto input = inputs[0];
+
+  const auto &input_shape = input->getShape();
+  if (input_shape.rank() != 4)
+    throw std::runtime_error("MaxPool: only 2-D input is supported.");
+
+  constexpr int num_spatial_dims = 2;
 
-  KernelStridesPadding cdata;
-  // Transpose ONNX NCHW to MIR NHWC
-  auto t_input = convertONNXToMIR(graph, inputs[0]);
+  const auto strides =
+      getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
+  if (strides.size() != num_spatial_dims)
+    throw std::runtime_error("MaxPool: attribute 'strides' has incorrect size.");
 
-  getKernelStridesPadding(onnx_node, cdata);
+  const auto kernel_shape = getAttributeValue<std::vector<std::int32_t>>(onnx_node, "kernel_shape");
+  if (kernel_shape.size() != num_spatial_dims)
+    throw std::runtime_error("MaxPool: attribute 'kernel_shape' has incorrect size.");
+
+  std::vector<std::int32_t> padding_before;
+  std::vector<std::int32_t> padding_after;
+  if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
+  {
+    const auto pads = getAttributeValue<std::vector<std::int32_t>>(*pads_attr);
+    if (pads.size() != num_spatial_dims * 2)
+      throw std::runtime_error("MaxPool: attribute 'pads' has incorrect size.");
+    padding_before.assign(pads.cbegin(), std::next(pads.cbegin(), num_spatial_dims));
+    padding_after.assign(std::next(pads.cbegin(), num_spatial_dims), pads.cend());
+  }
+  else
+  {
+    const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
+    const std::vector<std::int32_t> dilations(num_spatial_dims, 1);
+    inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_shape, padding_before,
+                     padding_after);
+  }
 
+  input = convertONNXToMIR(graph, input);
   auto result =
-      createOp<mir::ops::PoolOp>(graph, t_input, pool_type, cdata.kernel_shape, cdata.strides_shape,
-                                 cdata.padding_before, cdata.padding_after, border_type)
+      createOp<mir::ops::PoolOp>(graph, input, mir::ops::PoolOp::PoolingType::MAX,
+                                 mir::Shape(kernel_shape), mir::Shape(strides), padding_before,
+                                 padding_after, mir::ops::PoolOp::BorderType::EMPTY)
           ->getOutput(0);
   result = convertMIRToONNX(graph, result);