* Using new operations interface in importer.
Changes are due to the new signatures of the operation constructors (attribute structs instead of positional parameters).
Signed-off-by: Dmitry Gusev <d.gusev@partner.samsung.com>
padding_after);
}
- auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, kernel_shape, strides, padding_before,
- padding_after, false, mir::DataFormat::NCHW)
- ->getOutput(0);
+ mir::AvgPool2DOpAttributes attributes;
+ attributes.window = kernel_shape;
+ attributes.strides = strides;
+ attributes.padding_before = padding_before;
+ attributes.padding_after = padding_after;
+ attributes.include_pad = false;
+ attributes.data_format = mir::DataFormat::NCHW;
+ auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, attributes)->getOutput(0);
context->setNodeOutputs(onnx_node, {result});
}
bool is_depthwise = (group != 1) && (in_group_size == 1) && (out_channels == group);
mir::Operation::Output *result;
+ mir::Conv2DOpAttributes attributes;
+ attributes.strides = strides;
+ attributes.padding_before = padding_before;
+ attributes.padding_after = padding_after;
+ attributes.data_format = mir::DataFormat::NCHW;
if (is_depthwise)
{
// TODO handle properly kernel with layer multiplier
auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
kernel = createOp<mir::ops::ConstantOp>(graph, transposed_tensor)->getOutput(0);
- result = createOp<mir::ops::DepthwiseConv2DOp>(graph, input, kernel, strides, padding_before,
- padding_after, mir::DataFormat::NCHW)
- ->getOutput(0);
+ result = createOp<mir::ops::DepthwiseConv2DOp>(graph, input, kernel, attributes)->getOutput(0);
}
else
{
// HWIO -> OHWI
kernel_tensor = mir::transposeTensor<3, 0, 1, 2>(kernel_tensor);
kernel = createOp<mir::ops::ConstantOp>(graph, kernel_tensor)->getOutput(0);
- result = createOp<mir::ops::Conv2DOp>(graph, input, kernel, strides, padding_before,
- padding_after, mir::DataFormat::NCHW)
- ->getOutput(0);
+ result = createOp<mir::ops::Conv2DOp>(graph, input, kernel, attributes)->getOutput(0);
}
if (inputs.size() > 2)
throw std::runtime_error("ConvTranspose: attribute 'output_shape' has incorrect size.");
const mir::Shape output_shape{input_shape.dim(0), kernel->getShape().dim(2), output_size[0],
output_size[1]};
- result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, strides,
- mir::ops::PaddingType::SameUpper, output_shape,
- mir::DataFormat::NCHW)
+ mir::Deconv2DOpAttributes attributes;
+ attributes.strides = strides;
+ attributes.data_format = mir::DataFormat::NCHW;
+ attributes.padding_type = mir::ops::PaddingType::SameUpper;
+ result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes, output_shape)
->getOutput(0);
}
else
inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_size, padding_before,
padding_after);
}
- result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, strides, padding_before,
- padding_after, mir::DataFormat::NCHW)
- ->getOutput(0);
+ mir::Deconv2DOpAttributes attributes;
+ attributes.strides = strides;
+ attributes.padding_before = padding_before;
+ attributes.padding_after = padding_after;
+ attributes.data_format = mir::DataFormat::NCHW;
+ result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes)->getOutput(0);
}
if (inputs.size() > 2)
// GlobalAveragePool is equivalent to AveragePool with kernel size equal
// to the spatial dimension of input tensor.
const std::vector<std::int32_t> window_size{input->getShape().dim(2), input->getShape().dim(3)};
- const std::vector<std::int32_t> strides{1, 1};
- const std::vector<std::int32_t> padding_before{0, 0};
- const std::vector<std::int32_t> padding_after{0, 0};
+ mir::AvgPool2DOpAttributes attributes;
+ attributes.window = window_size;
+ attributes.data_format = mir::DataFormat::NCHW;
- auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, window_size, strides, padding_before,
- padding_after, true, mir::DataFormat::NCHW)
- ->getOutput(0);
+ auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, attributes)->getOutput(0);
context->setNodeOutputs(onnx_node, {result});
}
padding_after);
}
- auto result = createOp<mir::ops::MaxPool2DOp>(graph, input, kernel_shape, strides, padding_before,
- padding_after, mir::DataFormat::NCHW)
- ->getOutput(0);
+ mir::MaxPool2DOpAttributes attributes;
+ attributes.window = kernel_shape;
+ attributes.strides = strides;
+ attributes.padding_before = padding_before;
+ attributes.padding_after = padding_after;
+ attributes.data_format = mir::DataFormat::NCHW;
+ auto result = createOp<mir::ops::MaxPool2DOp>(graph, input, attributes)->getOutput(0);
context->setNodeOutputs(onnx_node, {result});
}
const int num_dims = input->getShape().rank();
assert(pads.size() == num_dims * 2);
- std::vector<std::int32_t> padding_before(num_dims);
- std::vector<std::int32_t> padding_after(num_dims);
+ mir::PadOpAttributes attributes(num_dims);
for (int i = 0; i < num_dims; i++)
{
- padding_before[i] = pads[i];
- padding_after[i] = pads[num_dims + i];
+ attributes.padding_before[i] = pads[i];
+ attributes.padding_after[i] = pads[num_dims + i];
}
- auto result =
- createOp<mir::ops::PadOp>(graph, input, padding_before, padding_after, value)->getOutput(0);
+ attributes.padding_value = value;
+
+ auto result = createOp<mir::ops::PadOp>(graph, input, attributes)->getOutput(0);
context->setNodeOutputs(onnx_node, {result});
}