From: Gusev Dmitry/Engineer/AI Tools Lab /SRR/Samsung Electronics Date: Wed, 16 Oct 2019 10:56:40 +0000 (+0300) Subject: [mir-onnx-importer] Use new operations interface in importer. (#8182) X-Git-Tag: submit/tizen/20191205.083104~747 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=31f6572163cd6989d9c147ee1a6a35ab5f71dd88;p=platform%2Fcore%2Fml%2Fnnfw.git [mir-onnx-importer] Use new operations interface in importer. (#8182) * Using new operations interface in importer. Changes due to new operations constructors signatures. Signed-off-by: Dmitry Gusev --- diff --git a/compiler/mir-onnx-importer/Op/AveragePool.cpp b/compiler/mir-onnx-importer/Op/AveragePool.cpp index cc9e647..37381db 100644 --- a/compiler/mir-onnx-importer/Op/AveragePool.cpp +++ b/compiler/mir-onnx-importer/Op/AveragePool.cpp @@ -81,9 +81,14 @@ void AveragePoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node, padding_after); } - auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, kernel_shape, strides, padding_before, - padding_after, false, mir::DataFormat::NCHW) - ->getOutput(0); + mir::AvgPool2DOpAttributes attributes; + attributes.window = kernel_shape; + attributes.strides = strides; + attributes.padding_before = padding_before; + attributes.padding_after = padding_after; + attributes.include_pad = false; + attributes.data_format = mir::DataFormat::NCHW; + auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, attributes)->getOutput(0); context->setNodeOutputs(onnx_node, {result}); } diff --git a/compiler/mir-onnx-importer/Op/Conv.cpp b/compiler/mir-onnx-importer/Op/Conv.cpp index 06c31c3..ed9cc49 100644 --- a/compiler/mir-onnx-importer/Op/Conv.cpp +++ b/compiler/mir-onnx-importer/Op/Conv.cpp @@ -106,14 +106,17 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon bool is_depthwise = (group != 1) && (in_group_size == 1) && (out_channels == group); mir::Operation::Output *result; + mir::Conv2DOpAttributes attributes; + attributes.strides = strides; + attributes.padding_before = 
padding_before; + attributes.padding_after = padding_after; + attributes.data_format = mir::DataFormat::NCHW; if (is_depthwise) { // TODO handle properly kernel with layer multiplier auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor); kernel = createOp<mir::ops::ConstantOp>(graph, transposed_tensor)->getOutput(0); - result = createOp<mir::ops::DepthwiseConv2DOp>(graph, input, kernel, strides, padding_before, - padding_after, mir::DataFormat::NCHW) - ->getOutput(0); + result = createOp<mir::ops::DepthwiseConv2DOp>(graph, input, kernel, attributes)->getOutput(0); } else { @@ -123,9 +126,7 @@ void ConvNodeConverter::convertV1(const onnx::NodeProto &onnx_node, ConverterCon // HWIO -> OHWI kernel_tensor = mir::transposeTensor<3, 0, 1, 2>(kernel_tensor); kernel = createOp<mir::ops::ConstantOp>(graph, kernel_tensor)->getOutput(0); - result = createOp<mir::ops::Conv2DOp>(graph, input, kernel, strides, padding_before, - padding_after, mir::DataFormat::NCHW) - ->getOutput(0); + result = createOp<mir::ops::Conv2DOp>(graph, input, kernel, attributes)->getOutput(0); } if (inputs.size() > 2) diff --git a/compiler/mir-onnx-importer/Op/ConvTranspose.cpp b/compiler/mir-onnx-importer/Op/ConvTranspose.cpp index 4bd47cc..4162516 100644 --- a/compiler/mir-onnx-importer/Op/ConvTranspose.cpp +++ b/compiler/mir-onnx-importer/Op/ConvTranspose.cpp @@ -103,9 +103,11 @@ void ConvTransposeNodeConverter::convertV1(const onnx::NodeProto &onnx_node, throw std::runtime_error("ConvTranspose: attribute 'output_shape' has incorrect size."); const mir::Shape output_shape{input_shape.dim(0), kernel->getShape().dim(2), output_size[0], output_size[1]}; - result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, strides, - mir::ops::PaddingType::SameUpper, output_shape, - mir::DataFormat::NCHW) + mir::Deconv2DOpAttributes attributes; + attributes.strides = strides; + attributes.data_format = mir::DataFormat::NCHW; + attributes.padding_type = mir::ops::PaddingType::SameUpper; + result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes, output_shape) ->getOutput(0); } else @@ -129,9 +131,12 @@ void ConvTransposeNodeConverter::convertV1(const 
onnx::NodeProto &onnx_node, inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_size, padding_before, padding_after); } - result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, strides, padding_before, - padding_after, mir::DataFormat::NCHW) - ->getOutput(0); + mir::Deconv2DOpAttributes attributes; + attributes.strides = strides; + attributes.padding_before = padding_before; + attributes.padding_after = padding_after; + attributes.data_format = mir::DataFormat::NCHW; + result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes)->getOutput(0); } if (inputs.size() > 2) diff --git a/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp b/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp index fc161e1..9dd429c 100644 --- a/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp +++ b/compiler/mir-onnx-importer/Op/GlobalAveragePool.cpp @@ -39,13 +39,11 @@ void GlobalAveragePoolNodeConverter::convert(const onnx::NodeProto &onnx_node, // GlobalAveragePool is equivalent to AveragePool with kernel size equal // to the spatial dimension of input tensor. 
const std::vector<std::int32_t> window_size{input->getShape().dim(2), input->getShape().dim(3)}; - const std::vector<std::int32_t> strides{1, 1}; - const std::vector<std::int32_t> padding_before{0, 0}; - const std::vector<std::int32_t> padding_after{0, 0}; + mir::AvgPool2DOpAttributes attributes; + attributes.window = window_size; + attributes.data_format = mir::DataFormat::NCHW; - auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, window_size, strides, padding_before, - padding_after, true, mir::DataFormat::NCHW) - ->getOutput(0); + auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, attributes)->getOutput(0); context->setNodeOutputs(onnx_node, {result}); } diff --git a/compiler/mir-onnx-importer/Op/MaxPool.cpp b/compiler/mir-onnx-importer/Op/MaxPool.cpp index a8fa520..2f18028 100644 --- a/compiler/mir-onnx-importer/Op/MaxPool.cpp +++ b/compiler/mir-onnx-importer/Op/MaxPool.cpp @@ -81,9 +81,13 @@ void MaxPoolNodeConverter::convertV1(const onnx::NodeProto &onnx_node, padding_after); } - auto result = createOp<mir::ops::MaxPool2DOp>(graph, input, kernel_shape, strides, padding_before, - padding_after, mir::DataFormat::NCHW) - ->getOutput(0); + mir::MaxPool2DOpAttributes attributes; + attributes.window = kernel_shape; + attributes.strides = strides; + attributes.padding_before = padding_before; + attributes.padding_after = padding_after; + attributes.data_format = mir::DataFormat::NCHW; + auto result = createOp<mir::ops::MaxPool2DOp>(graph, input, attributes)->getOutput(0); context->setNodeOutputs(onnx_node, {result}); } diff --git a/compiler/mir-onnx-importer/Op/Pad.cpp b/compiler/mir-onnx-importer/Op/Pad.cpp index 5015c41..1608cfe 100644 --- a/compiler/mir-onnx-importer/Op/Pad.cpp +++ b/compiler/mir-onnx-importer/Op/Pad.cpp @@ -55,16 +55,16 @@ void PadNodeConverter::convertPadAttrName(const std::string &pad_attr_name, const int num_dims = input->getShape().rank(); assert(pads.size() == num_dims * 2); - std::vector<std::int32_t> padding_before(num_dims); - std::vector<std::int32_t> padding_after(num_dims); + mir::PadOpAttributes attributes(num_dims); for (int i = 0; i < num_dims; i++) { - padding_before[i] = pads[i]; - 
padding_after[i] = pads[num_dims + i]; + attributes.padding_before[i] = pads[i]; + attributes.padding_after[i] = pads[num_dims + i]; } - auto result = - createOp<mir::ops::PadOp>(graph, input, padding_before, padding_after, value)->getOutput(0); + attributes.padding_value = value; + + auto result = createOp<mir::ops::PadOp>(graph, input, attributes)->getOutput(0); context->setNodeOutputs(onnx_node, {result}); }