From 1001caf04e5ac500841db305b665a04d33e3c191 Mon Sep 17 00:00:00 2001
From: Maxim Vafin
Date: Thu, 4 Jun 2020 14:48:31 +0300
Subject: [PATCH] Add support for ONNX Pad-11 (#744)

---
 model-optimizer/automation/package_BOM.txt       |  1 +
 .../extensions/front/onnx/pad_converter.py       | 56 ++++++++++++++++++
 .../extensions/front/onnx/pad_converter_test.py  | 66 ++++++++++++++++++++++
 model-optimizer/extensions/front/onnx/pad_ext.py | 23 ++++----
 model-optimizer/mo/ops/pad.py                    | 22 ++++++++
 5 files changed, 158 insertions(+), 10 deletions(-)
 create mode 100644 model-optimizer/extensions/front/onnx/pad_converter.py
 create mode 100644 model-optimizer/extensions/front/onnx/pad_converter_test.py

diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt
index 98374c0..9cf2e62 100644
--- a/model-optimizer/automation/package_BOM.txt
+++ b/model-optimizer/automation/package_BOM.txt
@@ -282,6 +282,7 @@ extensions/front/onnx/normalize_ext.py
 extensions/front/onnx/normalize_l2_normalize.py
 extensions/front/onnx/one_hot_ext.py
 extensions/front/onnx/one_hot_normalize.py
+extensions/front/onnx/pad_converter.py
 extensions/front/onnx/pad_ext.py
 extensions/front/onnx/parameter_ext.py
 extensions/front/onnx/person_detection_crossroad.json
diff --git a/model-optimizer/extensions/front/onnx/pad_converter.py b/model-optimizer/extensions/front/onnx/pad_converter.py
new file mode 100644
index 0000000..691ef8c
--- /dev/null
+++ b/model-optimizer/extensions/front/onnx/pad_converter.py
@@ -0,0 +1,56 @@
+"""
+ Copyright (C) 2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+from extensions.ops.split import Split
+from mo.front.common.partial_infer.utils import int64_array
+from mo.front.common.replacement import FrontReplacementOp
+from mo.front.tf.graph_utils import create_op_with_const_inputs
+from mo.graph.graph import Graph, rename_node, Node
+from mo.ops.const import Const
+from mo.ops.pad import Pad
+
+
+class ONNXPadToPad(FrontReplacementOp):
+    """
+    This transformation converts ONNXPad operation (ONNX semantic) to Pad operation (Inference Engine semantic).
+    Refer to the Op implementation for the operations semantics description.
+    """
+    op = 'ONNXPad'
+    enabled = True
+
+    def replace_op(self, graph: Graph, node: Node):
+        # save the original node name to use it in the new Pad op instance
+        original_name = node.soft_get('name', node.id)
+        rename_node(node, original_name + '/TBR')
+
+        new_pad = Pad(graph, {'mode': node.soft_get('mode', None)}).create_node()
+        rename_node(new_pad, original_name)
+
+        node.in_port(0).get_connection().set_destination(new_pad.in_port(0))
+
+        if node.soft_get('mode') == 'constant':
+            # the input with fill value is an optional third input in ONNX
+            if not node.in_port(2).disconnected():
+                node.in_port(2).get_connection().set_destination(new_pad.in_port(3))
+            else:
+                new_pad.in_port(3).connect(Const(graph, {'value': 0.0}).create_node().out_port(0))
+
+        # convert ONNX representation of the pads as [2 * N] to MO representation: [N] and [N]
+        split_pads = create_op_with_const_inputs(graph, Split, {1: int64_array(0)}, {'num_splits': 2})
+        node.in_port(1).get_connection().set_destination(split_pads.in_port(0))
+        split_pads.out_port(0).connect(new_pad.in_port(1))
+        split_pads.out_port(1).connect(new_pad.in_port(2))
+
+        return [new_pad.id]
diff --git a/model-optimizer/extensions/front/onnx/pad_converter_test.py b/model-optimizer/extensions/front/onnx/pad_converter_test.py
new file mode 100644
index 0000000..560399e
--- /dev/null
+++ b/model-optimizer/extensions/front/onnx/pad_converter_test.py
@@ -0,0 +1,66 @@
+"""
+ Copyright (C) 2018-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.front.onnx.pad_converter import ONNXPadToPad
+from mo.utils.ir_engine.compare_graphs import compare_graphs
+from mo.utils.unittest.graph import build_graph, const
+
+nodes_attributes = {
+    'placeholder': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
+    **const('pads', np.array([1, 2, 3, 4], dtype=np.int64)),
+    **const('value', np.array(0.5, dtype=np.float32)),
+    'onnx_pad': {'type': None, 'kind': 'op', 'op': 'ONNXPad', 'name': 'my_pad', 'mode': 'constant'},
+    'result': {'type': 'Result', 'value': None, 'kind': 'op', 'op': 'Result'},
+
+    'pad': {'type': 'Pad', 'kind': 'op', 'op': 'Pad'},
+    'split': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 2},
+    **const('split_axis', np.array(0, dtype=np.int32)),
+}
+
+
+class ONNXPadToPadTest(unittest.TestCase):
+    def test_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder', 'onnx_pad', {'in': 0, 'out': 0}),
+                             ('pads', 'onnx_pad', {'in': 1, 'out': 0}),
+                             ('value', 'onnx_pad', {'in': 2, 'out': 0}),
+                             ('onnx_pad', 'result', {'in': 0, 'out': 0}),
+                             ],
+                            {}, nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder', 'pad', {'in': 0, 'out': 0}),
+                                 ('pads', 'split', {'in': 0, 'out': 0}),
+                                 ('split_axis', 'split', {'in': 1, 'out': 0}),
+                                 ('split', 'pad', {'in': 1, 'out': 0}),
+                                 ('split', 'pad', {'in': 2, 'out': 1}),
+                                 ('value', 'pad', {'in': 3, 'out': 0}),
+                                 ('pad', 'result')
+                                 ],
+                                {}, nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+        graph.stage = 'front'
+
+        ONNXPadToPad().find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+        self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Pad')[0]]['name'] == 'my_pad')
diff --git a/model-optimizer/extensions/front/onnx/pad_ext.py b/model-optimizer/extensions/front/onnx/pad_ext.py
index a372460..8246f4c 100644
--- a/model-optimizer/extensions/front/onnx/pad_ext.py
+++ b/model-optimizer/extensions/front/onnx/pad_ext.py
@@ -17,8 +17,8 @@
 import numpy as np
 
 from mo.front.extractor import FrontExtractorOp
-from mo.front.onnx.extractors.utils import onnx_attr
-from mo.ops.pad import AttributedPad
+from mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version
+from mo.ops.pad import AttributedPad, ONNXPad
 
 
 class PadFrontExtractor(FrontExtractorOp):
@@ -28,16 +28,19 @@ class PadFrontExtractor(FrontExtractorOp):
     @classmethod
     def extract(cls, node):
         mode = onnx_attr(node, 'mode', 's', default='constant', dst_type=lambda x: x.decode())
-        pads = onnx_attr(node, 'pads', 'ints', dst_type=lambda x: np.array(x, dtype=np.int64))
-        value = onnx_attr(node, 'value', 'f', default=0.)
+        if get_onnx_opset_version(node) < 11:
+            pads = onnx_attr(node, 'pads', 'ints', dst_type=lambda x: np.array(x, dtype=np.int64))
+            value = onnx_attr(node, 'value', 'f', default=0.)
 
-        assert pads is not None
+            assert pads is not None
 
-        # MO Pad op and ONNX Pad op have different format for pads values
-        # MO Pad has Dx2 where D is the total number of dimensions
-        # ONNX Pad pads flat layout, so need to reshape and transpose
+            # MO Pad op and ONNX Pad op have different format for pads values
+            # MO Pad has Dx2 where D is the total number of dimensions
+            # ONNX Pad pads flat layout, so need to reshape and transpose
 
-        pads = np.transpose(pads.reshape([2, -1]))
+            pads = np.transpose(pads.reshape([2, -1]))
 
-        AttributedPad.update_node_stat(node, {'mode': mode, 'pads': pads, 'fill_value': value})
+            AttributedPad.update_node_stat(node, {'mode': mode, 'pads': pads, 'fill_value': value})
+        else:
+            ONNXPad.update_node_stat(node, {'mode': mode})
         return cls.enabled
diff --git a/model-optimizer/mo/ops/pad.py b/model-optimizer/mo/ops/pad.py
index d4a5f8d..66b88c3 100644
--- a/model-optimizer/mo/ops/pad.py
+++ b/model-optimizer/mo/ops/pad.py
@@ -138,3 +138,25 @@
             'out_ports_count': 1,
             'mode': 'constant',
         }, attrs)
+
+class ONNXPad(Op):
+    """ Pad operation that explicitly extends an input tensor at borders.
+
+        This operation follows the ONNX Pad semantics with the following inputs:
+        1. Input tensor.
+        2. Pads values.
+        3. Fill value (optional).
+    """
+
+    op = 'ONNXPad'
+    enabled = False
+
+    def __init__(self, graph: Graph, attrs: dict):
+        super().__init__(graph, {
+            'op': self.op,
+            'type': None,
+            'infer': None,  # the operation should be replaced before the shape inference
+            'in_ports_count': 3,
+            'out_ports_count': 1,
+            'mode': 'constant',
+        }, attrs)
-- 
2.7.4
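
Note on the ONNX Pad opset-11 change that motivates this patch: before opset 11, ONNX Pad carried pads and value as node attributes, so the extractor could fold them straight into AttributedPad; from opset 11 on they arrive as inputs, which is why the extractor now only records the mode, emits the intermediate ONNXPad operation, and leaves the rest to the ONNXPadToPad front transformation that splits the pads input for the Inference Engine Pad op. Below is a minimal sketch of the two node forms and of the pads split, written against the public onnx and numpy packages; the tensor names ('x', 'pads', 'value') and the pad amounts are illustrative and are not taken from this patch.

import numpy as np
from onnx import helper

# Pad-2 style (opset < 11): pads and the fill value are attributes of the node,
# so they are known at extraction time and can go into AttributedPad directly.
pad_v2 = helper.make_node('Pad', inputs=['x'], outputs=['y'],
                          mode='constant', pads=[0, 1, 0, 2], value=0.0)

# Pad-11 style: pads (and the optional constant value) are inputs, i.e. graph
# tensors, so extraction defers the conversion to the ONNXPadToPad replacement.
pad_v11 = helper.make_node('Pad', inputs=['x', 'pads', 'value'], outputs=['y'],
                           mode='constant')

# The replacement mirrors this layout conversion: ONNX stores pads as a flat
# [x1_begin, x2_begin, ..., x1_end, x2_end, ...] vector of length 2 * N, while
# the Inference Engine Pad op takes pads_begin and pads_end as separate inputs,
# which is what Split(num_splits=2, axis=0) produces in the transformation.
onnx_pads = np.array([0, 1, 0, 2], dtype=np.int64)  # 2 * N values for an N-D input
pads_begin, pads_end = np.split(onnx_pads, 2)
print(pads_begin, pads_end)  # -> [0 1] [0 2]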