extensions/front/onnx/normalize_l2_normalize.py
extensions/front/onnx/one_hot_ext.py
extensions/front/onnx/one_hot_normalize.py
+extensions/front/onnx/pad_converter.py
extensions/front/onnx/pad_ext.py
extensions/front/onnx/parameter_ext.py
extensions/front/onnx/person_detection_crossroad.json
--- /dev/null
+"""
+ Copyright (C) 2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+from extensions.ops.split import Split
+from mo.front.common.partial_infer.utils import int64_array
+from mo.front.common.replacement import FrontReplacementOp
+from mo.front.tf.graph_utils import create_op_with_const_inputs
+from mo.graph.graph import Graph, rename_node, Node
+from mo.ops.const import Const
+from mo.ops.pad import Pad
+
+
class ONNXPadToPad(FrontReplacementOp):
    """
    Converts an ONNXPad operation (ONNX semantics: a single flat `pads` input of
    length 2 * N and an optional constant fill value input) into the Inference
    Engine Pad operation (separate pads_begin / pads_end inputs plus a fill
    value input). Refer to the Op implementation for the operations semantics
    description.
    """
    op = 'ONNXPad'
    enabled = True

    def replace_op(self, graph: Graph, node: Node):
        # Preserve the original node name so the resulting Pad carries it.
        pad_name = node.soft_get('name', node.id)
        rename_node(node, pad_name + '/TBR')

        ie_pad = Pad(graph, {'mode': node.soft_get('mode', None)}).create_node()
        rename_node(ie_pad, pad_name)

        # Re-route the data input to the new Pad.
        node.in_port(0).get_connection().set_destination(ie_pad.in_port(0))

        # ONNX packs pads as a single [2 * N] tensor; MO Pad expects two [N]
        # tensors (begins and ends), so split the vector in half along axis 0.
        pads_splitter = create_op_with_const_inputs(graph, Split, {1: int64_array(0)}, {'num_splits': 2})
        node.in_port(1).get_connection().set_destination(pads_splitter.in_port(0))
        pads_splitter.out_port(0).connect(ie_pad.in_port(1))
        pads_splitter.out_port(1).connect(ie_pad.in_port(2))

        if node.soft_get('mode') == 'constant':
            # The fill value is an optional third input in ONNX; default to 0.0
            # when the original node does not provide one.
            if node.in_port(2).disconnected():
                fill_value = Const(graph, {'value': 0.0}).create_node()
                ie_pad.in_port(3).connect(fill_value.out_port(0))
            else:
                node.in_port(2).get_connection().set_destination(ie_pad.in_port(3))

        return [ie_pad.id]
--- /dev/null
+"""
+ Copyright (C) 2018-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.front.onnx.pad_converter import ONNXPadToPad
+from mo.utils.ir_engine.compare_graphs import compare_graphs
+from mo.utils.unittest.graph import build_graph, const
+
# Node descriptions shared by both graphs built in the test below:
# the original graph (Parameter -> ONNXPad -> Result with const pads/value inputs)
# and the reference graph (pads split into begins/ends feeding an IE Pad).
nodes_attributes = {
    'placeholder': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    **const('pads', np.array([1, 2, 3, 4], dtype=np.int64)),
    **const('value', np.array(0.5, dtype=np.float32)),
    'onnx_pad': {'type': None, 'kind': 'op', 'op': 'ONNXPad', 'name': 'my_pad', 'mode': 'constant'},
    'result': {'type': 'Result', 'value': None, 'kind': 'op', 'op': 'Result'},

    'pad': {'type': 'Pad', 'kind': 'op', 'op': 'Pad'},
    'split': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 2},
    **const('split_axis', np.array(0, dtype=np.int32)),
}
+
+
# NOTE(review): the class name looks copy-pasted from the AttributedClampNormalizer
# test — it actually exercises the ONNXPadToPad transformation; consider renaming.
class AttributedClampNormalizerTest(unittest.TestCase):
    def test_1(self):
        """Check that ONNXPad is replaced with Split + Pad and the name is preserved."""
        # Original graph: Parameter -> ONNXPad(pads, value) -> Result.
        graph = build_graph(nodes_attributes,
                            [('placeholder', 'onnx_pad', {'in': 0, 'out': 0}),
                             ('pads', 'onnx_pad', {'in': 1, 'out': 0}),
                             ('value', 'onnx_pad', {'in': 2, 'out': 0}),
                             ('onnx_pad', 'result', {'in': 0, 'out': 0}),
                             ],
                            {}, nodes_with_edges_only=True)

        # Reference graph: flat pads are split into begins/ends feeding Pad.
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder', 'pad', {'in': 0, 'out': 0}),
                                 ('pads', 'split', {'in': 0, 'out': 0}),
                                 ('split_axis', 'split', {'in': 1, 'out': 0}),
                                 ('split', 'pad', {'in': 1, 'out': 0}),
                                 ('split', 'pad', {'in': 2, 'out': 1}),
                                 ('value', 'pad', {'in': 3, 'out': 0}),
                                 ('pad', 'result')
                                 ],
                                {}, nodes_with_edges_only=True)

        graph.graph['layout'] = 'NCHW'
        graph.stage = 'front'

        ONNXPadToPad().find_and_replace_pattern(graph)

        # The transformed graph must match the reference graph ...
        (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
        self.assertTrue(flag, resp)
        # ... and the new Pad node must inherit the original node's name.
        self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Pad')[0]]['name'] == 'my_pad')
import numpy as np
from mo.front.extractor import FrontExtractorOp
-from mo.front.onnx.extractors.utils import onnx_attr
-from mo.ops.pad import AttributedPad
+from mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version
+from mo.ops.pad import AttributedPad, ONNXPad
class PadFrontExtractor(FrontExtractorOp):
    @classmethod
    def extract(cls, node):
        """Extract the ONNX Pad node, dispatching on the model's opset version.

        Before opset 11 the pads and fill value are node attributes, so the node
        can be converted directly to AttributedPad. From opset 11 onward they
        arrive as inputs, so the node is kept as ONNXPad and converted later by
        the ONNXPadToPad front transformation.
        """
        # 'mode' is an attribute in every opset version.
        mode = onnx_attr(node, 'mode', 's', default='constant', dst_type=lambda x: x.decode())
        if get_onnx_opset_version(node) < 11:
            pads = onnx_attr(node, 'pads', 'ints', dst_type=lambda x: np.array(x, dtype=np.int64))
            value = onnx_attr(node, 'value', 'f', default=0.)

            assert pads is not None

            # MO Pad op and ONNX Pad op have different format for pads values
            # MO Pad has Dx2 where D is the total number of dimensions
            # ONNX Pad pads flat layout, so need to reshape and transpose

            pads = np.transpose(pads.reshape([2, -1]))

            AttributedPad.update_node_stat(node, {'mode': mode, 'pads': pads, 'fill_value': value})
        else:
            # Opset >= 11: pads/value are inputs, not attributes — defer to ONNXPadToPad.
            ONNXPad.update_node_stat(node, {'mode': mode})
        return cls.enabled
'out_ports_count': 1,
'mode': 'constant',
}, attrs)
+
class ONNXPad(Op):
    """ Pad operation that explicitly extends an input tensor at borders.

    This operation with the ONNX semantics with inputs:
    1. Input tensor.
    2. Pad values
    3. Fill value (Optional)
    """

    op = 'ONNXPad'
    enabled = False

    def __init__(self, graph: Graph, attrs: dict):
        mandatory_props = {
            'op': self.op,
            'type': None,
            # no shape inference: the op must be replaced (by ONNXPadToPad)
            # before the shape inference stage runs
            'infer': None,
            'in_ports_count': 3,
            'out_ports_count': 1,
            'mode': 'constant',
        }
        super().__init__(graph, mandatory_props, attrs)