From: Gleb Kazantaev
Date: Tue, 18 Aug 2020 14:40:45 +0000 (+0300)
Subject: Convert Pad to GroupConvolution transformation (#1826)
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8c5262f8649d542a2e15ac6d965d58581fb45e68;p=platform%2Fupstream%2Fdldt.git

Convert Pad to GroupConvolution transformation (#1826)

* Added ConvertPadToConvolution pass
* Removed NasNet transformation from MO
* Renamed Conv to GroupConv; Added tests
* Fixed typo; Added RTTI
---

diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
index b9bb493..dea6792 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
@@ -29,7 +29,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -82,7 +81,8 @@ static void Transformation(ICNNNetwork::Ptr& clonedNetwork) {
                std::dynamic_pointer_cast(node) ||
                std::dynamic_pointer_cast(node) ||
                std::dynamic_pointer_cast(node) ||
-               std::dynamic_pointer_cast(node);
+               std::dynamic_pointer_cast(node) ||
+               std::dynamic_pointer_cast<const ngraph::opset4::Pad>(node);
     };
     auto nGraphFunc = clonedNetwork->getFunction();
     // Disable shape inference (WA for generic operations)
diff --git a/inference-engine/src/transformations/include/transformations/convert_pad_to_group_conv.hpp b/inference-engine/src/transformations/include/transformations/convert_pad_to_group_conv.hpp
new file mode 100644
index 0000000..255aeb7
--- /dev/null
+++ b/inference-engine/src/transformations/include/transformations/convert_pad_to_group_conv.hpp
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <vector>
+#include <memory>
+
+#include <transformations_visibility.hpp>
+
+#include <ngraph/pass/graph_rewrite.hpp>
+
+namespace ngraph {
+namespace pass {
+
+class TRANSFORMATIONS_API ConvertPadToGroupConvolution;
+
+}  // namespace pass
+}  // namespace ngraph
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief ConvertPadToGroupConvolution transformation replaces a Pad operation
+ * with GroupConvolution, but has some restrictions on the Pad parameters:
+ * 1. PadMode must be CONSTANT and the padding value must be equal to 0
+ * 2. Padding must be applied only to spatial dimensions
+ * 3. Input shape rank must be static and greater than 3
+ */
+
+class ngraph::pass::ConvertPadToGroupConvolution: public ngraph::pass::MatcherPass {
+public:
+    NGRAPH_RTTI_DECLARATION;
+    ConvertPadToGroupConvolution();
+};
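The new pass is a MatcherPass that stays inert until a plugin opts in through the transformation callback, which is exactly what the mkldnn_plugin.cpp hunk above does for Pad nodes. For applying it outside a plugin, a minimal standalone sketch (the helper name run_convert_pad and the callback body are illustrative, not part of this patch):

    #include <memory>

    #include <ngraph/function.hpp>
    #include <ngraph/opsets/opset4.hpp>
    #include <ngraph/pass/manager.hpp>
    #include <transformations/convert_pad_to_group_conv.hpp>
    #include <transformations/init_node_info.hpp>

    // Illustrative helper: apply the pass to a function, enabling it for every Pad.
    void run_convert_pad(const std::shared_ptr<ngraph::Function>& f) {
        ngraph::pass::Manager manager;
        manager.register_pass<ngraph::pass::InitNodeInfo>();
        manager.register_pass<ngraph::pass::ConvertPadToGroupConvolution>();
        // The matcher checks m_transformation_callback and is disabled by default,
        // so the callback must return true for Pad nodes for the rewrite to fire.
        manager.set_callback([](const std::shared_ptr<const ngraph::Node>& node) -> bool {
            return std::dynamic_pointer_cast<const ngraph::opset4::Pad>(node) != nullptr;
        });
        manager.run_passes(f);
    }
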
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp
index eb5ee57..5fc53d7 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp
@@ -8,6 +8,7 @@
 #include "transformations/depth_to_space_fusion.hpp"
 #include "transformations/optimize_strided_slice.hpp"
 #include "transformations/convert_scatter_elements_to_scatter.hpp"
+#include "transformations/convert_pad_to_group_conv.hpp"
 #include "transformations/remove_filtering_boxes_by_size.hpp"
 #include "transformations/init_node_info.hpp"
 #include "transformations/itt.hpp"
@@ -36,6 +37,7 @@ bool ngraph::pass::CommonOptimizations::run_on_function(std::shared_ptr<ngraph::Function> f) {
     manager.register_pass();
     manager.register_pass();
     manager.register_pass();
+    manager.register_pass<ngraph::pass::ConvertPadToGroupConvolution>();
 
     manager.set_callback(m_transformation_callback);
     manager.run_passes(f);
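Registering the pass inside CommonOptimizations does not enable it unconditionally: as the /* disabled by default */ check in the source below shows, the matcher bails out unless m_transformation_callback approves the matched Pad. A self-contained sketch of that default behaviour, assuming the CommonOptimizations header sits at the include path mirroring the source tree:

    #include <memory>

    #include <ngraph/function.hpp>
    #include <ngraph/opsets/opset4.hpp>
    #include <ngraph/pass/manager.hpp>
    #include <transformations/common_optimizations/common_optimizations.hpp>

    using namespace ngraph;

    int main() {
        // Graph matching the positive test below: zero Pad on spatial dims only.
        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
        auto pad = std::make_shared<opset4::Pad>(
            input,
            opset4::Constant::create(element::i64, Shape{4}, {0, 0, 1, 0}),
            opset4::Constant::create(element::i64, Shape{4}, {0, 0, 0, 1}),
            opset4::Constant::create(element::f32, Shape{}, {0}),
            op::PadMode::CONSTANT);
        auto f = std::make_shared<Function>(NodeVector{pad}, ParameterVector{input});

        pass::Manager manager;
        manager.register_pass<pass::CommonOptimizations>();
        manager.run_passes(f);  // no callback installed -> Pad survives untouched
        return 0;
    }
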
diff --git a/inference-engine/src/transformations/src/transformations/convert_pad_to_group_conv.cpp b/inference-engine/src/transformations/src/transformations/convert_pad_to_group_conv.cpp
new file mode 100644
index 0000000..9576aa7
--- /dev/null
+++ b/inference-engine/src/transformations/src/transformations/convert_pad_to_group_conv.cpp
@@ -0,0 +1,85 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/convert_pad_to_group_conv.hpp"
+
+#include <memory>
+#include <vector>
+
+#include <algorithm>
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+#include <ngraph/rt_info.hpp>
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvertPadToGroupConvolution, "ConvertPadToGroupConvolution", 0);
+
+ngraph::pass::ConvertPadToGroupConvolution::ConvertPadToGroupConvolution() {
+    auto neg = ngraph::pattern::wrap_type<opset4::Pad>(pattern::has_static_dim(1));
+
+    ngraph::matcher_pass_callback callback = [this](pattern::Matcher& m) {
+        auto pad = std::dynamic_pointer_cast<opset4::Pad>(m.get_match_root());
+        if (!pad || !m_transformation_callback(pad) /* disabled by default */) {
+            return false;
+        }
+
+        auto input = pad->input_value(0);
+        const auto & channel_dim = input.get_partial_shape()[1].get_length();
+        const auto & rank = input.get_partial_shape().rank().get_length();
+        if (rank < 4) {
+            // We cannot create Convolution without spatial dimensions.
+            // Also, creating Convolution with a single spatial dimension won't be effective as
+            // we later insert additional Reshape operations.
+            return false;
+        }
+
+        // Check that Pad has CONSTANT mode and that the value is equal to 0 if the 4th input exists
+        if (pad->get_pad_mode() != op::PadMode::CONSTANT) {
+            return false;
+        }
+
+        if (pad->inputs().size() == 4) {
+            if (auto pad_value = std::dynamic_pointer_cast<opset4::Constant>(pad->input_value(3).get_node_shared_ptr())) {
+                // pad value is a scalar
+                if (pad_value->cast_vector<float>()[0] != 0) {
+                    return false;
+                }
+            }
+        }
+
+        // Check that Pad has padding only for spatial dimensions
+        const auto & pad_begin = pad->get_pads_begin();
+        const auto & pad_end = pad->get_pads_end();
+
+        if (pad_begin.empty() || pad_end.empty()) {
+            // pads will be empty if inputs are not constants
+            return false;
+        }
+
+        // Check that non-spatial dimensions are not padded
+        if (std::any_of(pad_begin.begin(), pad_begin.begin() + 2, [](ptrdiff_t value) { return value != 0; }) ||
+            std::any_of(pad_end.begin(), pad_end.begin() + 2, [](ptrdiff_t value) { return value != 0; })) {
+            return false;
+        }
+
+        // Create fake weights filled with ones (GOIYX layout)
+        Shape weights_shape(rank + 1, 1);
+        weights_shape[0] = channel_dim; // G dimension
+        auto weights = opset4::Constant::create(pad->input(0).get_element_type(), weights_shape, {1});
+
+        // Create GroupConvolution attributes
+        Strides stride(rank - 2, 1);
+        CoordinateDiff new_pad_begin{pad_begin.begin() + 2, pad_begin.end()};
+        CoordinateDiff new_pad_end{pad_end.begin() + 2, pad_end.end()};
+
+        auto conv = std::make_shared<opset4::GroupConvolution>(input, weights, stride, new_pad_begin, new_pad_end, stride);
+
+        conv->set_friendly_name(pad->get_friendly_name());
+        ngraph::copy_runtime_info(pad, conv);
+        ngraph::replace_node(pad, conv);
+        return true;
+    };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(neg, "ConvertPadToGroupConvolution");
+    this->register_matcher(m, callback);
+}
\ No newline at end of file
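To make the construction concrete with the numbers used by the tests below: for an NCHW input of {1, 3, 64, 64} with pads_begin {0, 0, 1, 0} and pads_end {0, 0, 0, 1}, the pass emits weights of shape {3, 1, 1, 1, 1} (G, O, I, Y, X: one group per input channel, each holding a single 1x1 kernel of ones), strides {1, 1}, and convolution pads {1, 0} / {0, 1}. The 1x1 identity kernel copies every element in place, so the convolution's zero-filled border reproduces the constant-zero Pad exactly. A quick standalone shape check (a sketch, not part of the patch):

    #include <iostream>

    #include <ngraph/opsets/opset4.hpp>

    using namespace ngraph;

    int main() {
        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
        // Depthwise identity weights: 3 groups, each a single 1x1 kernel of ones.
        auto weights = opset4::Constant::create(element::f32, Shape{3, 1, 1, 1, 1}, {1});
        auto conv = std::make_shared<opset4::GroupConvolution>(
            input, weights, Strides{1, 1},
            CoordinateDiff{1, 0}, CoordinateDiff{0, 1}, Strides{1, 1});
        // H: 64 + 1 + 0 - 1 + 1 = 65, W: 64 + 0 + 1 - 1 + 1 = 65
        std::cout << conv->get_output_shape(0) << std::endl;  // 1x3x65x65, same as the Pad output
        return 0;
    }
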
diff --git a/inference-engine/tests/functional/inference_engine/transformations/convert_pad_to_group_conv.cpp b/inference-engine/tests/functional/inference_engine/transformations/convert_pad_to_group_conv.cpp
new file mode 100644
index 0000000..3b38707
--- /dev/null
+++ b/inference-engine/tests/functional/inference_engine/transformations/convert_pad_to_group_conv.cpp
@@ -0,0 +1,172 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <string>
+#include <memory>
+#include <queue>
+
+#include <ngraph/function.hpp>
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/pass/manager.hpp>
+#include <transformations/convert_pad_to_group_conv.hpp>
+#include <transformations/init_node_info.hpp>
+#include <transformations/utils/utils.hpp>
+
+#include "common_test_utils/ngraph_test_utils.hpp"
+
+using namespace testing;
+using namespace ngraph;
+
+TEST(TransformationTests, ConvertPadToConv) {
+    std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
+    {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto pad_begin = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 1, 0});
+        auto pad_end = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 0, 1});
+        auto pad_value = opset4::Constant::create(element::f32, Shape{}, {0});
+        auto pad_mode = op::PadMode::CONSTANT;
+        auto pad = std::make_shared<opset4::Pad>(input, pad_begin, pad_end, pad_value, pad_mode);
+        f = std::make_shared<Function>(NodeVector{pad}, ParameterVector{input});
+
+        const auto transformations_callback = [](const std::shared_ptr<const Node> &node) -> bool {
+            return std::dynamic_pointer_cast<const opset4::Pad>(node) != nullptr;
+        };
+
+        pass::Manager manager;
+        manager.register_pass<pass::InitNodeInfo>();
+        manager.register_pass<pass::ConvertPadToGroupConvolution>();
+        manager.set_callback(transformations_callback);
+        manager.run_passes(f);
+
+        ASSERT_NO_THROW(check_rt_info(f));
+    }
+
+    {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto weights = opset4::Constant::create(element::f32, Shape{3, 1, 1, 1, 1}, {1});
+        Strides stride{1, 1};
+        CoordinateDiff pad_begin{1, 0}, pad_end{0, 1};
+        auto conv = std::make_shared<opset4::GroupConvolution>(input, weights, stride, pad_begin, pad_end, stride);
+
+        f_ref = std::make_shared<Function>(NodeVector{conv}, ParameterVector{input});
+    }
+
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
+
+TEST(TransformationTests, ConvertPadToConvNeg1) {
+    auto get_function = []() -> std::shared_ptr<Function> {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto pad_begin = opset4::Constant::create(element::i64, Shape{4}, {1, 0, 1, 0}); // Batch dim padding
+        auto pad_end = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 0, 1});
+        auto pad_value = opset4::Constant::create(element::f32, Shape{}, {0});
+        auto pad_mode = op::PadMode::CONSTANT;
+        auto pad = std::make_shared<opset4::Pad>(input, pad_begin, pad_end, pad_value, pad_mode);
+        return std::make_shared<Function>(NodeVector{pad}, ParameterVector{input});
+    };
+
+    const auto transformations_callback = [](const std::shared_ptr<const Node> &node) -> bool {
+        return !!std::dynamic_pointer_cast<const opset4::Pad>(node);
+    };
+
+    std::shared_ptr<Function> f(get_function()), f_ref(get_function());
+    pass::Manager manager;
+    manager.register_pass<pass::InitNodeInfo>();
+    manager.register_pass<pass::ConvertPadToGroupConvolution>();
+    manager.set_callback(transformations_callback);
+    manager.run_passes(f);
+
+    ASSERT_NO_THROW(check_rt_info(f));
+
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
+
+TEST(TransformationTests, ConvertPadToConvNeg2) {
+    auto get_function = []() -> std::shared_ptr<Function> {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto pad_begin = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 1, 0});
+        auto pad_end = opset4::Constant::create(element::i64, Shape{4}, {0, 1, 0, 1}); // Channel dim padding
+        auto pad_value = opset4::Constant::create(element::f32, Shape{}, {0});
+        auto pad_mode = op::PadMode::CONSTANT;
+        auto pad = std::make_shared<opset4::Pad>(input, pad_begin, pad_end, pad_value, pad_mode);
+        return std::make_shared<Function>(NodeVector{pad}, ParameterVector{input});
+    };
+
+    const auto transformations_callback = [](const std::shared_ptr<const Node> &node) -> bool {
+        return !!std::dynamic_pointer_cast<const opset4::Pad>(node);
+    };
+
+    std::shared_ptr<Function> f(get_function()), f_ref(get_function());
+    pass::Manager manager;
+    manager.register_pass<pass::InitNodeInfo>();
+    manager.register_pass<pass::ConvertPadToGroupConvolution>();
+    manager.set_callback(transformations_callback);
+    manager.run_passes(f);
+
+    ASSERT_NO_THROW(check_rt_info(f));
+
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
+
+TEST(TransformationTests, ConvertPadToConvNeg3) {
+    auto get_function = []() -> std::shared_ptr<Function> {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto pad_begin = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 1, 0});
+        auto pad_end = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 0, 1});
+        auto pad_value = opset4::Constant::create(element::f32, Shape{}, {0});
+        auto pad_mode = op::PadMode::SYMMETRIC; // Unsupported mode
+        auto pad = std::make_shared<opset4::Pad>(input, pad_begin, pad_end, pad_value, pad_mode);
+        return std::make_shared<Function>(NodeVector{pad}, ParameterVector{input});
+    };
+
+    const auto transformations_callback = [](const std::shared_ptr<const Node> &node) -> bool {
+        return !!std::dynamic_pointer_cast<const opset4::Pad>(node);
+    };
+
+    std::shared_ptr<Function> f(get_function()), f_ref(get_function());
+    pass::Manager manager;
+    manager.register_pass<pass::InitNodeInfo>();
+    manager.register_pass<pass::ConvertPadToGroupConvolution>();
+    manager.set_callback(transformations_callback);
+    manager.run_passes(f);
+
+    ASSERT_NO_THROW(check_rt_info(f));
+
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
+
+TEST(TransformationTests, ConvertPadToConvNeg4) {
+    auto get_function = []() -> std::shared_ptr<Function> {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto pad_begin = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 1, 0});
+        auto pad_end = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 0, 1});
+        auto pad_value = opset4::Constant::create(element::f32, Shape{}, {1.}); // Unsupported value
+        auto pad_mode = op::PadMode::CONSTANT;
+        auto pad = std::make_shared<opset4::Pad>(input, pad_begin, pad_end, pad_value, pad_mode);
+        return std::make_shared<Function>(NodeVector{pad}, ParameterVector{input});
+    };
+
+    const auto transformations_callback = [](const std::shared_ptr<const Node> &node) -> bool {
+        return !!std::dynamic_pointer_cast<const opset4::Pad>(node);
+    };
+
+    std::shared_ptr<Function> f(get_function()), f_ref(get_function());
+    pass::Manager manager;
+    manager.register_pass<pass::InitNodeInfo>();
+    manager.register_pass<pass::ConvertPadToGroupConvolution>();
+    manager.set_callback(transformations_callback);
+    manager.run_passes(f);
+
+    ASSERT_NO_THROW(check_rt_info(f));
+
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp
new file mode 100644
index 0000000..e1f2d28
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp
@@ -0,0 +1,43 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "subgraph_tests/convert_pad_to_group_conv.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+    const std::vector<std::vector<int64_t>> pads_1d{
+        {0, 0, 0}, {0, 0, 1}, {0, 2, 0}, {3, 0, 0}
+    };
+
+    const std::vector<float> values{0., 1.};
+
+    INSTANTIATE_TEST_CASE_P(Pad_1D, ConvertPadToConvTests,
+        ::testing::Combine(
+            ::testing::Values(ngraph::Shape{1, 8, 64}),
+            ::testing::ValuesIn(pads_1d),
+            ::testing::ValuesIn(pads_1d),
+            ::testing::ValuesIn(values),
+            ::testing::Values(ngraph::op::PadMode::CONSTANT),
+            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+        ConvertPadToConvTests::getTestCaseName);
+
+    const std::vector<std::vector<int64_t>> pads_2d{
+        {0, 0, 0, 0}, {0, 0, 1, 2}, {0, 0, 2, 1},
+        {0, 0, 10, 10}, {0, 0, 0, 4}, {0, 0, 4, 0}
+    };
+
+    INSTANTIATE_TEST_CASE_P(Pad_2D, ConvertPadToConvTests,
+        ::testing::Combine(
+            ::testing::Values(ngraph::Shape{1, 8, 64, 16}),
+            ::testing::ValuesIn(pads_2d),
+            ::testing::ValuesIn(pads_2d),
+            ::testing::ValuesIn(values),
+            ::testing::Values(ngraph::op::PadMode::CONSTANT),
+            ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+        ConvertPadToConvTests::getTestCaseName);
+}  // namespace
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp
new file mode 100644
index 0000000..b7c116d
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp
@@ -0,0 +1,33 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include <ngraph/shape.hpp>
+#include <ngraph/op/util/attr_types.hpp>
+
+namespace LayerTestsDefinitions {
+
+typedef std::tuple<
+        ngraph::Shape,          // input shape
+        std::vector<int64_t>,   // pad_begin
+        std::vector<int64_t>,   // pad_end
+        float,                  // pad_value
+        ngraph::op::PadMode,    // pad_mode
+        std::string             // Device name
+        > PadParams;
+
+class ConvertPadToConvTests
+        : public testing::WithParamInterface<PadParams>,
+          public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<PadParams> &obj);
+
+protected:
+    void SetUp() override;
+};
+}  // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/convert_pad_to_group_conv.cpp b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/convert_pad_to_group_conv.cpp
new file mode 100644
index 0000000..4d28a16
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/convert_pad_to_group_conv.cpp
@@ -0,0 +1,51 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include <vector>
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/precision_utils.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+#include "subgraph_tests/convert_pad_to_group_conv.hpp"
+
+namespace LayerTestsDefinitions {
+
+std::string ConvertPadToConvTests::getTestCaseName(const testing::TestParamInfo<PadParams> &obj) {
+    ngraph::Shape input_shape;
+    std::string targetName;
+    std::vector<int64_t> pad_begin, pad_end;
+    ngraph::op::PadMode mode;
+    float value;
+    std::tie(input_shape, pad_begin, pad_end, value, mode, targetName) = obj.param;
+    std::ostringstream results;
+
+    results << "Input" << CommonTestUtils::vec2str(input_shape);
+    results << "PadBegin" << CommonTestUtils::vec2str(pad_begin);
+    results << "PadEnd" << CommonTestUtils::vec2str(pad_end);
+    results << "Value" << value;
+    results << "Mode" << mode;
+    results << "targetDevice=" << targetName << "_";
+    return results.str();
+}
+
+void ConvertPadToConvTests::SetUp() {
+    ngraph::Shape input_shape;
+    std::vector<int64_t> pad_begin, pad_end;
+    ngraph::op::PadMode mode;
+    float value;
+    std::tie(input_shape, pad_begin, pad_end, value, mode, targetDevice) = this->GetParam();
+
+    {
+        auto param = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, input_shape);
+        auto pad = std::make_shared<ngraph::opset4::Pad>(param,
+            ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{pad_begin.size()}, pad_begin),
+            ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{pad_end.size()}, pad_end),
+            ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{}, {value}), mode);
+        auto relu = std::make_shared<ngraph::opset4::Relu>(pad);
+        function = std::make_shared<ngraph::Function>(ngraph::OutputVector{relu}, ngraph::ParameterVector{param}, "pad");
+    }
+}
+
+TEST_P(ConvertPadToConvTests, CompareWithRefs) {
+    Run();
+}
+}  // namespace LayerTestsDefinitions
diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt
index b756cb5..72b962d 100644
--- a/model-optimizer/automation/package_BOM.txt
+++ b/model-optimizer/automation/package_BOM.txt
@@ -534,7 +534,6 @@ extensions/middle/MulFakeQuantizeFuse.py
 extensions/middle/MXNetRNNSequenceNormalize.py
 extensions/middle/MXNetSplitMultiLayers.py
 extensions/middle/MXTileReplacer.py
-extensions/middle/NasNet.py
 extensions/middle/ONNXRNNSequenceNormalize.py
 extensions/middle/PartialInfer.py
 extensions/middle/pass_separator.py
diff --git a/model-optimizer/extensions/middle/NasNet.py b/model-optimizer/extensions/middle/NasNet.py
deleted file mode 100644
index 5fb90a6..0000000
--- a/model-optimizer/extensions/middle/NasNet.py
+++ /dev/null
@@ -1,156 +0,0 @@
-"""
- Copyright (C) 2018-2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-import logging as log
-
-import numpy as np
-
-from extensions.middle.pass_separator import PostMiddleStart
-from mo.front.common.partial_infer.utils import int64_array
-from mo.graph.graph import Graph
-from mo.middle.replacement import MiddleReplacementPattern
-from mo.ops.const import Const
-from mo.ops.convolution import Convolution
-from mo.ops.crop import Crop
-
-
-class NasNet(MiddleReplacementPattern):
-    enabled = True
-
-    def run_after(self):
-        from extensions.middle.pass_separator import MiddleFinish
-        return [MiddleFinish]
-
-    def run_before(self):
-        return [PostMiddleStart]
-
-    def pattern(self):
-        return dict(
-            nodes=[
-                ('input', dict(kind='data')),
-                ('pad_op', dict(kind='op', op='Pad')),
-                ('pad_out', dict(kind='data')),
-
-                ('begin', dict(kind='data')),
-                ('end', dict(kind='data')),
-                ('stride', dict(kind='data')),
-
-                ('sslice', dict(kind='op', op='StridedSlice')),
-                ('sslice_out', dict(kind='data')),
-
-                ('avg_pool', dict(kind='op', op='AvgPool')),
-                ('output', dict(kind='data')),
-            ],
-            edges=[
-                ('input', 'pad_op', {'in': 0}),
-                ('pad_op', 'pad_out'),
-
-                ('begin', 'sslice', {'in': 1}),
-                ('end', 'sslice', {'in': 2}),
-                ('stride', 'sslice', {'in': 3}),
-
-                ('pad_out', 'sslice', {'in': 0}),
-                ('sslice', 'sslice_out'),
-
-                ('sslice_out', 'avg_pool', {'in': 0}),
-                ('avg_pool', 'output')
-            ]
-        )
-
-    def replace_pattern(self, graph: Graph, match: dict):
-        """
-        Converts specific for NasNet topology subgraph Pad->StridedSlice->AvgPool to Conv->Crop->AvgPool
-        """
-        input = match['input']
-
-        pad_node = match['pad_op']
-        pad_node_name = pad_node.soft_get('name', pad_node.id)
-
-        sslice_node = match['sslice']
-        begin = []
-        end = []
-        stride = []
-        for s in sslice_node.slices:
-            begin.append(s.start)
-            end.append(s.stop)
-            stride.append(s.step)
-
-        pads_begin = pad_node.in_port(1).data.get_value()
-        pads_end = pad_node.in_port(2).data.get_value()
-        if pads_begin is None or pads_end is None:
-            log.error('Pad values for node "{}" are not constants'.format(pad_node_name))
-            return
-
-        if not np.array_equal(pads_begin, int64_array([0, 0, 0, 0])):
-            log.error('Pad begin values doesn\'t match for node {}!'.format(pad_node_name))
-            return
-
-        if not np.array_equal(pads_end, int64_array([0, 1, 1, 0])):
-            log.error('Pad end values doesn\'t match for node {}!'.format(pad_node_name))
-            return
-
-        if not np.array_equal(begin, int64_array([0, 1, 1, 0])):
-            log.error("StridedSlice has wrong begin")
-            return
-
-        if not np.array_equal(sslice_node.end_mask, int64_array([0, 0, 0, 0])) or \
-                not np.array_equal(sslice_node.begin_mask, int64_array([0, 1, 1, 0])):
-            log.error("StridedSlice has wrong masks")
-            return
-
-        # Pad -> Conv
-        conv_name = graph.unique_id(pad_node.name + '/Conv_')
-        conv_weights_name = graph.unique_id(pad_node.name + '/ConvW_')
-        conv_weights = np.ones((input.shape[3], 1, 1, 1))
-        output_shape = int64_array([input.shape[0], input.shape[1] + 1, input.shape[2] + 1, input.shape[3]])
-
-        conv_node = Convolution(graph, dict(name=conv_name,
-                                            stride=int64_array([1, 1, 1, 1]),
-                                            dilation=int64_array([1, 1, 1, 1]),
-                                            group=input.shape[3],
-                                            bias_addable=True,
-                                            bias_term=False,
-                                            spatial_dims=int64_array([1, 2]),
-                                            kernel_spatial=int64_array([1, 1]),
-                                            pad=int64_array([[0, 0], [0, 1], [0, 1], [0, 0]]),
-                                            output_shape=output_shape,
-                                            batch_dims=int64_array([0]),
-                                            channel_dims=int64_array([3]),
-                                            output=input.shape[3],
-                                            input_feature_channel=1,
-                                            output_feature_channel=0,
-                                            )).create_node()
-
-        weights_const_node = Const(graph, dict(name=conv_weights_name, value=conv_weights,
-                                               shape=int64_array(conv_weights.shape))).create_node()
-
-        # StridedSlice -> Crop
-        crop_node = Crop(graph, dict(name=sslice_node.name + '/Crop_', axis=int64_array([1, 2]),
-                                     dim=int64_array([output_shape[1] - 1, output_shape[2] - 1]),
-                                     offset=int64_array([1, 1]))).create_node()
-
-        # Connect nodes
-        pad_node.in_port(0).get_connection().set_destination(conv_node.in_port(0))
-        weights_const_node.out_port(0).connect(conv_node.in_port(1))
-        conv_node.out_port(0).connect(crop_node.in_port(0))
-        sslice_node.out_port(0).get_connection().set_source(crop_node.out_port(0))
-
-        conv_node.in_port(1).bin = 'weights'
-
-        # Remove Pad and StridedSlice nodes from graph
-        graph.remove_node(pad_node.id)
-        graph.remove_node(sslice_node.id)