Convert Pad to GroupConvolution transformation (#1826)
author	Gleb Kazantaev <gleb.kazantaev@intel.com>
	Tue, 18 Aug 2020 14:40:45 +0000 (17:40 +0300)
committer	GitHub <noreply@github.com>
	Tue, 18 Aug 2020 14:40:45 +0000 (17:40 +0300)
* Added ConvertPadToConvolution pass

* Removed NasNet transformation from MO

* Renamed Conv to GroupConv; Added tests

* Fixed typo; Added RTTI
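
The core idea of the pass: a CONSTANT-mode Pad with a zero pad value applied only to spatial dimensions is equivalent to a depthwise GroupConvolution with G = C groups, all-ones 1x1 kernels, unit strides and dilations, and the Pad's spatial pads moved into the convolution's pads_begin/pads_end. A minimal sketch of the equivalence (a fragment mirroring the first unit test added below, not a full program):

    auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
    // Pad (CONSTANT mode, pad value 0, spatial-only pads)...
    auto pad = std::make_shared<opset4::Pad>(
        input,
        opset4::Constant::create(element::i64, Shape{4}, {0, 0, 1, 0}),  // pads_begin
        opset4::Constant::create(element::i64, Shape{4}, {0, 0, 0, 1}),  // pads_end
        opset4::Constant::create(element::f32, Shape{}, {0}),            // pad value
        op::PadMode::CONSTANT);

    // ...becomes an equivalent depthwise GroupConvolution: one all-ones 1x1
    // kernel per channel (weights layout [G, O, I, Y, X]), with the spatial
    // part of the Pad pads moved into the convolution padding.
    auto weights = opset4::Constant::create(element::f32, Shape{3, 1, 1, 1, 1}, {1});
    auto conv = std::make_shared<opset4::GroupConvolution>(
        input, weights,
        Strides{1, 1},         // strides
        CoordinateDiff{1, 0},  // pads_begin
        CoordinateDiff{0, 1},  // pads_end
        Strides{1, 1});        // dilations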

inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
inference-engine/src/transformations/include/transformations/convert_pad_to_group_conv.hpp [new file with mode: 0644]
inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp
inference-engine/src/transformations/src/transformations/convert_pad_to_group_conv.cpp [new file with mode: 0644]
inference-engine/tests/functional/inference_engine/transformations/convert_pad_to_group_conv.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/subgraph_tests/convert_pad_to_group_conv.cpp [new file with mode: 0644]
model-optimizer/automation/package_BOM.txt
model-optimizer/extensions/middle/NasNet.py [deleted file]

diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
index b9bb493..dea6792 100644 (file)
@@ -29,7 +29,6 @@
 #include <transformations/convert_precision.hpp>
 #include <transformations/rt_info/fused_names_attribute.hpp>
 #include <transformations/tensor_iterator_transformations/apply_transformations_to_ti_body.hpp>
-#include <ngraph/opsets/opset1.hpp>
 #include <ngraph/opsets/opset2.hpp>
 #include <ngraph/opsets/opset3.hpp>
 #include <ngraph/opsets/opset4.hpp>
@@ -82,7 +81,8 @@ static void Transformation(ICNNNetwork::Ptr& clonedNetwork) {
                std::dynamic_pointer_cast<const ngraph::opset2::BatchToSpace>(node) ||
                std::dynamic_pointer_cast<const ngraph::opset2::SpaceToBatch>(node) ||
                std::dynamic_pointer_cast<const ngraph::opset4::ReduceL1>(node) ||
-               std::dynamic_pointer_cast<const ngraph::opset4::ReduceL2>(node);
+               std::dynamic_pointer_cast<const ngraph::opset4::ReduceL2>(node) ||
+               std::dynamic_pointer_cast<const ngraph::opset4::Pad>(node);
     };
     auto nGraphFunc = clonedNetwork->getFunction();
     // Disable shape inference (WA for generic operations)
diff --git a/inference-engine/src/transformations/include/transformations/convert_pad_to_group_conv.hpp b/inference-engine/src/transformations/include/transformations/convert_pad_to_group_conv.hpp
new file mode 100644 (file)
index 0000000..255aeb7
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <vector>
+#include <memory>
+
+#include <transformations_visibility.hpp>
+
+#include <ngraph/pass/graph_rewrite.hpp>
+
+namespace ngraph {
+namespace pass {
+
+class TRANSFORMATIONS_API ConvertPadToGroupConvolution;
+
+}  // namespace pass
+}  // namespace ngraph
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief ConvertPadToGroupConvolution transformation replaces a Pad operation
+ * with an equivalent GroupConvolution, with some restrictions on the Pad parameters:
+ * 1. PadMode must be CONSTANT and the pad value must be equal to 0
+ * 2. Padding must be applied only to spatial dimensions
+ * 3. The input shape rank must be static and greater than 3
+ */
+
+class ngraph::pass::ConvertPadToGroupConvolution: public ngraph::pass::MatcherPass {
+public:
+    NGRAPH_RTTI_DECLARATION;
+    ConvertPadToGroupConvolution();
+};
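
The pass is registered in CommonOptimizations (diff below) but is disabled by default: the matcher callback returns false unless the plugin's transformation callback accepts the matched Pad, which is why the mkldnn_plugin.cpp diff above adds opset4::Pad to its callback. A minimal sketch of running the pass standalone, following the pattern used in the new unit tests:

    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::InitNodeInfo>();
    manager.register_pass<ngraph::pass::ConvertPadToGroupConvolution>();
    // Enable the (default-disabled) pass for Pad nodes via the callback.
    manager.set_callback([](const std::shared_ptr<const ngraph::Node> &node) -> bool {
        return std::dynamic_pointer_cast<const ngraph::opset4::Pad>(node) != nullptr;
    });
    manager.run_passes(f);  // f is the std::shared_ptr<ngraph::Function> to transform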
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp
index eb5ee57..5fc53d7 100644 (file)
@@ -8,6 +8,7 @@
 #include "transformations/depth_to_space_fusion.hpp"
 #include "transformations/optimize_strided_slice.hpp"
 #include "transformations/convert_scatter_elements_to_scatter.hpp"
+#include "transformations/convert_pad_to_group_conv.hpp"
 #include "transformations/remove_filtering_boxes_by_size.hpp"
 #include "transformations/init_node_info.hpp"
 #include "transformations/itt.hpp"
@@ -36,6 +37,7 @@ bool ngraph::pass::CommonOptimizations::run_on_function(std::shared_ptr<ngraph::
     manager.register_pass<ngraph::pass::DepthToSpaceFusion>();
     manager.register_pass<ngraph::pass::MishFusion>();
     manager.register_pass<ngraph::pass::SwishFusion>();
+    manager.register_pass<ngraph::pass::ConvertPadToGroupConvolution>();
 
     manager.set_callback(m_transformation_callback);
     manager.run_passes(f);
diff --git a/inference-engine/src/transformations/src/transformations/convert_pad_to_group_conv.cpp b/inference-engine/src/transformations/src/transformations/convert_pad_to_group_conv.cpp
new file mode 100644 (file)
index 0000000..9576aa7
--- /dev/null
@@ -0,0 +1,85 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/convert_pad_to_group_conv.hpp"
+
+#include <memory>
+#include <vector>
+
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/rt_info.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+#include <ngraph/pattern/op/pattern.hpp>
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvertPadToGroupConvolution, "ConvertPadToGroupConvolution", 0);
+
+ngraph::pass::ConvertPadToGroupConvolution::ConvertPadToGroupConvolution() {
+    auto pad_pattern = ngraph::pattern::wrap_type<opset4::Pad>(pattern::has_static_dim(1));
+
+    ngraph::matcher_pass_callback callback = [this](pattern::Matcher& m) {
+        auto pad = std::dynamic_pointer_cast<ngraph::opset4::Pad>(m.get_match_root());
+        if (!pad || !m_transformation_callback(pad) /* the pass is disabled by default and is enabled via the plugin callback */) {
+            return false;
+        }
+
+        auto input = pad->input_value(0);
+        const auto & channel_dim = input.get_partial_shape()[1].get_length();
+        const auto & rank = input.get_partial_shape().rank().get_length();
+        if (rank < 4) {
+            // We cannot create a Convolution without spatial dimensions.
+            // Creating a Convolution with a single spatial dimension would also not
+            // be effective, as we would later insert additional Reshape operations.
+            return false;
+        }
+
+        // Check that Pad has CONSTANT mode and, if the 4th input exists, that the pad value equals 0
+        if (pad->get_pad_mode() != op::PadMode::CONSTANT) {
+            return false;
+        }
+
+        if (pad->inputs().size() == 4) {
+            if (auto pad_value = std::dynamic_pointer_cast<opset4::Constant>(pad->input_value(3).get_node_shared_ptr())) {
+                // pad value is a scalar
+                if (pad_value->cast_vector<float>()[0] != 0) {
+                    return false;
+                }
+            }
+        }
+
+        // Check that Pad has padding only for spatial dimensions
+        const auto & pad_begin = pad->get_pads_begin();
+        const auto & pad_end = pad->get_pads_end();
+
+        if (pad_begin.empty() || pad_end.empty()) {
+            // pads will be empty if inputs are not constants
+            return false;
+        }
+
+        // Check that non-spatial dimensions are not padded
+        if (std::any_of(pad_begin.begin(), pad_begin.begin() + 2, [](ptrdiff_t value) { return value != 0; }) ||
+            std::any_of(pad_end.begin(), pad_end.begin() + 2, [](ptrdiff_t value) { return value != 0; })) {
+            return false;
+        }
+
+        // Create fake all-ones weights in [G, O, I, spatial...] layout
+        Shape weights_shape(rank + 1, 1);
+        weights_shape[0] = channel_dim; // G dimension
+        auto weights = opset4::Constant::create(pad->input(0).get_element_type(), weights_shape, {1});
+
+        // Create GroupConvolution attributes
+        Strides stride(rank - 2, 1);
+        CoordinateDiff new_pad_begin{pad_begin.begin() + 2, pad_begin.end()};
+        CoordinateDiff new_pad_end{pad_end.begin() + 2, pad_end.end()};
+
+        auto conv = std::make_shared<opset4::GroupConvolution>(input, weights, stride, new_pad_begin, new_pad_end, stride /* dilations are all ones, same as strides */);
+
+        conv->set_friendly_name(pad->get_friendly_name());
+        ngraph::copy_runtime_info(pad, conv);
+        ngraph::replace_node(pad, conv);
+        return true;
+    };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(pad_pattern, "ConvertPadToGroupConvolution");
+    this->register_matcher(m, callback);
+}
\ No newline at end of file
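
For intuition on why this is an identity: with G = C groups and O = I = 1, each output channel is its input channel convolved with a single 1x1 weight equal to 1.0, i.e. copied unchanged, while the convolution's own zero padding fills the new border exactly as the original zero-constant Pad did. A worked instance of the shape arithmetic above for the NCHW case (values taken from the first unit test below):

    // Input 1x3x64x64; Pad pads_begin {0, 0, 1, 0}, pads_end {0, 0, 0, 1}.
    Shape weights_shape(4 + 1, 1);       // rank + 1 ones -> {1, 1, 1, 1, 1}
    weights_shape[0] = 3;                // G takes the channel count -> {3, 1, 1, 1, 1}
    Strides stride(4 - 2, 1);            // {1, 1}, reused for strides and dilations
    CoordinateDiff new_pad_begin{1, 0};  // spatial tail of pads_begin
    CoordinateDiff new_pad_end{0, 1};    // spatial tail of pads_end
    // Result: GroupConvolution(1x3x64x64) -> 1x3x65x65, the padded shape.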
diff --git a/inference-engine/tests/functional/inference_engine/transformations/convert_pad_to_group_conv.cpp b/inference-engine/tests/functional/inference_engine/transformations/convert_pad_to_group_conv.cpp
new file mode 100644 (file)
index 0000000..3b38707
--- /dev/null
@@ -0,0 +1,172 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <string>
+#include <memory>
+#include <queue>
+
+#include <ngraph/function.hpp>
+#include <ngraph/opsets/opset4.hpp>
+#include <transformations/convert_pad_to_group_conv.hpp>
+#include <transformations/init_node_info.hpp>
+#include <transformations/utils/utils.hpp>
+#include <ngraph/pass/manager.hpp>
+
+#include "common_test_utils/ngraph_test_utils.hpp"
+
+using namespace testing;
+using namespace ngraph;
+
+TEST(TransformationTests, ConvertPadToConv) {
+    std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
+    {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto pad_begin = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 1, 0});
+        auto pad_end = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 0, 1});
+        auto pad_value = opset4::Constant::create(element::f32, Shape{}, {0});
+        auto pad_mode = op::PadMode::CONSTANT;
+        auto pad = std::make_shared<opset4::Pad>(input, pad_begin, pad_end, pad_value, pad_mode);
+        f = std::make_shared<Function>(NodeVector{pad}, ParameterVector{input});
+
+        const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {
+            return std::dynamic_pointer_cast<const ngraph::opset4::Pad>(node) != nullptr;
+        };
+
+        pass::Manager manager;
+        manager.register_pass<pass::InitNodeInfo>();
+        manager.register_pass<pass::ConvertPadToGroupConvolution>();
+        manager.set_callback(transformations_callback);
+        manager.run_passes(f);
+
+        ASSERT_NO_THROW(check_rt_info(f));
+    }
+
+    {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto weights = opset4::Constant::create(element::f32, Shape{3, 1, 1, 1, 1}, {1});
+        Strides stride{1, 1};
+        CoordinateDiff pad_begin{1, 0}, pad_end{0, 1};
+        auto conv = std::make_shared<opset4::GroupConvolution>(input, weights, stride, pad_begin, pad_end, stride);
+
+        f_ref = std::make_shared<Function>(NodeVector{conv}, ParameterVector{input});
+    }
+
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
+
+TEST(TransformationTests, ConvertPadToConvNeg1) {
+    auto get_function = []() -> std::shared_ptr<Function> {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto pad_begin = opset4::Constant::create(element::i64, Shape{4}, {1, 0, 1, 0}); // Batch dim padding
+        auto pad_end = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 0, 1});
+        auto pad_value = opset4::Constant::create(element::f32, Shape{}, {0});
+        auto pad_mode = op::PadMode::CONSTANT;
+        auto pad = std::make_shared<opset4::Pad>(input, pad_begin, pad_end, pad_value, pad_mode);
+        return std::make_shared<Function>(NodeVector{pad}, ParameterVector{input});
+    };
+
+    const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {
+        return std::dynamic_pointer_cast<const ngraph::opset4::Pad>(node) != nullptr;
+    };
+
+    std::shared_ptr<Function> f(get_function()), f_ref(get_function());
+    pass::Manager manager;
+    manager.register_pass<pass::InitNodeInfo>();
+    manager.register_pass<pass::ConvertPadToGroupConvolution>();
+    manager.set_callback(transformations_callback);
+    manager.run_passes(f);
+
+    ASSERT_NO_THROW(check_rt_info(f));
+
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
+
+TEST(TransformationTests, ConvertPadToConvNeg2) {
+    auto get_function = []() -> std::shared_ptr<Function> {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto pad_begin = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 1, 0});
+        auto pad_end = opset4::Constant::create(element::i64, Shape{4}, {0, 1, 0, 1}); // Channel dim padding
+        auto pad_value = opset4::Constant::create(element::f32, Shape{}, {0});
+        auto pad_mode = op::PadMode::CONSTANT;
+        auto pad = std::make_shared<opset4::Pad>(input, pad_begin, pad_end, pad_value, pad_mode);
+        return std::make_shared<Function>(NodeVector{pad}, ParameterVector{input});
+    };
+
+    const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {
+        return std::dynamic_pointer_cast<const ngraph::opset4::Pad>(node) != nullptr;
+    };
+
+    std::shared_ptr<Function> f(get_function()), f_ref(get_function());
+    pass::Manager manager;
+    manager.register_pass<pass::InitNodeInfo>();
+    manager.register_pass<pass::ConvertPadToGroupConvolution>();
+    manager.set_callback(transformations_callback);
+    manager.run_passes(f);
+
+    ASSERT_NO_THROW(check_rt_info(f));
+
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
+
+TEST(TransformationTests, ConvertPadToConvNeg3) {
+    auto get_function = []() -> std::shared_ptr<Function> {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto pad_begin = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 1, 0});
+        auto pad_end = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 0, 1});
+        auto pad_value = opset4::Constant::create(element::f32, Shape{}, {0});
+        auto pad_mode = op::PadMode::SYMMETRIC; // Unsupported mode
+        auto pad = std::make_shared<opset4::Pad>(input, pad_begin, pad_end, pad_value, pad_mode);
+        return std::make_shared<Function>(NodeVector{pad}, ParameterVector{input});
+    };
+
+    const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {
+        return std::dynamic_pointer_cast<const ngraph::opset4::Pad>(node) != nullptr;
+    };
+
+    std::shared_ptr<Function> f(get_function()), f_ref(get_function());
+    pass::Manager manager;
+    manager.register_pass<pass::InitNodeInfo>();
+    manager.register_pass<pass::ConvertPadToGroupConvolution>();
+    manager.set_callback(transformations_callback);
+    manager.run_passes(f);
+
+    ASSERT_NO_THROW(check_rt_info(f));
+
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
+
+
+TEST(TransformationTests, ConvertPadToConvNeg4) {
+    auto get_function = []() -> std::shared_ptr<Function> {
+        auto input = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
+        auto pad_begin = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 1, 0});
+        auto pad_end = opset4::Constant::create(element::i64, Shape{4}, {0, 0, 0, 1});
+        auto pad_value = opset4::Constant::create(element::f32, Shape{}, {1.}); // Unsupported value
+        auto pad_mode = op::PadMode::CONSTANT;
+        auto pad = std::make_shared<opset4::Pad>(input, pad_begin, pad_end, pad_value, pad_mode);
+        return std::make_shared<Function>(NodeVector{pad}, ParameterVector{input});
+    };
+
+    const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {
+        return std::dynamic_pointer_cast<const ngraph::opset4::Pad>(node) != nullptr;
+    };
+
+    std::shared_ptr<Function> f(get_function()), f_ref(get_function());
+    pass::Manager manager;
+    manager.register_pass<pass::InitNodeInfo>();
+    manager.register_pass<pass::ConvertPadToGroupConvolution>();
+    manager.set_callback(transformations_callback);
+    manager.run_passes(f);
+
+    ASSERT_NO_THROW(check_rt_info(f));
+
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/convert_pad_to_group_conv.cpp
new file mode 100644 (file)
index 0000000..e1f2d28
--- /dev/null
@@ -0,0 +1,43 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "subgraph_tests/convert_pad_to_group_conv.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+    const std::vector<std::vector<int64_t>> pads_1d{
+            {0, 0, 0}, {0, 0, 1}, {0, 2, 0}, {3, 0, 0}
+    };
+
+    const std::vector<float> values{0., 1.};
+
+    INSTANTIATE_TEST_CASE_P(Pad_1D, ConvertPadToConvTests,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::Shape{1, 8, 64}),
+                                    ::testing::ValuesIn(pads_1d),
+                                    ::testing::ValuesIn(pads_1d),
+                                    ::testing::ValuesIn(values),
+                                    ::testing::Values(ngraph::op::PadMode::CONSTANT),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvertPadToConvTests::getTestCaseName);
+
+    const std::vector<std::vector<int64_t>> pads_2d{
+            {0, 0, 0, 0}, {0, 0, 1, 2}, {0, 0, 2, 1},
+            {0, 0, 10, 10}, {0, 0, 0, 4}, {0, 0, 4, 0}
+    };
+
+    INSTANTIATE_TEST_CASE_P(Pad_2D, ConvertPadToConvTests,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::Shape{1, 8, 64, 16}),
+                                    ::testing::ValuesIn(pads_2d),
+                                    ::testing::ValuesIn(pads_2d),
+                                    ::testing::ValuesIn(values),
+                                    ::testing::Values(ngraph::op::PadMode::CONSTANT),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvertPadToConvTests::getTestCaseName);
+}  // namespace
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/convert_pad_to_group_conv.hpp
new file mode 100644 (file)
index 0000000..b7c116d
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include <ngraph/shape.hpp>
+#include <ngraph/node.hpp>
+
+namespace LayerTestsDefinitions {
+
+typedef std::tuple<
+        ngraph::Shape,              // input shape
+        std::vector<int64_t>,       // pad_begin
+        std::vector<int64_t>,       // pad_end
+        float,                      // pad_value
+        ngraph::op::PadMode,        // pad_mode
+        std::string                 // Device name
+        > PadParams;
+
+class ConvertPadToConvTests
+        : public testing::WithParamInterface<PadParams>,
+          public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<PadParams> &obj);
+
+protected:
+    void SetUp() override;
+};
+} // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/convert_pad_to_group_conv.cpp b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/convert_pad_to_group_conv.cpp
new file mode 100644 (file)
index 0000000..4d28a16
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include <debug.h>
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/precision_utils.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+#include "subgraph_tests/convert_pad_to_group_conv.hpp"
+
+namespace LayerTestsDefinitions {
+
+std::string ConvertPadToConvTests::getTestCaseName(const testing::TestParamInfo<PadParams> &obj) {
+    ngraph::Shape input_shape;
+    std::string targetName;
+    std::vector<int64_t> pad_begin, pad_end;
+    ngraph::op::PadMode mode;
+    float value;
+    std::tie(input_shape, pad_begin, pad_end, value, mode, targetName) = obj.param;
+    std::ostringstream results;
+
+    results << "Input" << CommonTestUtils::vec2str(input_shape);
+    results << "PadBegin" << CommonTestUtils::vec2str(pad_begin);
+    results << "PadEnd" << CommonTestUtils::vec2str(pad_end);
+    results << "Value" << value;
+    results << "Mode" << mode;
+    results << "targetDevice=" << targetName << "_";
+    return results.str();
+}
+
+void ConvertPadToConvTests::SetUp() {
+    ngraph::Shape input_shape;
+    std::vector<int64_t> pad_begin, pad_end;
+    ngraph::op::PadMode mode;
+    float value;
+    std::tie(input_shape, pad_begin, pad_end, value, mode, targetDevice) = this->GetParam();
+
+    {
+        auto param = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, input_shape);
+        auto pad = std::make_shared<ngraph::opset4::Pad>(param,
+                                                         ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{pad_begin.size()}, pad_begin),
+                                                         ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{pad_end.size()}, pad_end),
+                                                         ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{}, {value}), mode);
+        auto relu = std::make_shared<ngraph::opset4::Relu>(pad);
+        function = std::make_shared<ngraph::Function>(ngraph::OutputVector{relu}, ngraph::ParameterVector{param}, "pad");
+    }
+}
+
+TEST_P(ConvertPadToConvTests, CompareWithRefs) {
+    Run();
+}
+} // namespace LayerTestsDefinitions
diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt
index b756cb5..72b962d 100644 (file)
@@ -534,7 +534,6 @@ extensions/middle/MulFakeQuantizeFuse.py
 extensions/middle/MXNetRNNSequenceNormalize.py
 extensions/middle/MXNetSplitMultiLayers.py
 extensions/middle/MXTileReplacer.py
-extensions/middle/NasNet.py
 extensions/middle/ONNXRNNSequenceNormalize.py
 extensions/middle/PartialInfer.py
 extensions/middle/pass_separator.py
diff --git a/model-optimizer/extensions/middle/NasNet.py b/model-optimizer/extensions/middle/NasNet.py
deleted file mode 100644 (file)
index 5fb90a6..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-"""
- Copyright (C) 2018-2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-import logging as log
-
-import numpy as np
-
-from extensions.middle.pass_separator import PostMiddleStart
-from mo.front.common.partial_infer.utils import int64_array
-from mo.graph.graph import Graph
-from mo.middle.replacement import MiddleReplacementPattern
-from mo.ops.const import Const
-from mo.ops.convolution import Convolution
-from mo.ops.crop import Crop
-
-
-class NasNet(MiddleReplacementPattern):
-    enabled = True
-
-    def run_after(self):
-        from extensions.middle.pass_separator import MiddleFinish
-        return [MiddleFinish]
-
-    def run_before(self):
-        return [PostMiddleStart]
-
-    def pattern(self):
-        return dict(
-            nodes=[
-                ('input', dict(kind='data')),
-                ('pad_op', dict(kind='op', op='Pad')),
-                ('pad_out', dict(kind='data')),
-
-                ('begin', dict(kind='data')),
-                ('end', dict(kind='data')),
-                ('stride', dict(kind='data')),
-
-                ('sslice', dict(kind='op', op='StridedSlice')),
-                ('sslice_out', dict(kind='data')),
-
-                ('avg_pool', dict(kind='op', op='AvgPool')),
-                ('output', dict(kind='data')),
-            ],
-            edges=[
-                ('input', 'pad_op', {'in': 0}),
-                ('pad_op', 'pad_out'),
-
-                ('begin', 'sslice', {'in': 1}),
-                ('end', 'sslice', {'in': 2}),
-                ('stride', 'sslice', {'in': 3}),
-
-                ('pad_out', 'sslice', {'in': 0}),
-                ('sslice', 'sslice_out'),
-
-                ('sslice_out', 'avg_pool', {'in': 0}),
-                ('avg_pool', 'output')
-            ]
-        )
-
-    def replace_pattern(self, graph: Graph, match: dict):
-        """
-        Converts specific for NasNet topology subgraph Pad->StridedSlice->AvgPool to Conv->Crop->AvgPool
-        """
-        input = match['input']
-
-        pad_node = match['pad_op']
-        pad_node_name = pad_node.soft_get('name', pad_node.id)
-
-        sslice_node = match['sslice']
-        begin = []
-        end = []
-        stride = []
-        for s in sslice_node.slices:
-            begin.append(s.start)
-            end.append(s.stop)
-            stride.append(s.step)
-
-        pads_begin = pad_node.in_port(1).data.get_value()
-        pads_end = pad_node.in_port(2).data.get_value()
-        if pads_begin is None or pads_end is None:
-            log.error('Pad values for node "{}" are not constants'.format(pad_node_name))
-            return
-
-        if not np.array_equal(pads_begin, int64_array([0, 0, 0, 0])):
-            log.error('Pad begin values doesn\'t match for node {}!'.format(pad_node_name))
-            return
-
-        if not np.array_equal(pads_end, int64_array([0, 1, 1, 0])):
-            log.error('Pad end values doesn\'t match for node {}!'.format(pad_node_name))
-            return
-
-        if not np.array_equal(begin, int64_array([0, 1, 1, 0])):
-            log.error("StridedSlice has wrong begin")
-            return
-
-        if not np.array_equal(sslice_node.end_mask, int64_array([0, 0, 0, 0])) or not np.array_equal(sslice_node.begin_mask,
-                                                                                                int64_array(
-                                                                                                    [0, 1, 1, 0])):
-            log.error("StridedSlice has wrong masks")
-            return
-
-        # Pad -> Conv
-        conv_name = graph.unique_id(pad_node.name + '/Conv_')
-        conv_weights_name = graph.unique_id(pad_node.name + '/ConvW_')
-        conv_weights = np.ones((input.shape[3], 1, 1, 1))
-        output_shape = int64_array([input.shape[0], input.shape[1] + 1, input.shape[2] + 1, input.shape[3]])
-
-        conv_node = Convolution(graph, dict(name=conv_name,
-                                            stride=int64_array([1, 1, 1, 1]),
-                                            dilation=int64_array([1, 1, 1, 1]),
-                                            group=input.shape[3],
-                                            bias_addable=True,
-                                            bias_term=False,
-                                            spatial_dims=int64_array([1, 2]),
-                                            kernel_spatial=int64_array([1, 1]),
-                                            pad=int64_array([[0, 0], [0, 1], [0, 1], [0, 0]]),
-                                            output_shape=output_shape,
-                                            batch_dims=int64_array([0]),
-                                            channel_dims=int64_array([3]),
-                                            output=input.shape[3],
-                                            input_feature_channel=1,
-                                            output_feature_channel=0,
-                                            )).create_node()
-
-        weights_const_node = Const(graph, dict(name=conv_weights_name, value=conv_weights,
-                                          shape=int64_array(conv_weights.shape))).create_node()
-
-        # StridedSlice -> Crop
-        crop_node = Crop(graph, dict(name=sslice_node.name + '/Crop_', axis=int64_array([1, 2]),
-                                dim=int64_array([output_shape[1] - 1, output_shape[2] - 1]), offset=int64_array([1, 1]))
-                    ).create_node()
-
-        # Connect nodes
-        pad_node.in_port(0).get_connection().set_destination(conv_node.in_port(0))
-        weights_const_node.out_port(0).connect(conv_node.in_port(1))
-        conv_node.out_port(0).connect(crop_node.in_port(0))
-        sslice_node.out_port(0).get_connection().set_source(crop_node.out_port(0))
-
-        conv_node.in_port(1).bin = 'weights'
-
-        # Remove Pad and StridedSlice nodes from graph
-        graph.remove_node(pad_node.id)
-        graph.remove_node(sslice_node.id)