From 983e2a922f45f20e11b32781cde126946af83c6e Mon Sep 17 00:00:00 2001
From: Gleb Kazantaev
Date: Fri, 14 Aug 2020 13:47:02 +0300
Subject: [PATCH] opset4 Convolution/GroupConvolution -> Multiply fusion
 (#1754)

* Added new predicates for smart pattern matching

* Added ConvMul and GroupConvMul fusion passes based on opset4; added CPU
  functional tests for comparing fusion accuracy

* Improved ConvMultiply fusion to support scalars; added positive and
  negative tests

* Added ConvolutionBackprop/GroupConvolutionBackprop Multiply fusion;
  added functional tests
---
 .../common_optimizations/conv_mul_fusion.hpp       |  43 ++++
 .../common_optimizations/conv_mul_fusion.cpp       | 283 +++++++++++++++++++++
 .../convert_opset1_to_legacy.cpp                   |  20 +-
 .../subgraph_tests/conv_mul_fusion.cpp             | 159 ++++++++++++
 .../include/subgraph_tests/conv_mul_fusion.hpp     |  34 +++
 .../shared/src/subgraph_tests/conv_mul_fusion.cpp  |  80 ++++++
 ngraph/core/include/ngraph/pattern/op/label.hpp    |   3 +
 ngraph/core/include/ngraph/pattern/op/pattern.hpp  |   9 +
 ngraph/core/src/pattern/op/label.cpp               |   5 +
 ngraph/core/src/pattern/op/pattern.cpp             |  27 ++
 10 files changed, 658 insertions(+), 5 deletions(-)
 create mode 100644 inference-engine/src/transformations/include/transformations/common_optimizations/conv_mul_fusion.hpp
 create mode 100644 inference-engine/src/transformations/src/transformations/common_optimizations/conv_mul_fusion.cpp
 create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/conv_mul_fusion.cpp
 create mode 100644 inference-engine/tests/functional/plugin/shared/include/subgraph_tests/conv_mul_fusion.hpp
 create mode 100644 inference-engine/tests/functional/plugin/shared/src/subgraph_tests/conv_mul_fusion.cpp

diff --git a/inference-engine/src/transformations/include/transformations/common_optimizations/conv_mul_fusion.hpp b/inference-engine/src/transformations/include/transformations/common_optimizations/conv_mul_fusion.hpp
new file mode 100644
index 0000000..dd19f0a
--- /dev/null
+++ b/inference-engine/src/transformations/include/transformations/common_optimizations/conv_mul_fusion.hpp
@@ -0,0 +1,43 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <vector>
+#include <memory>
+
+#include <transformations_visibility.hpp>
+
+#include <ngraph/pass/graph_rewrite.hpp>
+
+namespace ngraph {
+namespace pass {
+
+class TRANSFORMATIONS_API ConvolutionMultiplyFusion;
+class TRANSFORMATIONS_API GroupConvolutionMultiplyFusion;
+class TRANSFORMATIONS_API ConvolutionBackpropDataMultiplyFusion;
+class TRANSFORMATIONS_API GroupConvolutionBackpropDataMultiplyFusion;
+
+}  // namespace pass
+}  // namespace ngraph
+
+class ngraph::pass::ConvolutionMultiplyFusion: public ngraph::pass::MatcherPass {
+public:
+    ConvolutionMultiplyFusion();
+};
+
+class ngraph::pass::GroupConvolutionMultiplyFusion: public ngraph::pass::MatcherPass {
+public:
+    GroupConvolutionMultiplyFusion();
+};
+
+class ngraph::pass::ConvolutionBackpropDataMultiplyFusion: public ngraph::pass::MatcherPass {
+public:
+    ConvolutionBackpropDataMultiplyFusion();
+};
+
+class ngraph::pass::GroupConvolutionBackpropDataMultiplyFusion: public ngraph::pass::MatcherPass {
+public:
+    GroupConvolutionBackpropDataMultiplyFusion();
+};
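For context, here is a minimal usage sketch (not part of the patch) showing how the four new passes are intended to be registered; it mirrors the pass list used by the functional tests later in this patch. The wrapper name fuse_conv_multiply is illustrative only.

// Minimal usage sketch (not part of the patch). Mirrors the pass list used
// by the functional tests below; the helper name is illustrative only.
#include <memory>
#include <ngraph/function.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <transformations/common_optimizations/conv_mul_fusion.hpp>

void fuse_conv_multiply(const std::shared_ptr<ngraph::Function>& f) {
    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::ConvolutionMultiplyFusion>();
    manager.register_pass<ngraph::pass::GroupConvolutionMultiplyFusion>();
    manager.register_pass<ngraph::pass::ConvolutionBackpropDataMultiplyFusion>();
    manager.register_pass<ngraph::pass::GroupConvolutionBackpropDataMultiplyFusion>();
    // Fold the Multiply that the passes insert on the weights path
    // into the weights Constant.
    manager.register_pass<ngraph::pass::ConstantFolding>();
    manager.run_passes(f);
}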
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/conv_mul_fusion.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/conv_mul_fusion.cpp
new file mode 100644
index 0000000..bd3350d
--- /dev/null
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/conv_mul_fusion.cpp
@@ -0,0 +1,283 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/common_optimizations/conv_mul_fusion.hpp"
+
+#include <memory>
+#include <vector>
+
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/rt_info.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+#include <ngraph/pattern/op/label.hpp>
+#include <ngraph/pattern/op/pattern.hpp>
+
+
+bool check_shapes(const ngraph::Shape & ref_shape, const ngraph::Shape & other_shape) {
+    // Check that other_shape does not broadcast ref_shape
+    if (other_shape.size() > ref_shape.size()) {
+        return false;
+    }
+    auto ref_it = ref_shape.rbegin();
+    auto other_it = other_shape.rbegin();
+    // Check that other_shape dims are equal to the corresponding ref_shape
+    // dims (or to 1), comparing from the innermost dimension outwards.
+    // If other_shape has a smaller rank than ref_shape, we stop the
+    // comparison once other_shape is exhausted and return true.
+    while (other_it != other_shape.rend()) {
+        if (*other_it != *ref_it && *other_it != 1) {
+            return false;
+        }
+        ++other_it;
+        ++ref_it;
+    }
+    return true;
+}
+
+ngraph::pass::ConvolutionMultiplyFusion::ConvolutionMultiplyFusion() {
+    auto input = pattern::any_input();
+    auto weights = ngraph::pattern::any_input(pattern::has_static_dim(0) /* has OIYX layout */);
+    auto conv = ngraph::pattern::wrap_type<opset4::Convolution>({input, weights}, pattern::consumers_count(1));
+    auto mul_const = ngraph::pattern::wrap_type<opset4::Constant>(pattern::has_static_shape());
+    auto mul = ngraph::pattern::wrap_type<opset4::Multiply>({conv, mul_const});
+
+    matcher_pass_callback callback = [conv, input, weights, mul, mul_const](pattern::Matcher & m) -> bool {
+        const auto & pattern_to_output = m.get_pattern_value_map();
+
+        const auto & m_weights = pattern_to_output.at(weights);
+        const auto & m_const = pattern_to_output.at(mul_const);
+        const auto & m_input = pattern_to_output.at(input);
+        const auto & m_conv = pattern_to_output.at(conv).get_node_shared_ptr();
+        const auto & m_mul = pattern_to_output.at(mul).get_node_shared_ptr();
+
+        const auto & channel_dim = m_weights.get_partial_shape()[0].get_length();
+        const auto & weights_rank = m_weights.get_partial_shape().rank().get_length();
+        const auto & const_shape = m_const.get_shape();
+
+        bool is_scalar_multiplier = (shape_size(const_shape) == 1);
+
+        // Check that the constant has a shape like [1, C, 1, 1], where the
+        // number of trailing 1s equals the number of spatial dimensions, or
+        // is a scalar. Such a constant is applied per output channel and can
+        // be fused into the Convolution weights. The constant rank must also
+        // be less than or equal to the Convolution output rank, otherwise
+        // fusion would break output broadcasting.
+        auto expected_shape = Shape(weights_rank, 1);
+        expected_shape[1] = channel_dim;
+
+        if (!check_shapes(expected_shape, const_shape)) {
+            return false;
+        }
+
+        // Reshape the constant to [C, 1, 1, 1], padded with 1s up to the
+        // weights rank, so that it aligns with the weights for multiplication.
+        // For a scalar multiplier the Reshape is skipped.
+        Output<Node> final_const = m_const;
+        if (!is_scalar_multiplier) {
+            auto final_const_shape = Shape(weights_rank, 1);
+            final_const_shape[0] = channel_dim;
+            final_const = std::make_shared<opset4::Reshape>(m_const,
+                    opset4::Constant::create(ngraph::element::i64, ngraph::Shape{final_const_shape.size()},
+                                             final_const_shape), true);
+        }
+
+        // Multiply the convolution weights by the aligned constant values
+        auto weights_multiply = std::make_shared<opset4::Multiply>(m_weights, final_const);
+
+        // Replace Convolution->Multiply with a Convolution with new inputs
+        auto new_conv = m_conv->copy_with_new_inputs({m_input, weights_multiply});
+        new_conv->set_friendly_name(m_mul->get_friendly_name());
+        copy_runtime_info({m_conv, m_mul}, {new_conv, final_const.get_node_shared_ptr(), weights_multiply});
+        replace_node(m_mul, new_conv);
+        return true;
+    };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(mul, "ConvolutionMultiplyFusion");
+    register_matcher(m, callback);
+}
+
+ngraph::pass::GroupConvolutionMultiplyFusion::GroupConvolutionMultiplyFusion() {
+    auto input = pattern::any_input();
+    auto weights = ngraph::pattern::any_input();  // pattern::has_static_dims({0, 1}) /* has GOIYX layout */
+    auto conv = ngraph::pattern::wrap_type<opset4::GroupConvolution>({input, weights}, pattern::consumers_count(1));
+    auto mul_const = ngraph::pattern::wrap_type<opset4::Constant>();  // pattern::has_static_shape()
+    auto mul = ngraph::pattern::wrap_type<opset4::Multiply>({conv, mul_const});
+
+    matcher_pass_callback callback = [conv, input, weights, mul, mul_const](pattern::Matcher & m) -> bool {
+        const auto & pattern_to_output = m.get_pattern_value_map();
+
+        const auto & m_weights = pattern_to_output.at(weights);
+        const auto & m_const = pattern_to_output.at(mul_const);
+        const auto & m_input = pattern_to_output.at(input);
+        const auto & m_conv = pattern_to_output.at(conv).get_node_shared_ptr();
+        const auto & m_mul = pattern_to_output.at(mul).get_node_shared_ptr();
+
+        const auto & G = m_weights.get_partial_shape()[0].get_length();
+        const auto & O = m_weights.get_partial_shape()[1].get_length();
+        const auto & weights_rank = m_weights.get_partial_shape().rank().get_length();
+        const auto & const_shape = m_const.get_shape();
+
+        bool is_scalar_multiplier = (shape_size(const_shape) == 1);
+
+        // Check that the constant has a shape like [1, C (= G * O), 1, 1],
+        // where the number of trailing 1s equals the number of spatial
+        // dimensions, or is a scalar. Such a constant is applied per output
+        // channel and can be fused into the GroupConvolution weights. The
+        // constant rank must also be less than or equal to the
+        // GroupConvolution output rank, otherwise fusion would break output
+        // broadcasting.
+        auto expected_shape = Shape(weights_rank - 1, 1);
+        expected_shape[1] = G * O;
+
+        if (!check_shapes(expected_shape, const_shape)) {
+            return false;
+        }
+
+        // Reshape the constant to [G, O, 1, 1, 1], padded with 1s up to the
+        // weights rank, so that it aligns with the GOIYX weights for
+        // multiplication. For a scalar multiplier the Reshape is skipped.
+        Output<Node> final_const = m_const;
+        if (!is_scalar_multiplier) {
+            auto final_const_shape = Shape(weights_rank, 1);
+            final_const_shape[0] = G;
+            final_const_shape[1] = O;
+            final_const = std::make_shared<opset4::Reshape>(m_const,
+                    opset4::Constant::create(ngraph::element::i64, ngraph::Shape{final_const_shape.size()},
+                                             final_const_shape), true);
+        }
+
+        // Multiply the convolution weights by the aligned constant values
+        auto weights_multiply = std::make_shared<opset4::Multiply>(m_weights, final_const);
+
+        // Replace GroupConvolution->Multiply with a GroupConvolution with new inputs
+        auto new_conv = m_conv->copy_with_new_inputs({m_input, weights_multiply});
+        new_conv->set_friendly_name(m_mul->get_friendly_name());
+        copy_runtime_info({m_conv, m_mul}, {new_conv, final_const.get_node_shared_ptr(), weights_multiply});
+        replace_node(m_mul, new_conv);
+        return true;
+    };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(mul, "GroupConvolutionMultiplyFusion");
+    register_matcher(m, callback);
+}
+
+ngraph::pass::ConvolutionBackpropDataMultiplyFusion::ConvolutionBackpropDataMultiplyFusion() {
+    auto input = pattern::any_input();
+    auto weights = ngraph::pattern::any_input(pattern::has_static_dim(1) /* has IOYX layout */);
+    auto conv = ngraph::pattern::wrap_type<opset4::ConvolutionBackpropData>({input, weights}, pattern::consumers_count(1));
+    auto mul_const = ngraph::pattern::wrap_type<opset4::Constant>(pattern::has_static_shape());
+    auto mul = ngraph::pattern::wrap_type<opset4::Multiply>({conv, mul_const});
+
+    matcher_pass_callback callback = [conv, input, weights, mul, mul_const](pattern::Matcher & m) -> bool {
+        const auto & pattern_to_output = m.get_pattern_value_map();
+
+        const auto & m_weights = pattern_to_output.at(weights);
+        const auto & m_const = pattern_to_output.at(mul_const);
+        const auto & m_input = pattern_to_output.at(input);
+        const auto & m_conv = pattern_to_output.at(conv).get_node_shared_ptr();
+        const auto & m_mul = pattern_to_output.at(mul).get_node_shared_ptr();
+
+        const auto & channel_dim = m_weights.get_partial_shape()[1].get_length();
+        const auto & weights_rank = m_weights.get_partial_shape().rank().get_length();
+        const auto & const_shape = m_const.get_shape();
+
+        bool is_scalar_multiplier = (shape_size(const_shape) == 1);
+
+        // Check that the constant has a shape like [1, C, 1, 1], where the
+        // number of trailing 1s equals the number of spatial dimensions, or
+        // is a scalar. Such a constant is applied per output channel and can
+        // be fused into the ConvolutionBackpropData weights. The constant
+        // rank must also be less than or equal to the output rank, otherwise
+        // fusion would break output broadcasting.
+        auto expected_shape = Shape(weights_rank, 1);
+        expected_shape[1] = channel_dim;
+
+        if (!check_shapes(expected_shape, const_shape)) {
+            return false;
+        }
+
+        // Reshape the constant to [O, 1, 1], whose rank is the weights rank
+        // minus one (the input-channel dimension), so that it broadcasts over
+        // the input channels and aligns with the IOYX weights for
+        // multiplication. For a scalar multiplier the Reshape is skipped.
+        Output<Node> final_const = m_const;
+        if (!is_scalar_multiplier) {
+            auto final_const_shape = Shape(weights_rank - 1, 1);
+            final_const_shape[0] = channel_dim;
+            final_const = std::make_shared<opset4::Reshape>(m_const,
+                    opset4::Constant::create(ngraph::element::i64, ngraph::Shape{final_const_shape.size()},
+                                             final_const_shape), true);
+        }
+
+        // Multiply the convolution weights by the aligned constant values
+        auto weights_multiply = std::make_shared<opset4::Multiply>(m_weights, final_const);
+
+        // Replace ConvolutionBackpropData->Multiply with a ConvolutionBackpropData with new inputs
+        auto new_conv = m_conv->copy_with_new_inputs({m_input, weights_multiply});
+        new_conv->set_friendly_name(m_mul->get_friendly_name());
+        copy_runtime_info({m_conv, m_mul}, {new_conv, final_const.get_node_shared_ptr(), weights_multiply});
+        replace_node(m_mul, new_conv);
+        return true;
+    };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(mul, "ConvolutionBackpropDataMultiplyFusion");
+    register_matcher(m, callback);
+}
+
+ngraph::pass::GroupConvolutionBackpropDataMultiplyFusion::GroupConvolutionBackpropDataMultiplyFusion() {
+    auto input = pattern::any_input();
+    auto weights = ngraph::pattern::any_input(pattern::has_static_dims({0, 2}) /* has GIOYX layout */);
+    auto conv = ngraph::pattern::wrap_type<opset4::GroupConvolutionBackpropData>({input, weights}, pattern::consumers_count(1));
+    auto mul_const = ngraph::pattern::wrap_type<opset4::Constant>(pattern::has_static_shape());
+    auto mul = ngraph::pattern::wrap_type<opset4::Multiply>({conv, mul_const});
+
+    matcher_pass_callback callback = [conv, input, weights, mul, mul_const](pattern::Matcher & m) -> bool {
+        const auto & pattern_to_output = m.get_pattern_value_map();
+
+        const auto & m_weights = pattern_to_output.at(weights);
+        const auto & m_const = pattern_to_output.at(mul_const);
+        const auto & m_input = pattern_to_output.at(input);
+        const auto & m_conv = pattern_to_output.at(conv).get_node_shared_ptr();
+        const auto & m_mul = pattern_to_output.at(mul).get_node_shared_ptr();
+
+        const auto & G = m_weights.get_partial_shape()[0].get_length();
+        const auto & O = m_weights.get_partial_shape()[2].get_length();
+        const auto & weights_rank = m_weights.get_partial_shape().rank().get_length();
+        const auto & const_shape = m_const.get_shape();
+
+        bool is_scalar_multiplier = (shape_size(const_shape) == 1);
+
+        // Check that the constant has a shape like [1, C (= G * O), 1, 1],
+        // where the number of trailing 1s equals the number of spatial
+        // dimensions, or is a scalar. Such a constant is applied per output
+        // channel and can be fused into the GroupConvolutionBackpropData
+        // weights. The constant rank must also be less than or equal to the
+        // output rank, otherwise fusion would break output broadcasting.
+        auto expected_shape = Shape(weights_rank - 1, 1);
+        expected_shape[1] = G * O;
+
+        if (!check_shapes(expected_shape, const_shape)) {
+            return false;
+        }
+
+        // Reshape the constant to [G, 1, O, 1, 1], padded with 1s up to the
+        // weights rank, so that it broadcasts over the input channels and
+        // aligns with the GIOYX weights for multiplication. For a scalar
+        // multiplier the Reshape is skipped.
+        Output<Node> final_const = m_const;
+        if (!is_scalar_multiplier) {
+            auto final_const_shape = Shape(weights_rank, 1);
+            final_const_shape[0] = G;
+            final_const_shape[2] = O;
+            final_const = std::make_shared<opset4::Reshape>(m_const,
+                    opset4::Constant::create(ngraph::element::i64, ngraph::Shape{final_const_shape.size()},
+                                             final_const_shape), true);
+        }
+
+        // Multiply the convolution weights by the aligned constant values
+        auto weights_multiply = std::make_shared<opset4::Multiply>(m_weights, final_const);
+
+        // Replace GroupConvolutionBackpropData->Multiply with a GroupConvolutionBackpropData with new inputs
+        auto new_conv = m_conv->copy_with_new_inputs({m_input, weights_multiply});
+        new_conv->set_friendly_name(m_mul->get_friendly_name());
+        copy_runtime_info({m_conv, m_mul}, {new_conv, final_const.get_node_shared_ptr(), weights_multiply});
+        replace_node(m_mul, new_conv);
+        return true;
+    };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(mul, "GroupConvolutionBackpropDataMultiplyFusion");
+    register_matcher(m, callback);
+}
\ No newline at end of file
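As a reader aid (not part of the patch), the shape check used by all four passes can be exercised in isolation. The sketch below re-implements its loop over plain vectors and records which constant shapes a 2D Convolution with 20 output channels (reference shape [1, 20, 1, 1]) would accept; the name check_shapes_demo is illustrative only.

// Stand-alone re-implementation of the check_shapes logic above (demo only).
#include <cstddef>
#include <vector>

static bool check_shapes_demo(const std::vector<std::size_t>& ref,
                              const std::vector<std::size_t>& other) {
    if (other.size() > ref.size()) return false;        // would broadcast the output
    auto r = ref.rbegin();
    for (auto o = other.rbegin(); o != other.rend(); ++o, ++r)
        if (*o != *r && *o != 1) return false;          // neither per-channel nor 1
    return true;
}

// check_shapes_demo({1, 20, 1, 1}, {20, 1, 1})      -> true  (per-channel, fused)
// check_shapes_demo({1, 20, 1, 1}, {1, 1})          -> true  (all-ones, fused)
// check_shapes_demo({1, 20, 1, 1}, {3, 1})          -> false (channel dim mismatch)
// check_shapes_demo({1, 20, 1, 1}, {1, 1, 1, 1, 1}) -> false (higher rank broadcasts output)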
diff --git a/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp b/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp
index c8226c8..7f455c1 100644
--- a/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp
+++ b/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp
@@ -48,6 +48,7 @@
 #include
 #include
 #include
+#include <transformations/common_optimizations/conv_mul_fusion.hpp>
 #include
 #include
 #include
@@ -79,10 +80,6 @@ bool ngraph::pass::ConvertOpSet1ToLegacy::run_on_function(std::shared_ptr<ngraph::Function> f) {
     decomp->add_matcher();
     decomp->add_matcher();
     decomp->add_matcher();
-    decomp->add_matcher<ngraph::pass::ConvertConvolution>();
-    decomp->add_matcher<ngraph::pass::ConvertGroupConvolution>();
-    decomp->add_matcher<ngraph::pass::ConvertDeconvolution>();
-    decomp->add_matcher<ngraph::pass::ConvertGroupDeconvolution>();
     decomp->add_matcher();
     decomp->add_matcher();
     decomp->add_matcher();
@@ -95,10 +92,23 @@ bool ngraph::pass::ConvertOpSet1ToLegacy::run_on_function(std::shared_ptr<ngraph::Function> f) {
     ngraph::pass::Manager manager;
+    manager.register_pass<ngraph::pass::ConvolutionMultiplyFusion>();
+    manager.register_pass<ngraph::pass::GroupConvolutionMultiplyFusion>();
+    manager.register_pass<ngraph::pass::ConvolutionBackpropDataMultiplyFusion>();
+    manager.register_pass<ngraph::pass::GroupConvolutionBackpropDataMultiplyFusion>();
+    manager.register_pass<ngraph::pass::ConstantFolding>();
+
+    // Convert Convolution/Deconvolution operations to their legacy representation
+    auto convert_convolutions = manager.register_pass<ngraph::pass::GraphRewrite>();
+    convert_convolutions->add_matcher<ngraph::pass::ConvertConvolution>();
+    convert_convolutions->add_matcher<ngraph::pass::ConvertGroupConvolution>();
+    convert_convolutions->add_matcher<ngraph::pass::ConvertDeconvolution>();
+    convert_convolutions->add_matcher<ngraph::pass::ConvertGroupDeconvolution>();
+    convert_convolutions->set_name("ngraph::pass::ConvertConvolutions");
+
     // Convolution/Deconvolution/FullyConnected fusions
     auto fusion = manager.register_pass<ngraph::pass::GraphRewrite>();
     fusion->add_matcher<ngraph::pass::ConvAddFusion>();
-    fusion->add_matcher<ngraph::pass::ConvMultiplyFusion>();
     fusion->add_matcher<ngraph::pass::DeconvAddFusion>();
     fusion->add_matcher<ngraph::pass::FullyConnectedBiasFusion>();
     fusion->set_name("ngraph::pass::Fusions");
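Before the test instantiations that follow, it may help to see what a positive case is expected to collapse to. This sketch (not from the patch; the function name fused_reference is illustrative) builds the fused reference graph for the Convolution_2D instance below. After fusion plus constant folding the function holds exactly four ops (Parameter, weights Constant, Convolution, Result), which is the expected op count of 4 in the positive cases; rejected negative cases keep the Multiply and its Constant for a total of six.

// Reference graph a positive 2D Convolution case should fold into (demo only).
#include <memory>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset4.hpp>

using namespace ngraph;

std::shared_ptr<Function> fused_reference() {
    auto param = std::make_shared<opset4::Parameter>(element::f32, Shape{1, 3, 64, 64});
    // Weights already carry the folded multiplier; a single value broadcasts here.
    auto weights = opset4::Constant::create(element::f32, Shape{20, 3, 1, 1}, {0});
    auto conv = std::make_shared<opset4::Convolution>(param, weights,
            Strides{1, 1}, CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1});
    // 4 ops in total: Parameter, Constant, Convolution, Result.
    return std::make_shared<Function>(OutputVector{conv}, ParameterVector{param});
}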
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/conv_mul_fusion.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/conv_mul_fusion.cpp
new file mode 100644
index 0000000..d88c692
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/conv_mul_fusion.cpp
@@ -0,0 +1,159 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "subgraph_tests/conv_mul_fusion.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+    const std::vector<ngraph::element::Type> types{ngraph::element::f32, ngraph::element::f16};
+
+    INSTANTIATE_TEST_CASE_P(Convolution_1D, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::Convolution::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 8, 64}),
+                                    ::testing::Values(ngraph::Shape{64, 8, 1}),
+                                    ::testing::Values(ngraph::Shape{64, 1}),
+                                    ::testing::Values(4 /* Param->Conv(Weights)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(GroupConvolution_1D, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::GroupConvolution::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 12, 5}),
+                                    ::testing::Values(ngraph::Shape{4, 5, 3, 2}),
+                                    ::testing::Values(ngraph::Shape{20, 1}),
+                                    ::testing::Values(4 /* Param->Conv(Weights)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(ConvolutionBackpropData_1D, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::ConvolutionBackpropData::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 12, 64}),
+                                    ::testing::Values(ngraph::Shape{12, 20, 1}),
+                                    ::testing::Values(ngraph::Shape{20, 1}),
+                                    ::testing::Values(4 /* Param->Conv(Weights)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(GroupConvolutionBackpropData_1D, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::GroupConvolutionBackpropData::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 12, 64}),
+                                    ::testing::Values(ngraph::Shape{4, 3, 5, 1}),
+                                    ::testing::Values(ngraph::Shape{1, 20, 1}),
+                                    ::testing::Values(4 /* Param->Conv(Weights)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+
+    const std::vector<ngraph::Shape> const_shapes_2d{
+            {},
+            {1, 1},
+            {1, 1, 1},
+            {20, 1, 1},
+            {1, 1, 1, 1}
+    };
+
+    INSTANTIATE_TEST_CASE_P(Convolution_2D, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::Convolution::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 3, 64, 64}),
+                                    ::testing::Values(ngraph::Shape{20, 3, 1, 1}),
+                                    ::testing::ValuesIn(const_shapes_2d),
+                                    ::testing::Values(4 /* Param->Conv(Weights)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(GroupConvolution_2D, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::GroupConvolution::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 12, 64, 64}),
+                                    ::testing::Values(ngraph::Shape{4, 5, 3, 1, 2}),
+                                    ::testing::ValuesIn(const_shapes_2d),
+                                    ::testing::Values(4 /* Param->Conv(Weights)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(ConvolutionBackpropData_2D, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::ConvolutionBackpropData::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 12, 64, 64}),
+                                    ::testing::Values(ngraph::Shape{12, 20, 1, 1}),
+                                    ::testing::ValuesIn(const_shapes_2d),
+                                    ::testing::Values(4 /* Param->Conv(Weights)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(GroupConvolutionBackpropData_2D, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::GroupConvolutionBackpropData::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 12, 64, 64}),
+                                    ::testing::Values(ngraph::Shape{4, 3, 5, 1, 1}),
+                                    ::testing::ValuesIn(const_shapes_2d),
+                                    ::testing::Values(4 /* Param->Conv(Weights)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+
+    const std::vector<ngraph::Shape> neg_const_shapes_2d{
+            {1, 1, 1, 1, 1},  /* broadcasts the output */
+            {3}, {3, 1}, {3, 1, 1, 1}
+    };
+
+    INSTANTIATE_TEST_CASE_P(Convolution_2D_Negative, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::Convolution::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 3, 3, 3}),
+                                    ::testing::Values(ngraph::Shape{4, 3, 1, 1}),
+                                    ::testing::ValuesIn(neg_const_shapes_2d),
+                                    ::testing::Values(6 /* Param->Conv(Weights)->Multiply(Const)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(GroupConvolution_2D_Negative, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::GroupConvolution::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 12, 3, 3}),
+                                    ::testing::Values(ngraph::Shape{4, 5, 3, 1, 1}),
+                                    ::testing::ValuesIn(neg_const_shapes_2d),
+                                    ::testing::Values(6 /* Param->Conv(Weights)->Multiply(Const)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(ConvolutionBackpropData_2D_Negative, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::ConvolutionBackpropData::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 12, 3, 3}),
+                                    ::testing::Values(ngraph::Shape{12, 3, 1, 1}),
+                                    ::testing::ValuesIn(neg_const_shapes_2d),
+                                    ::testing::Values(6 /* Param->Conv(Weights)->Multiply(Const)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(GroupConvolutionBackpropData_2D_Negative, ConvMultiply,
+                            ::testing::Combine(
+                                    ::testing::Values(ngraph::opset4::GroupConvolutionBackpropData::type_info),
+                                    ::testing::Values(ngraph::Shape{1, 12, 3, 3}),
+                                    ::testing::Values(ngraph::Shape{4, 3, 5, 1, 1}),
+                                    ::testing::ValuesIn(neg_const_shapes_2d),
+                                    ::testing::Values(6 /* Param->Conv(Weights)->Multiply(Const)->Result */),
+                                    ::testing::ValuesIn(types),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            ConvMultiply::getTestCaseName);
+}  // namespace
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/conv_mul_fusion.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/conv_mul_fusion.hpp
new file mode 100644
index 0000000..a3f621b
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/conv_mul_fusion.hpp
@@ -0,0 +1,34 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include <ngraph/ngraph.hpp>
+#include <ngraph/opsets/opset4.hpp>
+
+namespace LayerTestsDefinitions {
+
+typedef std::tuple<
+        ngraph::NodeTypeInfo,   // Convolution type
+        ngraph::Shape,          // Input shape
+        ngraph::Shape,          // Weights shape
+        ngraph::Shape,          // Const shape
+        int64_t,                // Number of ops in final function
+        ngraph::element::Type,  // Network precision
+        std::string             // Device name
+        > ConvMultiplyParams;
+
+class ConvMultiply
+        : public testing::WithParamInterface<ConvMultiplyParams>,
+          public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<ConvMultiplyParams> &obj);
+
+protected:
+    void SetUp() override;
+};
+}  // namespace LayerTestsDefinitions
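To make the parameterisation concrete, here is one ConvMultiplyParams value spelled out by hand (illustrative only, not part of the patch), matching the Convolution_2D instantiation above:

// Illustrative only: one ConvMultiplyParams tuple written out explicitly.
LayerTestsDefinitions::ConvMultiplyParams example{
        ngraph::opset4::Convolution::type_info,  // convolution type
        ngraph::Shape{1, 3, 64, 64},             // input shape
        ngraph::Shape{20, 3, 1, 1},              // weights shape (OIYX)
        ngraph::Shape{20, 1, 1},                 // per-channel multiplier
        4,                                       // ops expected after fusion
        ngraph::element::f32,                    // network precision
        "CPU"                                    // device name (CommonTestUtils::DEVICE_CPU)
};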
diff --git a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/conv_mul_fusion.cpp b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/conv_mul_fusion.cpp
new file mode 100644
index 0000000..bdc4920
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/conv_mul_fusion.cpp
@@ -0,0 +1,80 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include <memory>
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/precision_utils.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+#include "subgraph_tests/conv_mul_fusion.hpp"
+
+#include <transformations/common_optimizations/conv_mul_fusion.hpp>
+#include <ngraph/pass/manager.hpp>
+#include <ngraph/pass/constant_folding.hpp>
+
+namespace LayerTestsDefinitions {
+
+std::string ConvMultiply::getTestCaseName(const testing::TestParamInfo<ConvMultiplyParams> &obj) {
+    ngraph::NodeTypeInfo conv_type;
+    ngraph::Shape input_shape, weights_shape, const_shape;
+    ngraph::element::Type precision;
+    std::string targetName;
+    int64_t expected_number_of_ops;
+    std::tie(conv_type, input_shape, weights_shape, const_shape, expected_number_of_ops, precision, targetName) = obj.param;
+    std::ostringstream results;
+
+    results << conv_type.name << "_";
+    results << "Input" << CommonTestUtils::vec2str(input_shape);
+    results << "Weights" << CommonTestUtils::vec2str(weights_shape);
+    results << "Const" << CommonTestUtils::vec2str(const_shape);
+    results << "netPRC=" << precision << "_";
+    results << "targetDevice=" << targetName << "_";
+    return results.str();
+}
+
+void ConvMultiply::SetUp() {
+    ngraph::NodeTypeInfo conv_type;
+    ngraph::Shape input_shape, weights_shape, const_shape;
+    ngraph::element::Type precision;
+    int64_t expected_number_of_ops;
+    std::tie(conv_type, input_shape, weights_shape, const_shape, expected_number_of_ops, precision, targetDevice) = this->GetParam();
+
+    {
+        auto param = std::make_shared<ngraph::opset4::Parameter>(precision, input_shape);
+        auto spatial_dims = input_shape.size() - 2;
+
+        ngraph::Strides strides(spatial_dims, 1);
+        std::vector<ptrdiff_t> pad_begin(spatial_dims, 0), pad_end(spatial_dims, 0);
+        auto weights = ngraph::builder::makeConstant<float>(precision, weights_shape, {}, true);
+        auto mul_const = ngraph::builder::makeConstant<float>(precision, const_shape, {}, true);
+        std::shared_ptr<ngraph::Node> conv;
+        if (conv_type == ngraph::opset4::Convolution::type_info) {
+            conv = std::make_shared<ngraph::opset4::Convolution>(param, weights, strides, pad_begin, pad_end, strides);
+        } else if (conv_type == ngraph::opset4::GroupConvolution::type_info) {
+            conv = std::make_shared<ngraph::opset4::GroupConvolution>(param, weights, strides, pad_begin, pad_end, strides);
+        } else if (conv_type == ngraph::opset4::ConvolutionBackpropData::type_info) {
+            conv = std::make_shared<ngraph::opset4::ConvolutionBackpropData>(param, weights, strides, pad_begin, pad_end, strides);
+        } else if (conv_type == ngraph::opset4::GroupConvolutionBackpropData::type_info) {
+            conv = std::make_shared<ngraph::opset4::GroupConvolutionBackpropData>(param, weights, strides, pad_begin, pad_end, strides);
+        } else {
+            throw ngraph::ngraph_error("Unsupported type");
+        }
+
+        auto mul = std::make_shared<ngraph::opset4::Multiply>(conv, mul_const);
+
+        function = std::make_shared<ngraph::Function>(ngraph::OutputVector{mul}, ngraph::ParameterVector{param}, "conv_multiply");
+    }
+
+    ngraph::pass::Manager manager;
+    manager.register_pass<ngraph::pass::ConvolutionMultiplyFusion>();
+    manager.register_pass<ngraph::pass::GroupConvolutionMultiplyFusion>();
+    manager.register_pass<ngraph::pass::ConvolutionBackpropDataMultiplyFusion>();
+    manager.register_pass<ngraph::pass::GroupConvolutionBackpropDataMultiplyFusion>();
+    manager.register_pass<ngraph::pass::ConstantFolding>();
+    manager.run_passes(function);
+
+    ASSERT_EQ(function->get_ops().size(), expected_number_of_ops);
+}
+
+TEST_P(ConvMultiply, CompareWithRefs) {
+    Run();
+}
+}  // namespace LayerTestsDefinitions
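The remaining hunks add the pattern-matching predicates that the fusion passes rely on. A short sketch (not from the patch; build_pattern is an illustrative name) of how they combine when building a pattern:

// Illustrative only: building the Conv->Multiply pattern with the new predicates.
#include <ngraph/opsets/opset4.hpp>
#include <ngraph/pattern/op/label.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>

using namespace ngraph;

void build_pattern() {
    // Any producer whose output dimension 0 is static (e.g. OIYX weights).
    auto weights = pattern::any_input(pattern::has_static_dim(0));
    // A Convolution over those weights that has exactly one consumer.
    auto conv = pattern::wrap_type<opset4::Convolution>({pattern::any_input(), weights},
                                                        pattern::consumers_count(1));
    // A Constant whose shape is fully static.
    auto mul_const = pattern::wrap_type<opset4::Constant>(pattern::has_static_shape());
    // The Multiply node the fusion ultimately replaces.
    auto mul = pattern::wrap_type<opset4::Multiply>({conv, mul_const});
    (void)mul;
}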
diff --git a/ngraph/core/include/ngraph/pattern/op/label.hpp b/ngraph/core/include/ngraph/pattern/op/label.hpp
index e621ad0..e172f97 100644
--- a/ngraph/core/include/ngraph/pattern/op/label.hpp
+++ b/ngraph/core/include/ngraph/pattern/op/label.hpp
@@ -145,5 +145,8 @@ namespace ngraph
 
         NGRAPH_API
         std::shared_ptr<Node> any_input();
+
+        NGRAPH_API
+        std::shared_ptr<Node> any_input(const pattern::op::ValuePredicate& pred);
     }
 }
diff --git a/ngraph/core/include/ngraph/pattern/op/pattern.hpp b/ngraph/core/include/ngraph/pattern/op/pattern.hpp
index ca23d39..64b4f98 100644
--- a/ngraph/core/include/ngraph/pattern/op/pattern.hpp
+++ b/ngraph/core/include/ngraph/pattern/op/pattern.hpp
@@ -52,6 +52,15 @@ namespace ngraph
         NGRAPH_API
         std::function<bool(Output<Node>)> consumers_count(size_t n);
 
+        NGRAPH_API
+        std::function<bool(Output<Node>)> has_static_dim(size_t pos);
+
+        NGRAPH_API
+        std::function<bool(Output<Node>)> has_static_dims(const std::vector<size_t>& dims);
+
+        NGRAPH_API
+        std::function<bool(Output<Node>)> has_static_shape();
+
         namespace op
         {
             using NodePredicate = std::function<bool(std::shared_ptr<Node>)>;
diff --git a/ngraph/core/src/pattern/op/label.cpp b/ngraph/core/src/pattern/op/label.cpp
index fbc9d8a..456d42b 100644
--- a/ngraph/core/src/pattern/op/label.cpp
+++ b/ngraph/core/src/pattern/op/label.cpp
@@ -64,4 +64,9 @@ bool pattern::op::Label::match_value(Matcher* matcher,
 std::shared_ptr<Node> pattern::any_input()
 {
     return std::make_shared<pattern::op::Label>();
+}
+
+std::shared_ptr<Node> pattern::any_input(const pattern::op::ValuePredicate& pred)
+{
+    return std::make_shared<pattern::op::Label>(element::dynamic, PartialShape::dynamic(), pred);
 }
\ No newline at end of file
diff --git a/ngraph/core/src/pattern/op/pattern.cpp b/ngraph/core/src/pattern/op/pattern.cpp
index 87e078c..d1e13e7 100644
--- a/ngraph/core/src/pattern/op/pattern.cpp
+++ b/ngraph/core/src/pattern/op/pattern.cpp
@@ -67,5 +67,32 @@ namespace ngraph
             return [=](Output<Node> output) -> bool {
                 return output.get_target_inputs().size() == n;
             };
         }
+
+        std::function<bool(Output<Node>)> has_static_dim(size_t pos)
+        {
+            return [=](Output<Node> output) -> bool {
+                const auto& shape = output.get_partial_shape();
+                return shape.rank().is_static() &&
+                       shape.rank().get_length() > static_cast<int64_t>(pos) &&
+                       shape[pos].is_static();
+            };
+        }
+
+        std::function<bool(Output<Node>)> has_static_dims(const std::vector<size_t>& dims)
+        {
+            return [=](Output<Node> output) -> bool {
+                const auto& shape = output.get_partial_shape();
+                return shape.rank().is_static() &&
+                       shape.rank().get_length() >
+                           static_cast<int64_t>(*std::max_element(dims.begin(), dims.end())) &&
+                       std::all_of(dims.begin(), dims.end(), [&shape](size_t pos) {
+                           return shape[pos].is_static();
+                       });
+            };
+        }
+
+        std::function<bool(Output<Node>)> has_static_shape()
+        {
+            return
+                [=](Output<Node> output) -> bool { return output.get_partial_shape().is_static(); };
+        }
     }
 }
-- 
2.7.4