[IE][VPU]: Support for Bidirectional mode in Broadcast DTS (#2873)
author    Andrew Bakalin <andrew.bakalin@intel.com>
          Tue, 3 Nov 2020 08:59:06 +0000 (11:59 +0300)
committer GitHub <noreply@github.com>
          Tue, 3 Nov 2020 08:59:06 +0000 (11:59 +0300)
* [VPU][DTS] Update broadcast DTS to support BIDIRECTIONAL mode
* [VPU][Tests] Update tests to run inference
* [VPU][Tests] Extend DTS tests
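
In BIDIRECTIONAL mode the output shape is not simply the target shape: the input and
target shapes are right-aligned and every output dimension is the element-wise maximum
of the aligned pair, with the leading dimensions of the longer shape copied as-is. For
example, broadcasting an input of shape {16, 1, 14} against a target shape {2, 16, 15, 14}
yields {2, 16, 15, 14}; so does broadcasting {2, 16, 15, 14} against {16, 1, 14}.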

inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_broadcast.cpp
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_broadcast.cpp
inference-engine/tests/functional/plugin/myriad/subgraph_tests/nonzero_broadcast.cpp

inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_broadcast.cpp
index cd1a100..15a0b0c 100644
 
 #include "ngraph/graph_util.hpp"
 #include "ngraph/opsets/opset3.hpp"
+#include "ngraph/opsets/opset5.hpp"
 
 #include <memory>
+#include <algorithm>
 
 namespace vpu {
 
@@ -29,19 +31,68 @@ void dynamicToStaticShapeBroadcast(std::shared_ptr<ngraph::Node> target) {
                 broadcast->input_value(0),
                 broadcast->input_value(1),
                 broadcast->input_value(2));
-    } else if (broadcast->get_broadcast_spec() == ngraph::op::BroadcastType::NUMPY) {
+    } else if (broadcast->get_broadcast_spec() == ngraph::op::BroadcastType::NUMPY ||
+               broadcast->get_broadcast_spec() == ngraph::op::BroadcastType::BIDIRECTIONAL) {
         staticShapeBroadcast = std::make_shared<ngraph::vpu::op::StaticShapeBroadcast>(
                 broadcast->input_value(0),
-                broadcast->input_value(1));
+                broadcast->input_value(1),
+                broadcast->get_broadcast_spec());
     } else {
-        VPU_THROW_FORMAT("dynamicToStaticShapeBroadcast supports only explicit and numpy modes,"
+        VPU_THROW_FORMAT("dynamicToStaticShapeBroadcast supports only explicit, numpy and bidirectional modes, "
                          "provided {}", broadcast->get_broadcast_spec().m_type);
     }
 
-    auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(
-            staticShapeBroadcast->output(0), broadcast->input_value(1));
-    dsr->set_friendly_name(broadcast->get_friendly_name());
+    std::shared_ptr<ngraph::Node> dsr;
+
+    if (broadcast->get_broadcast_spec() == ngraph::op::BroadcastType::BIDIRECTIONAL) {
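+        // For BIDIRECTIONAL mode the target shape alone does not determine the output:
+        // build a shape subgraph that right-aligns the input and target shapes, copies
+        // the leading dimensions of the higher-rank shape as-is, and takes the
+        // element-wise Maximum over the overlapping dimensions.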
+        const auto inputShape = broadcast->get_input_shape(0);
+
+        const auto targetShape = broadcast->input_value(1).get_node_shared_ptr();
+        const auto shapeType = targetShape->get_element_type();
+
+        const auto inputShapeDimsCount = inputShape.size();
+        const auto targetShapeDimsCount = ngraph::shape_size(broadcast->get_input_partial_shape(1).get_shape());
+
+        const auto inputShapeConst = std::make_shared<ngraph::opset5::Constant>(
+            shapeType,
+            ngraph::Shape{static_cast<size_t>(inputShapeDimsCount)},
+            inputShape);
+
+        const auto minRank = std::min(inputShapeDimsCount, targetShapeDimsCount);
+        const auto maxRank = std::max(inputShapeDimsCount, targetShapeDimsCount);
+        const auto minRankNode = minRank == inputShapeDimsCount ? inputShapeConst : targetShape;
+        const auto maxRankNode = minRank == inputShapeDimsCount ? targetShape : inputShapeConst;
+
+        ngraph::NodeVector dims;
 
+        for (int i = 0; i < maxRank - minRank; i++) {
+            dims.push_back(
+                std::make_shared<ngraph::opset5::Gather>(
+                    maxRankNode,
+                    ngraph::opset5::Constant::create(shapeType, ngraph::Shape{1}, {i}),
+                    ngraph::opset5::Constant::create(shapeType, ngraph::Shape{1}, {0})));
+        }
+
+        for (int i = 0; i < minRank; i++) {
+            const auto minRankDim = std::make_shared<ngraph::opset5::Gather>(
+                minRankNode,
+                ngraph::opset5::Constant::create(shapeType, ngraph::Shape{1}, {i}),
+                ngraph::opset5::Constant::create(shapeType, ngraph::Shape{1}, {0}));
+            const auto maxRankDim = std::make_shared<ngraph::opset5::Gather>(
+                maxRankNode,
+                ngraph::opset5::Constant::create(shapeType, ngraph::Shape{1}, {maxRank - minRank + i}),
+                ngraph::opset5::Constant::create(shapeType, ngraph::Shape{1}, {0}));
+            dims.push_back(std::make_shared<ngraph::opset5::Maximum>(minRankDim, maxRankDim));
+        }
+
+        const auto outShape = std::make_shared<ngraph::opset5::Concat>(dims, 0);
+
+        dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(staticShapeBroadcast->output(0), outShape);
+    } else {
+        dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(staticShapeBroadcast->output(0), broadcast->input_value(1));
+    }
+
+    dsr->set_friendly_name(broadcast->get_friendly_name());
     ngraph::replace_node(std::move(target), std::move(dsr));
 }
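
For reference, the shape rule that the Gather/Maximum/Concat subgraph above encodes can be
written directly on plain dimension vectors; a minimal standalone sketch (not part of the
patch, names are illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Right-align the two shapes, copy the leading dimensions of the longer
    // one, and take the element-wise maximum over the overlapping dimensions.
    std::vector<int64_t> bidirectionalShape(std::vector<int64_t> a, std::vector<int64_t> b) {
        if (a.size() < b.size())
            std::swap(a, b);  // make `a` the higher-rank shape
        const size_t offset = a.size() - b.size();
        std::vector<int64_t> out(a.begin(), a.begin() + offset);
        for (size_t i = 0; i < b.size(); ++i)
            out.push_back(std::max(a[offset + i], b[i]));
        return out;
    }

    // bidirectionalShape({16, 1, 14}, {2, 16, 15, 14}) -> {2, 16, 15, 14}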
 
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_broadcast.cpp
index 428bc19..c8755b7 100644
@@ -10,6 +10,7 @@
 #include <ngraph_functions/utils/ngraph_helpers.hpp>
 #include <ngraph/function.hpp>
 #include <ngraph/opsets/opset3.hpp>
+#include <ngraph/opsets/opset5.hpp>
 
 #include <common_test_utils/test_common.hpp>
 #include <gtest/gtest.h>
@@ -25,16 +26,16 @@ using TensorType  = ngraph::element::Type;
 using TensorShape = ngraph::PartialShape;
 using AxesMapping = std::vector<size_t>;
 
-struct BroadcastExplicitShapes {
+struct BroadcastShapes {
     TensorShape srcShape;
     TensorShape targetShape;
     AxesMapping axesMapping;
 };
-using BroadcastExplicitTestParams = std::tuple<TensorType, BroadcastExplicitShapes>;
+using BroadcastTestParams = std::tuple<TensorType, BroadcastShapes>;
 
-class DynamicToStaticShapeBroadcastTests
+class DynamicToStaticShapeBroadcastExplicitTests
         : public CommonTestUtils::TestsCommon,
-          public testing::WithParamInterface<BroadcastExplicitTestParams> {
+          public testing::WithParamInterface<BroadcastTestParams> {
 public:
     void SetUp() override {
         const auto& parameters = GetParam();
@@ -44,8 +45,8 @@ public:
         const auto& axesMapping = std::get<1>(parameters).axesMapping;
 
         ngraph::helpers::CompareFunctions(
-                *transform(tensorType, tensorShape, targetShape, axesMapping),
-                *reference(tensorType, tensorShape, targetShape, axesMapping));
+            *transform(tensorType, tensorShape, targetShape, axesMapping),
+            *reference(tensorType, tensorShape, targetShape, axesMapping));
     }
 
 protected:
@@ -54,37 +55,32 @@ protected:
             const TensorShape& tensorShape,
             const TensorShape& targetShape,
             const AxesMapping& axesMapping) const {
-        const auto tensorParam = std::make_shared<ngraph::opset3::Parameter>(
-                tensorType, tensorShape);
-        const auto tensorWithTargetShapeParam = std::make_shared<ngraph::opset3::Parameter>(
-                tensorType, targetShape);
+        const auto tensorParam = std::make_shared<ngraph::opset3::Parameter>(tensorType, tensorShape);
+        const auto tensorWithTargetShapeParam = std::make_shared<ngraph::opset3::Parameter>(tensorType, targetShape);
 
         const auto shapeOfNode = std::make_shared<ngraph::opset3::ShapeOf>(tensorWithTargetShapeParam);
         shapeOfNode->set_is_foldable(false);
 
         const auto axesMappingConstant = std::make_shared<ngraph::opset3::Constant>(
-                ngraph::element::u64, ngraph::Shape{axesMapping.size()}, axesMapping);
+            ngraph::element::u64,
+            ngraph::Shape{axesMapping.size()},
+            axesMapping);
 
-        const auto broadcast = std::make_shared<ngraph::opset3::Broadcast>(
-                tensorParam, shapeOfNode, axesMappingConstant);
+        const auto broadcast = std::make_shared<ngraph::opset3::Broadcast>(tensorParam, shapeOfNode, axesMappingConstant);
 
         auto function = std::make_shared<ngraph::Function>(
-                ngraph::NodeVector{broadcast},
-                ngraph::ParameterVector{tensorParam, tensorWithTargetShapeParam},
-                "Actual");
+            ngraph::NodeVector{broadcast},
+            ngraph::ParameterVector{tensorParam, tensorWithTargetShapeParam},
+            "Actual");
 
         // We need to set the broadcast output shape to make its rank static.
         // In the opset3::Broadcast implementation with Explicit mode, the output
         // shape gets a static rank only when the second input is a Concat.
-        std::vector<ngraph::Dimension> broadcastOutShape(
-                shapeOfNode->get_output_shape(0)[0], ngraph::Dimension::dynamic());
-        broadcast->set_output_type(0, tensorParam->get_output_element_type(0),
-                                   ngraph::PartialShape(broadcastOutShape));
-        function->get_result()->set_output_type(0, tensorParam->get_output_element_type(0),
-                                                targetShape);
-
-        const auto transformations = vpu::Transformations{{
-            ngraph::opset3::Broadcast::type_info, vpu::dynamicToStaticShapeBroadcast}};
+        std::vector<ngraph::Dimension> broadcastOutShape(shapeOfNode->get_output_shape(0)[0], ngraph::Dimension::dynamic());
+        broadcast->set_output_type(0, tensorParam->get_output_element_type(0), ngraph::PartialShape(broadcastOutShape));
+        function->get_result()->set_output_type(0, tensorParam->get_output_element_type(0), targetShape);
+
+        const auto transformations = vpu::Transformations{{ngraph::opset3::Broadcast::type_info, vpu::dynamicToStaticShapeBroadcast}};
         vpu::DynamicToStaticShape(transformations).run_on_function(function);
         return function;
     }
@@ -94,40 +90,154 @@ protected:
             const TensorShape& tensorShape,
             const TensorShape& targetShape,
             const AxesMapping& axesMapping) const {
-        const auto tensorParam = std::make_shared<ngraph::opset3::Parameter>(
-                tensorType, tensorShape);
-        const auto tensorWithTargetShapeParam = std::make_shared<ngraph::opset3::Parameter>(
-                tensorType, targetShape);
+        const auto tensorParam = std::make_shared<ngraph::opset3::Parameter>(tensorType, tensorShape);
+        const auto tensorWithTargetShapeParam = std::make_shared<ngraph::opset3::Parameter>(tensorType, targetShape);
         const auto shapeOf = std::make_shared<ngraph::opset3::ShapeOf>(tensorWithTargetShapeParam);
 
         const auto axesMappingConstant = std::make_shared<ngraph::opset3::Constant>(
-                ngraph::element::u64, ngraph::Shape{axesMapping.size()}, axesMapping);
+            ngraph::element::u64,
+            ngraph::Shape{axesMapping.size()},
+            axesMapping);
+
+        const auto staticShapeBroadcast = std::make_shared<ngraph::vpu::op::StaticShapeBroadcast>(tensorParam, shapeOf, axesMappingConstant);
+
+        const auto dsrOut = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(staticShapeBroadcast, shapeOf);
+        return std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{dsrOut},
+            ngraph::ParameterVector{tensorParam, tensorWithTargetShapeParam},
+            "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeBroadcastExplicitTests, compareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(smoke_NGraph, DynamicToStaticShapeBroadcastExplicitTests, testing::Combine(
+        testing::Values(
+            ngraph::element::f16,
+            ngraph::element::f32,
+            ngraph::element::i32,
+            ngraph::element::i64,
+            ngraph::element::u8),
+        testing::Values(
+            BroadcastShapes{TensorShape{16}, TensorShape{1, 16, 50, 50}, AxesMapping{1}},
+            BroadcastShapes{TensorShape{50, 50}, TensorShape{1, 50, 50, 16}, AxesMapping{1, 2}})
+
+));
+
+class DynamicToStaticShapeBroadcastBidirectionalTests : public CommonTestUtils::TestsCommon,
+                                                        public testing::WithParamInterface<BroadcastTestParams> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& tensorType  = std::get<0>(parameters);
+        const auto& tensorShape = std::get<1>(parameters).srcShape;
+        const auto& targetShape = std::get<1>(parameters).targetShape;
+
+        ngraph::helpers::CompareFunctions(
+            *transform(tensorType, tensorShape, targetShape),
+            *reference(tensorType, tensorShape, targetShape));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+            const TensorType& tensorType,
+            const TensorShape& tensorShape,
+            const TensorShape& targetShape) const {
+        const auto tensorParam = std::make_shared<ngraph::opset5::Parameter>(tensorType, tensorShape);
+        const auto tensorWithTargetShapeParam = std::make_shared<ngraph::opset5::Parameter>(tensorType, targetShape);
+
+        const auto shapeOfNode = std::make_shared<ngraph::opset5::ShapeOf>(tensorWithTargetShapeParam);
+        shapeOfNode->set_is_foldable(false);
+
+        const auto broadcast = std::make_shared<ngraph::opset5::Broadcast>(tensorParam, shapeOfNode, ngraph::op::BroadcastType::BIDIRECTIONAL);
+
+        auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{broadcast},
+            ngraph::ParameterVector{tensorParam, tensorWithTargetShapeParam},
+            "Actual");
+
+        const auto transformations = vpu::Transformations{{ngraph::opset5::Broadcast::type_info, vpu::dynamicToStaticShapeBroadcast}};
+        vpu::DynamicToStaticShape(transformations).run_on_function(function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+            const TensorType& tensorType,
+            const TensorShape& tensorShape,
+            const TensorShape& targetShape) const {
+        const auto tensorParam = std::make_shared<ngraph::opset5::Parameter>(tensorType, tensorShape);
+        const auto tensorWithTargetShapeParam = std::make_shared<ngraph::opset5::Parameter>(tensorType, targetShape);
+        std::shared_ptr<ngraph::Node> shapeOf = std::make_shared<ngraph::opset5::ShapeOf>(tensorWithTargetShapeParam);
 
         const auto staticShapeBroadcast = std::make_shared<ngraph::vpu::op::StaticShapeBroadcast>(
-                tensorParam, shapeOf, axesMappingConstant);
+            tensorParam,
+            shapeOf,
+            ngraph::op::BroadcastType::BIDIRECTIONAL);
+
+        const auto tensorShapeDimsCount = tensorShape.rank().get_length();
+        const auto targetShapeDimsCount = targetShape.rank().get_length();
+
+        std::shared_ptr<ngraph::Node> tensorShapeConst = std::make_shared<ngraph::opset5::Constant>(
+            ngraph::element::i64,
+            ngraph::Shape{static_cast<size_t>(tensorShapeDimsCount)},
+            tensorShape.get_shape());
+
+        const auto maxRankNode = tensorShapeDimsCount > targetShapeDimsCount ? tensorShapeConst : shapeOf;
+        const auto minRankNode = maxRankNode == tensorShapeConst ? shapeOf : tensorShapeConst;
+        const auto maxRank = maxRankNode == tensorShapeConst ? tensorShapeDimsCount : targetShapeDimsCount;
+        const auto minRank = minRankNode == tensorShapeConst ? tensorShapeDimsCount : targetShapeDimsCount;
+
+        ngraph::NodeVector dims;
+
+        for (int i = 0; i < maxRank - minRank; i++) {
+            dims.push_back(
+                std::make_shared<ngraph::opset5::Gather>(
+                    maxRankNode,
+                    ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {i}),
+                    ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0})));
+        }
+
+        for (int i = 0; i < minRank; i++) {
+            const auto minRankDim = std::make_shared<ngraph::opset5::Gather>(
+                minRankNode,
+                ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {i}),
+                ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}));
+            const auto maxRankDim = std::make_shared<ngraph::opset5::Gather>(
+                maxRankNode,
+                ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {maxRank - minRank + i}),
+                ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}));
+            dims.push_back(std::make_shared<ngraph::opset5::Maximum>(minRankDim, maxRankDim));
+        }
+
+        const auto outShape = std::make_shared<ngraph::opset5::Concat>(dims, 0);
 
         const auto dsrOut = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(
-                staticShapeBroadcast, shapeOf);
+            staticShapeBroadcast->output(0), outShape);
         return std::make_shared<ngraph::Function>(
-                ngraph::NodeVector{dsrOut},
-                ngraph::ParameterVector{tensorParam, tensorWithTargetShapeParam},
-                "Expected");
+            ngraph::NodeVector{dsrOut},
+            ngraph::ParameterVector{tensorParam, tensorWithTargetShapeParam},
+            "Expected");
     }
 };
 
-TEST_P(DynamicToStaticShapeBroadcastTests, compareFunctions) {
+TEST_P(DynamicToStaticShapeBroadcastBidirectionalTests, compareFunctions) {
 }
 
-INSTANTIATE_TEST_CASE_P(smoke_NGraph, DynamicToStaticShapeBroadcastTests, testing::Combine(
+INSTANTIATE_TEST_CASE_P(smoke_NGraph, DynamicToStaticShapeBroadcastBidirectionalTests, testing::Combine(
         testing::Values(
-                ngraph::element::f16,
-                ngraph::element::f32,
-                ngraph::element::i32,
-                ngraph::element::i64,
-                ngraph::element::u8),
+            ngraph::element::f16,
+            ngraph::element::f32,
+            ngraph::element::i32,
+            ngraph::element::i64,
+            ngraph::element::u8),
         testing::Values(
-                BroadcastExplicitShapes{TensorShape{16}, TensorShape{1, 16, 50, 50}, AxesMapping{1}},
-                BroadcastExplicitShapes{TensorShape{50, 50}, TensorShape{1, 50, 50, 16}, AxesMapping{1, 2}})
+            BroadcastShapes{TensorShape{1, 1, 4}, TensorShape{300, 2, 4}, {}},
+            BroadcastShapes{TensorShape{15,  1}, TensorShape{2, 16, 15, 14}, {}},
+            BroadcastShapes{TensorShape{2, 16, 15, 14}, TensorShape{15, 14}, {}},
+            BroadcastShapes{TensorShape{2, 16, 15, 14}, TensorShape{16,  1,  1}, {}},
+            BroadcastShapes{TensorShape{2, 16, 15, 14}, TensorShape{16,  1, 14}, {}},
+            BroadcastShapes{TensorShape{16, 15,  1}, TensorShape{2, 1, 15, 14}, {}})
 
 ));
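
These cases deliberately cover both directions of rank mismatch: a target with more
dimensions than the input ({15, 1} vs {2, 16, 15, 14}) and an input with more dimensions
than the target ({2, 16, 15, 14} vs {15, 14}), so the reference shape subgraph is checked
with either operand as the higher-rank side.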
 
inference-engine/tests/functional/plugin/myriad/subgraph_tests/nonzero_broadcast.cpp
index 5844351..5b62d68 100644
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
-
-#include "vpu/private_plugin_config.hpp"
+#include "dsr_tests_common.hpp"
 
-#include "common/myriad_common_test_utils.hpp"
-#include <functional_test_utils/layer_test_utils.hpp>
 #include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
 
 namespace {
 
+using namespace LayerTestsUtils::vpu;
+
 using TensorType  = ngraph::element::Type;
 using TensorShape = ngraph::Shape;
 
-using BroadcastExplicitTestParams = std::tuple<
-        TensorType, TensorShape, LayerTestsUtils::TargetDevice>;
+struct BroadcastInputParams {
+    TensorShape inputShape;
+    DataShapeWithUpperBound targetShape;
+    InferenceEngine::SizeVector axesMapping;
+};
+
+using BroadcastTestParams = std::tuple<
+        BroadcastInputParams, TensorType, LayerTestsUtils::TargetDevice>;
 
-class NonZero_Broadcast : public testing::WithParamInterface<BroadcastExplicitTestParams>,
-                          virtual public LayerTestsUtils::LayerTestsCommon {
+
+class NonZero_BroadcastBidirectional : public testing::WithParamInterface<BroadcastTestParams>,
+                                       virtual public LayerTestsUtils::LayerTestsCommon {
 protected:
-    void SetUp() override {
-        SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);
-        configuration[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
-        // DISABLE_REORDER is needed for Myriad2 cases
-        if (CommonTestUtils::vpu::CheckMyriad2()) {
-            configuration[InferenceEngine::MYRIAD_DISABLE_REORDER] = CONFIG_VALUE(YES);
+    size_t getDynamicAxis(const DataShape& shapeA, const DataShape& shapeB) const {
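+        // Assumes shapeA and shapeB differ in at least one axis, which holds for all test cases below.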
+        size_t res = 0;
+        while (shapeA[res] == shapeB[res]) {
+            res++;
         }
+        return res;
+    }
+
+    void prepareBroadcastInputs() {
+        SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);
 
         const auto& parameters = GetParam();
-        const auto& tensorType  = std::get<0>(parameters);
-        const auto& tensorShape = std::get<1>(parameters);
+        const auto& broadcastParams = std::get<0>(parameters);
+        const auto& tensorType = std::get<1>(parameters);
         targetDevice = std::get<2>(GetParam());
 
-        const auto tensorParam = std::make_shared<ngraph::opset3::Parameter>(
-                tensorType, tensorShape);
-        const auto nonZero = std::make_shared<ngraph::opset3::NonZero>(tensorParam);
-        const auto shapeOfNonZero = std::make_shared<ngraph::opset3::ShapeOf>(nonZero);
+        const auto& upperBoundShape = broadcastParams.targetShape.upperBoundShape;
+        const auto& realShape = broadcastParams.targetShape.shape;
+
+        const auto dynamicAxis = getDynamicAxis(upperBoundShape, realShape);
+
+        m_param = std::make_shared<ngraph::opset5::Parameter>(tensorType, TensorShape{upperBoundShape[dynamicAxis]});
+        m_nonZero = std::make_shared<ngraph::opset5::NonZero>(m_param);
+        const auto shapeOfNonZero = std::make_shared<ngraph::opset5::ShapeOf>(m_nonZero);
+        const auto numNonZeros = std::make_shared<ngraph::opset5::Gather>(
+            shapeOfNonZero,
+            ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}),
+            ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}));
+
+        m_broadcastTargetShape = numNonZeros;
+
+        if (dynamicAxis > 0) {
+            m_broadcastTargetShape = std::make_shared<ngraph::opset5::Concat>(
+                ngraph::NodeVector{
+                    ngraph::opset5::Constant::create(
+                        ngraph::element::i64,
+                        ngraph::Shape{dynamicAxis},
+                        std::vector<size_t>{upperBoundShape.begin(), upperBoundShape.begin() + dynamicAxis}),
+                    m_broadcastTargetShape},
+                0);
+        }
+
+        if (dynamicAxis < upperBoundShape.size() - 1) {
+            m_broadcastTargetShape = std::make_shared<ngraph::opset5::Concat>(
+                ngraph::NodeVector{
+                    m_broadcastTargetShape,
+                    ngraph::opset5::Constant::create(
+                        ngraph::element::i64,
+                        ngraph::Shape{upperBoundShape.size() - dynamicAxis - 1},
+                        std::vector<size_t>{upperBoundShape.begin() + dynamicAxis + 1, upperBoundShape.end()})},
+                0);
+        }
+
+        m_broadcastInput = ngraph::builder::makeConstant(tensorType, ngraph::Shape{broadcastParams.inputShape}, std::vector<int64_t>{}, true);
+    }
+
+    void SetUp() override {
+        prepareBroadcastInputs();
+
+        const auto broadcast = std::make_shared<ngraph::opset5::Broadcast>(m_broadcastInput, m_broadcastTargetShape, ngraph::op::BroadcastType::BIDIRECTIONAL);
 
-        const auto broadcastConstant = std::make_shared<ngraph::opset3::Constant>(
-                tensorType, ngraph::Shape{tensorShape.size()}, 1);
-        const auto axesMappingConstant = std::make_shared<ngraph::opset3::Constant>(
-                ngraph::element::u64, ngraph::Shape{1}, 0);
-        const auto broadcast = std::make_shared<ngraph::opset3::Broadcast>(
-                broadcastConstant, shapeOfNonZero, axesMappingConstant);
+        function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{broadcast, m_nonZero},
+            ngraph::ParameterVector{m_param},
+            "NonZero-Broadcast");
+    }
+
+    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
+        // We emulate a dynamic shape through the number of non-zero elements in the NonZero input tensor
+        const auto& broadcastParams = std::get<0>(GetParam());
+        const auto numNonZeros = broadcastParams.targetShape.shape[getDynamicAxis(
+            broadcastParams.targetShape.upperBoundShape,
+            broadcastParams.targetShape.shape)];
+
+        auto tensorDesc = info.getTensorDesc();
+        auto blob = make_blob_with_precision(tensorDesc);
+        blob->allocate();
+        CommonTestUtils::fill_data_const(blob, 0);
 
-        const auto resultBroadcast = std::make_shared<ngraph::opset3::Result>(broadcast);
-        const auto resultNonZero = std::make_shared<ngraph::opset3::Result>(nonZero->output(0));
+        InferenceEngine::SizeVector newDims = {numNonZeros};
+        blob->getTensorDesc().setDims(newDims);
+        CommonTestUtils::fill_data_const(blob, 1);
+
+        blob->getTensorDesc().setDims(tensorDesc.getDims());
+
+        return blob;
+    }
+
+protected:
+    std::shared_ptr<ngraph::Node> m_broadcastInput;
+    std::shared_ptr<ngraph::Node> m_broadcastTargetShape;
+    std::shared_ptr<ngraph::opset5::NonZero> m_nonZero;
+    std::shared_ptr<ngraph::opset5::Parameter> m_param;
+};
+
+TEST_P(NonZero_BroadcastBidirectional, CompareWithReference) {
+    Run();
+}
+
+std::vector<BroadcastInputParams> broadcastBidirectionalTestParams = {
+        { {1, 1, 4}, DataShapeWithUpperBound{ {200, 2, 4}, {300, 2, 4} }, {} },
+        { {15, 14}, DataShapeWithUpperBound{ {2, 16, 1, 14}, {2, 16, 15, 14} }, {} },
+        { {15, 1}, DataShapeWithUpperBound{ {1, 16, 15, 14}, {2, 16, 15, 14} }, {} },
+        { {2, 16, 15, 14}, DataShapeWithUpperBound{ {1, 15, 14}, {16, 15, 14} }, {} },
+        { {2, 16, 15, 14}, DataShapeWithUpperBound{ {16,  1,  1}, {16,  1,  14} }, {} },
+        { {16, 15,  1}, DataShapeWithUpperBound{ {2, 1, 15, 14}, {2, 16, 15, 14} }, {} },
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_DynamicBroadcast, NonZero_BroadcastBidirectional,
+        ::testing::Combine(
+            ::testing::ValuesIn(broadcastBidirectionalTestParams),
+            ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
+            ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+class NonZero_BroadcastExplicit : public NonZero_BroadcastBidirectional {
+protected:
+    void SetUp() override {
+        prepareBroadcastInputs();
+
+        const auto& axesMapping = std::get<0>(GetParam()).axesMapping;
+        const auto axesMappingConst = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{axesMapping.size()}, axesMapping);
+
+        const auto broadcast = std::make_shared<ngraph::opset5::Broadcast>(m_broadcastInput, m_broadcastTargetShape, axesMappingConst);
 
         function = std::make_shared<ngraph::Function>(
-                ngraph::ResultVector{resultBroadcast, resultNonZero},
-                ngraph::ParameterVector{tensorParam},
-                "NonZero-Broadcast");
+            ngraph::NodeVector{broadcast, m_nonZero},
+            ngraph::ParameterVector{m_param},
+            "NonZero-Broadcast");
     }
 };
 
-TEST_P(NonZero_Broadcast, CompareWithReference) {
+TEST_P(NonZero_BroadcastExplicit, CompareWithReference) {
     Run();
 }
 
-INSTANTIATE_TEST_CASE_P(smoke_DynamicBroadcast, NonZero_Broadcast, ::testing::Combine(
-        ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
-        ::testing::Values(
-                TensorShape{1000},
-                TensorShape{4, 1000},
-                TensorShape{3, 128, 256}),
-        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+std::vector<BroadcastInputParams> broadcastExplicitTestParams = {
+        { {1}, DataShapeWithUpperBound{ {1, 800}, {1, 1000} }, {0} },
+        { {4}, DataShapeWithUpperBound{ {100, 4}, {1000, 4} }, {1} },
+        { {128, 256}, DataShapeWithUpperBound{ {1, 128, 256}, {3, 128, 256} }, {1, 2} },
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_DynamicBroadcast, NonZero_BroadcastExplicit,
+        ::testing::Combine(
+            ::testing::ValuesIn(broadcastExplicitTestParams),
+            ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
+            ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
 
 }  // namespace
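
A note on the input-generation trick in GenerateInput above: the blob is first filled with
zeros, its dimensions are temporarily shrunk to {numNonZeros} so that only the first
numNonZeros elements are overwritten with ones, and the original dimensions are then
restored. NonZero therefore reports exactly numNonZeros hits at inference time, which pins
the dynamic axis of the Broadcast target shape to the requested value. For instance, with
DataShapeWithUpperBound{ {200, 2, 4}, {300, 2, 4} } the dynamic axis is 0 and numNonZeros
is 200, so the target shape resolves to {200, 2, 4} within the {300, 2, 4} upper bound.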