[ DTS ] MatMul (#974)
author Evgenya Stepyreva <evgenya.stepyreva@intel.com>
Tue, 23 Jun 2020 06:58:03 +0000 (09:58 +0300)
committer GitHub <noreply@github.com>
Tue, 23 Jun 2020 06:58:03 +0000 (09:58 +0300)
* [ DTS ] MatMul

* [ TESTS ] Dynamic MatMul inference test disabled

inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_matmul.hpp [new file with mode: 0644]
inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape.cpp
inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_matmul.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_matmul.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_matmul.cpp [new file with mode: 0644]
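
For orientation: the new pass replaces a MatMul fed by DynamicShapeResolver inputs with a clone of the MatMul followed by a DynamicShapeResolver whose shape input is computed from the two input shape tensors. Below is a minimal sketch of that shape arithmetic, written as plain C++ over std::vector rather than as the nGraph subgraph the pass actually emits (Concat to pad rank, Gather for transposition and channel dims, Maximum for batch broadcasting); the names here are illustrative and not part of the patch.

// Illustrative only (not part of the patch): the output-shape arithmetic that
// dynamicToStaticShapeMatMul materializes as an nGraph shape subgraph.
#include <algorithm>
#include <cstdint>
#include <vector>

using Shape = std::vector<int64_t>;

static Shape normalize(Shape shape, size_t max_rank, bool transpose) {
    // Pad with leading 1s up to the common rank (which is at least 2).
    shape.insert(shape.begin(), max_rank - shape.size(), 1);
    // A transposed input swaps its two innermost dimensions.
    if (transpose) {
        std::swap(shape[max_rank - 2], shape[max_rank - 1]);
    }
    return shape;
}

Shape matMulOutputShape(Shape a, Shape b, bool transpose_a, bool transpose_b) {
    const size_t max_rank = std::max<size_t>(2, std::max(a.size(), b.size()));
    a = normalize(a, max_rank, transpose_a);
    b = normalize(b, max_rank, transpose_b);

    Shape out;
    // Batch dimensions broadcast element-wise (the pass emits Maximum here).
    for (size_t i = 0; i + 2 < max_rank; ++i) {
        out.push_back(std::max(a[i], b[i]));
    }
    out.push_back(a[max_rank - 2]);  // rows of A
    out.push_back(b[max_rank - 1]);  // columns of B
    return out;
}

// E.g. matMulOutputShape({5, 10, 1024}, {1024, 1000}, false, false) == {5, 10, 1000},
// matching the first enabled case in the new tests.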

diff --git a/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_matmul.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_matmul.hpp
new file mode 100644 (file)
index 0000000..644b9ee
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ngraph/node.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticShapeMatMul(std::shared_ptr<ngraph::Node> node);
+
+}  // namespace vpu
diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape.cpp
index 6843d5a..ca1d961 100644 (file)
@@ -2,24 +2,25 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "vpu/ngraph/transformations/dynamic_to_static_shape.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.hpp"
 #include "vpu/ngraph/transformations/dynamic_to_static_shape_broadcast.hpp"
 #include "vpu/ngraph/transformations/dynamic_to_static_shape_concat.hpp"
-#include "vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_gather.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_matmul.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_nonzero.hpp"
 #include "vpu/ngraph/transformations/dynamic_to_static_shape_reduce.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_reshape.hpp"
 #include "vpu/ngraph/transformations/dynamic_to_static_shape_roialign.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_shapeof.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_squeeze.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_strided_slice.hpp"
 #include "vpu/ngraph/transformations/dynamic_to_static_shape_topk.hpp"
 #include "vpu/ngraph/transformations/dynamic_to_static_shape_transpose.hpp"
-#include "vpu/ngraph/transformations/dynamic_to_static_shape_variadic_split.hpp"
-#include "vpu/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.hpp"
-#include "vpu/ngraph/transformations/dynamic_to_static_shape_nonzero.hpp"
-#include "vpu/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.hpp"
-#include "vpu/ngraph/transformations/dynamic_to_static_shape.hpp"
-#include "vpu/ngraph/transformations/dynamic_to_static_shape_strided_slice.hpp"
-#include "vpu/ngraph/transformations/dynamic_to_static_shape_squeeze.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp"
 #include "vpu/ngraph/transformations/dynamic_to_static_shape_unsqueeze.hpp"
-#include "vpu/ngraph/transformations/dynamic_to_static_shape_gather.hpp"
-#include "vpu/ngraph/transformations/dynamic_to_static_shape_shapeof.hpp"
-#include "vpu/ngraph/transformations/dynamic_to_static_shape_reshape.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_variadic_split.hpp"
 
 #include "vpu/utils/error.hpp"
 
@@ -87,6 +88,7 @@ const Transformations& getDefaultTransformations() {
         {ngraph::opset3::ROIAlign::type_info,  dynamicToStaticShapeROIAlign},
         {ngraph::opset3::Reshape::type_info,   dynamicToStaticShapeReshape},
         {ngraph::opset3::Broadcast::type_info, dynamicToStaticShapeBroadcast},
+        {ngraph::opset3::MatMul::type_info,    dynamicToStaticShapeMatMul},
 
         // reduction
         {ngraph::opset3::ReduceLogicalAnd::type_info, dynamicToStaticShapeReduce},
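
With this entry in getDefaultTransformations(), the DynamicToStaticShape pass now dispatches MatMul nodes to the new rewrite. The rewrite can also be driven on its own, as the new graph-comparison test does, by building a one-entry transformation table; a small sketch follows (the helper name is hypothetical):

#include <ngraph/function.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
#include <vpu/ngraph/transformations/dynamic_to_static_shape_matmul.hpp>

#include <memory>

// Hypothetical helper: runs only the MatMul dynamic-to-static rewrite on a
// function, mirroring how the new test constructs its transformation table.
void runMatMulDTS(const std::shared_ptr<ngraph::Function>& function) {
    const auto transformations = vpu::Transformations{
            {ngraph::opset3::MatMul::type_info, vpu::dynamicToStaticShapeMatMul}};
    vpu::DynamicToStaticShape(transformations).transform(function);
}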
diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_matmul.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_matmul.cpp
new file mode 100644 (file)
index 0000000..8fdd03f
--- /dev/null
@@ -0,0 +1,92 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_matmul.hpp"
+
+#include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
+#include <vpu/utils/error.hpp>
+
+#include "ngraph/graph_util.hpp"
+#include "ngraph/opsets/opset3.hpp"
+
+#include <memory>
+#include <numeric>
+
+namespace vpu {
+
+void get_normalized_shape(ngraph::Output<ngraph::Node> & shape, size_t actual_rank_value, size_t max_rank_value, bool transpose) {
+    if (const unsigned rank_diff = max_rank_value - actual_rank_value) {
+        ngraph::OutputVector extended_shape_parts =
+                {ngraph::opset3::Constant::create(ngraph::element::i64, {rank_diff}, std::vector<int64_t>(rank_diff, 1)), shape};
+        shape = std::make_shared<ngraph::opset3::Concat>(extended_shape_parts, 0);
+    }
+    if (transpose) {
+        std::vector<int64_t> indices_value(max_rank_value);
+        std::iota(indices_value.begin(), indices_value.end(), 0);
+        std::iter_swap(indices_value.rbegin(), indices_value.rbegin() + 1);
+        const auto indices = ngraph::opset3::Constant::create(ngraph::element::i64, {indices_value.size()}, indices_value);
+        const auto axis = ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
+        shape = std::make_shared<ngraph::opset3::Gather>(shape, indices, axis);
+    }
+}
+
+void dynamicToStaticShapeMatMul(std::shared_ptr<ngraph::Node> target) {
+    const auto matmul = ngraph::as_type_ptr<ngraph::opset3::MatMul>(target);
+    VPU_THROW_UNLESS(matmul, "dynamicToStaticShapeMatMul transformation is not applicable for {}, it should be {} instead",
+            target, ngraph::opset3::MatMul::type_info);
+
+    auto shapeToConstant = [&target](const ngraph::Output<ngraph::Node> & output) -> std::shared_ptr<ngraph::opset3::Constant> {
+        VPU_THROW_UNLESS(output.get_partial_shape().is_static(),
+                         "DynamicToStaticShape transformation for {} of type {} expects static shape on inputs without DSR",
+                         target->get_friendly_name(), target->get_type_info());
+        return ngraph::opset3::Constant::create(ngraph::element::i64, {output.get_shape().size()}, output.get_shape());
+    };
+
+    const auto a_input_DSR = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(target->input_value(0).get_node_shared_ptr());
+    const auto b_input_DSR = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(target->input_value(1).get_node_shared_ptr());
+    VPU_THROW_UNLESS(a_input_DSR || b_input_DSR, "DynamicToStaticShape transformation for {} of type {} expects at least one DSR as input",
+                     target->get_friendly_name(), target->get_type_info());
+    ngraph::Output<ngraph::Node> a_input_shape = a_input_DSR ? a_input_DSR->input_value(1) : shapeToConstant(target->input_value(0));
+    ngraph::Output<ngraph::Node> b_input_shape = b_input_DSR ? b_input_DSR->input_value(1) : shapeToConstant(target->input_value(1));
+
+    const auto& a_rank = a_input_shape.get_partial_shape();
+    const auto& b_rank = b_input_shape.get_partial_shape();
+    VPU_THROW_UNLESS(a_rank.is_static() && b_rank.is_static(), "DynamicToStaticShape transformation for {} doesn't support dynamic rank", matmul);
+    const auto a_rank_value = a_rank[0].get_length();
+    const auto b_rank_value = b_rank[0].get_length();
+    const auto max_rank_value = std::max(ngraph::Dimension::value_type(2), std::max(a_rank_value, b_rank_value));
+
+    get_normalized_shape(a_input_shape, a_rank_value, max_rank_value, matmul->get_transpose_a());
+    get_normalized_shape(b_input_shape, b_rank_value, max_rank_value, matmul->get_transpose_b());
+
+    ngraph::OutputVector output_dims;
+    if (max_rank_value > 2) {
+        // batch broadcasting
+        const auto max_shape = std::make_shared<ngraph::opset3::Maximum>(a_input_shape, b_input_shape);
+        std::vector<int64_t> indices_value(max_rank_value - 2);
+        std::iota(indices_value.begin(), indices_value.end(), 0);
+        const auto indices = ngraph::opset3::Constant::create(ngraph::element::i64, {indices_value.size()}, indices_value);
+        const auto axis = ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
+        const auto batch_dims = std::make_shared<ngraph::opset3::Gather>(max_shape, indices, axis);
+        output_dims.push_back(batch_dims);
+    }
+    const auto input_channels = std::make_shared<ngraph::opset3::Gather>(
+            a_input_shape,
+            ngraph::opset3::Constant::create(ngraph::element::i64, {1}, {max_rank_value - 2}),
+            ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0}));
+    const auto output_channels = std::make_shared<ngraph::opset3::Gather>(
+            b_input_shape,
+            ngraph::opset3::Constant::create(ngraph::element::i64, {1}, {max_rank_value - 1}),
+            ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0}));
+    output_dims.push_back(input_channels);
+    output_dims.push_back(output_channels);
+
+    const auto output_shape = std::make_shared<ngraph::opset3::Concat>(output_dims, 0);
+    const auto copied = target->clone_with_new_inputs(target->input_values());
+    auto outDsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(copied, output_shape);
+    outDsr->set_friendly_name(target->get_friendly_name());
+    ngraph::replace_node(target, outDsr);
+}
+
+}  // namespace vpu
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_matmul.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_matmul.cpp
new file mode 100644 (file)
index 0000000..1165fef
--- /dev/null
@@ -0,0 +1,209 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ngraph/opsets/opset3.hpp>
+#include <ngraph/shape.hpp>
+#include <ngraph/type/element_type.hpp>
+
+#include <common_test_utils/test_common.hpp>
+
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape_matmul.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+#include <vpu/utils/error.hpp>
+
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
+
+namespace {
+
+enum DYNAMISM_MODE {
+    BOTH_INPUTS_DYNAMIC,
+    A_INPUT_DYNAMIC,
+    B_INPUT_DYNAMIC
+};
+
+struct MatMul_input_setup {
+    ngraph::Shape shape;
+    bool transpose;
+    // data for MatMul shape normalization and calculation
+    uint64_t rank_diff;
+    std::vector<int64_t> gather_idxs_for_transpose, batch_gather_idxs;
+    int64_t channel_idx;
+};
+
+struct MatMulTestCase {
+    MatMul_input_setup A, B;
+};
+
+const auto combinations = testing::Combine(
+    testing::Values(
+            DYNAMISM_MODE::BOTH_INPUTS_DYNAMIC,
+            DYNAMISM_MODE::A_INPUT_DYNAMIC,
+            DYNAMISM_MODE::B_INPUT_DYNAMIC),
+    testing::Values(
+            ngraph::element::f16,
+            ngraph::element::f32,
+            ngraph::element::i32,
+            ngraph::element::i64,
+            ngraph::element::u8),
+    testing::Values(
+// JIRA: 33925           MatMulTestCase{{{1024}, false, 1, {}, {}, 0}, {{1024, 1000}, false, 0, {}, {}, 1}},
+// JIRA: 33925           MatMulTestCase{{{1024}, true, 1, {1, 0}, {}, 0}, {{1, 1000}, false, 0, {}, {}, 1}},
+            MatMulTestCase{{{5, 10, 1024}, false, 0, {}, {0}, 1}, {{1024, 1000}, false, 1, {}, {0}, 2}},
+            MatMulTestCase{{{5, 10, 1024}, false, 0, {}, {0}, 1}, {{1, 1024, 1000}, false, 0, {}, {0}, 2}},
+            MatMulTestCase{{{5, 1024, 10}, true, 0, {0, 2, 1}, {0}, 1}, {{1, 1000, 1024}, true, 0, {0, 2, 1}, {0}, 2}},
+            MatMulTestCase{{{3, 1024, 10}, true, 1, {0, 1, 3, 2}, {0, 1}, 2}, {{5, 1, 1000, 1024}, true, 0, {0, 1, 3, 2}, {0, 1}, 3}}));
+
+
+class DynamicToStaticShapeMatMul: public CommonTestUtils::TestsCommon,
+        public testing::WithParamInterface<std::tuple<DYNAMISM_MODE, ngraph::element::Type_t, MatMulTestCase>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& mode = std::get<0>(parameters);
+        const auto& data_type = std::get<1>(parameters);
+        const auto& matmul_setup = std::get<2>(parameters);
+
+        ngraph::helpers::CompareFunctions(*transform(mode, data_type, matmul_setup),
+                                          *reference(mode, data_type, matmul_setup));
+    }
+
+protected:
+    ngraph::ParameterVector setting_up_input_dynamism(
+            const DYNAMISM_MODE mode,
+            const std::shared_ptr<ngraph::opset3::Parameter> input_A,
+            const std::shared_ptr<ngraph::opset3::Parameter> input_B,
+            std::shared_ptr<ngraph::Node>& renewed_input_A,
+            std::shared_ptr<ngraph::Node>& renewed_input_B,
+            std::shared_ptr<ngraph::Node>& A_shape_node,
+            std::shared_ptr<ngraph::Node>& B_shape_node) const {
+        ngraph::ParameterVector parameters{input_A, input_B};
+
+        auto input_A_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{input_A->get_shape().size()});
+        auto input_B_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{input_B->get_shape().size()});
+
+        auto dsr_A = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input_A, input_A_dsr);
+        auto dsr_B = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input_B, input_B_dsr);
+
+        switch (mode) {
+            case DYNAMISM_MODE::BOTH_INPUTS_DYNAMIC: {
+                parameters.push_back(input_A_dsr);
+                parameters.push_back(input_B_dsr);
+                renewed_input_A = dsr_A;
+                renewed_input_B = dsr_B;
+                A_shape_node = input_A_dsr;
+                B_shape_node = input_B_dsr;
+                break;
+            }
+            case DYNAMISM_MODE::A_INPUT_DYNAMIC: {
+                parameters.push_back(input_A_dsr);
+                renewed_input_A = dsr_A;
+                renewed_input_B = input_B;
+                A_shape_node = input_A_dsr;
+                B_shape_node = ngraph::opset3::Constant::create(ngraph::element::i64, {input_B->get_shape().size()}, input_B->get_shape());
+                break;
+            }
+            case DYNAMISM_MODE::B_INPUT_DYNAMIC: {
+                parameters.push_back(input_B_dsr);
+                renewed_input_A = input_A;
+                renewed_input_B = dsr_B;
+                A_shape_node = ngraph::opset3::Constant::create(ngraph::element::i64, {input_A->get_shape().size()}, input_A->get_shape());
+                B_shape_node = input_B_dsr;
+                break;
+            }
+            default:
+               NGRAPH_UNREACHABLE("UNKNOWN DYNAMISM MODE for MatMul DSR graph comparison test");
+        }
+        return parameters;
+    }
+
+    std::shared_ptr<const ngraph::Function> transform(
+            const DYNAMISM_MODE mode,
+            const ngraph::element::Type_t& data_type,
+            const MatMulTestCase& matmul_setup) const {
+        auto input_A = std::make_shared<ngraph::opset3::Parameter>(data_type, matmul_setup.A.shape);
+        auto input_B = std::make_shared<ngraph::opset3::Parameter>(data_type, matmul_setup.B.shape);
+
+        std::shared_ptr<ngraph::Node> explicit_A_input, explicit_B_input, normalized_A_shape, normalized_B_shape;
+        const auto parameters = setting_up_input_dynamism(mode, input_A, input_B, explicit_A_input, explicit_B_input, normalized_A_shape, normalized_B_shape);
+        const auto node = std::make_shared<ngraph::opset3::MatMul>(explicit_A_input, explicit_B_input, matmul_setup.A.transpose, matmul_setup.B.transpose);
+
+        const auto function = std::make_shared<ngraph::Function>(ngraph::NodeVector{node}, parameters, "Actual");
+        node->set_output_type(0, node->get_output_element_type(0), ngraph::PartialShape::dynamic(node->get_output_partial_shape(0).rank()));
+
+        const auto transformations = vpu::Transformations{{ngraph::opset3::MatMul::type_info, vpu::dynamicToStaticShapeMatMul}};
+        vpu::DynamicToStaticShape(transformations).transform(function);
+        return function;
+    }
+
+    std::shared_ptr<ngraph::Function> reference(
+            const DYNAMISM_MODE mode,
+            const ngraph::element::Type_t& data_type,
+            const MatMulTestCase& matmul_setup) {
+        auto input_A = std::make_shared<ngraph::opset3::Parameter>(data_type, matmul_setup.A.shape);
+        auto input_B = std::make_shared<ngraph::opset3::Parameter>(data_type, matmul_setup.B.shape);
+        std::shared_ptr<ngraph::Node> explicit_A_input, explicit_B_input, normalized_A_shape, normalized_B_shape;
+        const auto parameters = setting_up_input_dynamism(mode, input_A, input_B, explicit_A_input, explicit_B_input, normalized_A_shape, normalized_B_shape);
+        const auto node = std::make_shared<ngraph::opset3::MatMul>(explicit_A_input, explicit_B_input, matmul_setup.A.transpose, matmul_setup.B.transpose);
+
+        // A
+        if (matmul_setup.A.rank_diff) {
+            ngraph::OutputVector extended_shape_parts = {
+                    ngraph::opset3::Constant::create(
+                            ngraph::element::i64, {matmul_setup.A.rank_diff}, std::vector<int64_t>(matmul_setup.A.rank_diff, 1)), normalized_A_shape};
+            normalized_A_shape = std::make_shared<ngraph::opset3::Concat>(extended_shape_parts, 0);
+        }
+        if (!matmul_setup.A.gather_idxs_for_transpose.empty()) {
+            const auto indices = ngraph::opset3::Constant::create(
+                    ngraph::element::i64, {matmul_setup.A.gather_idxs_for_transpose.size()}, matmul_setup.A.gather_idxs_for_transpose);
+            const auto axis = ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
+            normalized_A_shape = std::make_shared<ngraph::opset3::Gather>(normalized_A_shape, indices, axis);
+        }
+        // B
+        if (matmul_setup.B.rank_diff) {
+            ngraph::OutputVector extended_shape_parts = {
+                    ngraph::opset3::Constant::create(
+                            ngraph::element::i64, {matmul_setup.B.rank_diff}, std::vector<int64_t>(matmul_setup.B.rank_diff, 1)), normalized_B_shape};
+            normalized_B_shape = std::make_shared<ngraph::opset3::Concat>(extended_shape_parts, 0);
+        }
+        if (!matmul_setup.B.gather_idxs_for_transpose.empty()) {
+            const auto indices = ngraph::opset3::Constant::create(
+                    ngraph::element::i64, {matmul_setup.B.gather_idxs_for_transpose.size()}, matmul_setup.B.gather_idxs_for_transpose);
+            const auto axis = ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
+            normalized_B_shape = std::make_shared<ngraph::opset3::Gather>(normalized_B_shape, indices, axis);
+        }
+        // Common
+        ngraph::OutputVector output_dims;
+        if (!matmul_setup.A.batch_gather_idxs.empty()) {
+            const auto max_shape = std::make_shared<ngraph::opset3::Maximum>(normalized_A_shape, normalized_B_shape);
+            const auto indices = ngraph::opset3::Constant::create(
+                    ngraph::element::i64, {matmul_setup.A.batch_gather_idxs.size()}, matmul_setup.A.batch_gather_idxs);
+            const auto axis = ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
+            const auto batch_dims = std::make_shared<ngraph::opset3::Gather>(max_shape, indices, axis);
+            output_dims.push_back(batch_dims);
+        }
+        const auto input_channels = std::make_shared<ngraph::opset3::Gather>(
+                normalized_A_shape,
+                ngraph::opset3::Constant::create(ngraph::element::i64, {1}, {matmul_setup.A.channel_idx}),
+                ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0}));
+        const auto output_channels = std::make_shared<ngraph::opset3::Gather>(
+                normalized_B_shape,
+                ngraph::opset3::Constant::create(ngraph::element::i64, {1}, {matmul_setup.B.channel_idx}),
+                ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0}));
+        output_dims.push_back(input_channels);
+        output_dims.push_back(output_channels);
+
+        const auto output_shape = std::make_shared<ngraph::opset3::Concat>(output_dims, 0);
+        const auto dsr_final = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(node, output_shape);
+        const auto function = std::make_shared<ngraph::Function>(ngraph::NodeVector{dsr_final}, parameters, "Transformed-MatMul");
+        return function;
+    }
+};
+
+
+TEST_P(DynamicToStaticShapeMatMul, CompareFunctions) {
+}
+INSTANTIATE_TEST_CASE_P(MatMul, DynamicToStaticShapeMatMul, combinations);
+
+}  // namespace
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_matmul.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_matmul.cpp
new file mode 100644 (file)
index 0000000..b3536bb
--- /dev/null
@@ -0,0 +1,136 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+namespace {
+
+enum DYNAMISM_MODE {
+    BOTH_INPUTS_DYNAMIC,
+    A_INPUT_DYNAMIC,
+    B_INPUT_DYNAMIC
+};
+
+struct MatMul_input_setup {
+    ngraph::Shape shape;
+    bool transpose;
+    // data for MatMul shape normalization and calculation
+    uint64_t rank_diff;
+    std::vector<int64_t> gather_idxs_for_transpose, batch_gather_idxs;
+    int64_t channel_idx;
+};
+
+struct MatMulTestCase {
+    MatMul_input_setup A, B;
+};
+
+const auto combinations = testing::Combine(
+        testing::Values(
+                DYNAMISM_MODE::BOTH_INPUTS_DYNAMIC,
+                DYNAMISM_MODE::A_INPUT_DYNAMIC,
+                DYNAMISM_MODE::B_INPUT_DYNAMIC),
+        testing::Values(
+                ngraph::element::f16,
+                ngraph::element::f32,
+                ngraph::element::i32,
+                ngraph::element::i64,
+                ngraph::element::u8),
+        testing::Values(
+// JIRA: 33925           MatMulTestCase{{{1024}, false, 1, {}, {}, 0}, {{1024, 1000}, false, 0, {}, {}, 1}},
+// JIRA: 33925           MatMulTestCase{{{1024}, true, 1, {1, 0}, {}, 0}, {{1, 1000}, false, 0, {}, {}, 1}},
+                MatMulTestCase{{{5, 10, 1024}, false, 0, {}, {0}, 1}, {{1024, 1000}, false, 1, {}, {0}, 2}},
+                MatMulTestCase{{{5, 10, 1024}, false, 0, {}, {0}, 1}, {{1, 1024, 1000}, false, 0, {}, {0}, 2}},
+                MatMulTestCase{{{5, 1024, 10}, true, 0, {0, 2, 1}, {0}, 1}, {{1, 1000, 1024}, true, 0, {0, 2, 1}, {0}, 2}},
+                MatMulTestCase{{{3, 1024, 10}, true, 1, {0, 1, 3, 2}, {0, 1}, 2}, {{5, 1, 1000, 1024}, true, 0, {0, 1, 3, 2}, {0, 1}, 3}}),
+        testing::Values(CommonTestUtils::DEVICE_MYRIAD));
+
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+using Parameters = std::tuple<
+    DYNAMISM_MODE,
+    DataType,
+    MatMulTestCase,
+    LayerTestsUtils::TargetDevice
+>;
+
+class DSR_MatMul : public testing::WithParamInterface<Parameters>,
+        public LayerTestsUtils::LayerTestsCommon {
+protected:
+    ngraph::ParameterVector setting_up_input_dynamism(
+            const DYNAMISM_MODE mode,
+            const std::shared_ptr<ngraph::opset3::Parameter> input_A,
+            const std::shared_ptr<ngraph::opset3::Parameter> input_B,
+            std::shared_ptr<ngraph::Node>& renewed_input_A,
+            std::shared_ptr<ngraph::Node>& renewed_input_B,
+            std::shared_ptr<ngraph::Node>& A_shape_node,
+            std::shared_ptr<ngraph::Node>& B_shape_node) const {
+        ngraph::ParameterVector parameters{input_A, input_B};
+
+        auto input_A_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{input_A->get_shape().size()});
+        auto input_B_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{input_B->get_shape().size()});
+
+        auto dsr_A = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input_A, input_A_dsr);
+        auto dsr_B = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input_B, input_B_dsr);
+
+        switch (mode) {
+            case DYNAMISM_MODE::BOTH_INPUTS_DYNAMIC: {
+                parameters.push_back(input_A_dsr);
+                parameters.push_back(input_B_dsr);
+                renewed_input_A = dsr_A;
+                renewed_input_B = dsr_B;
+                A_shape_node = input_A_dsr;
+                B_shape_node = input_B_dsr;
+                break;
+            }
+            case DYNAMISM_MODE::A_INPUT_DYNAMIC: {
+                parameters.push_back(input_A_dsr);
+                renewed_input_A = dsr_A;
+                renewed_input_B = input_B;
+                A_shape_node = input_A_dsr;
+                B_shape_node = ngraph::opset3::Constant::create(ngraph::element::i64, {input_B->get_shape().size()}, input_B->get_shape());
+                break;
+            }
+            case DYNAMISM_MODE::B_INPUT_DYNAMIC: {
+                parameters.push_back(input_B_dsr);
+                renewed_input_A = input_A;
+                renewed_input_B = dsr_B;
+                A_shape_node = ngraph::opset3::Constant::create(ngraph::element::i64, {input_A->get_shape().size()}, input_A->get_shape());
+                B_shape_node = input_B_dsr;
+                break;
+            }
+            default:
+                NGRAPH_UNREACHABLE("UNKNOWN DYNAMISM MODE for MatMul DSR graph comparison test");
+        }
+        return parameters;
+    }
+
+    void SetUp() override {
+        const auto& params = GetParam();
+        const auto& mode = std::get<0>(params);
+        const auto& data_type = std::get<1>(params);
+        const auto& matmul_setup = std::get<2>(params);
+        targetDevice = std::get<3>(params);
+
+        auto input_A = std::make_shared<ngraph::opset3::Parameter>(data_type, matmul_setup.A.shape);
+        auto input_B = std::make_shared<ngraph::opset3::Parameter>(data_type, matmul_setup.B.shape);
+
+        std::shared_ptr<ngraph::Node> explicit_A_input, explicit_B_input, normalized_A_shape, normalized_B_shape;
+        const auto parameters = setting_up_input_dynamism(mode, input_A, input_B, explicit_A_input, explicit_B_input, normalized_A_shape, normalized_B_shape);
+        const auto node = std::make_shared<ngraph::opset3::MatMul>(explicit_A_input, explicit_B_input, matmul_setup.A.transpose, matmul_setup.B.transpose);
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(node);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, parameters, "DSR-MatMul");
+    }
+};
+
+TEST_P(DSR_MatMul, CompareWithReference) {
+    Run();
+}
+// JIRA: 33997
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicMatMul, DSR_MatMul, combinations);
+}  // namespace